mpcore.s revision 0baeff3d96eae184e775c1064f1836090446a7bf
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/privregs.h>
#if !defined(__lint)
#include <sys/segments.h>
#include "assym.h"
#endif
/*
* Our assumptions:
* - We are running in real mode.
* - Interrupts are disabled.
* - Selectors are equal (cs == ds == ss) for all real mode code
* - The GDT, IDT, ktss and page directory has been built for us
*
* Our actions:
* - We start using our GDT by loading correct values in the
* selector registers (cs=KCS_SEL, ds=es=ss=KDS_SEL, fs=KFS_SEL,
* gs=KGS_SEL).
* - We change over to using our IDT.
* - We load the default LDT into the hardware LDT register.
* - We load the default TSS into the hardware task register.
* - call ap_mlsetup(void)
* - call mp_startup(void) indirectly through the T_PC
*
*/
/*
 * Lint stub for real_mode_start().  The real implementation is the
 * 16-bit real-mode assembly text in the non-lint branch below (see the
 * "Our assumptions / Our actions" comment above for its contract); this
 * empty C body exists only so lint can type-check callers against the
 * void (*)(void) signature.
 */
void
real_mode_start(void)
{}
#else /* lint */
#if defined(__amd64)
#if !defined(__GNUC_AS__)
/*
* For vulcan as we need to do a .code32 and mentally invert the
* meaning of the addr16 and data16 prefixes to get 32-bit access when
* generating code to be executed in 16-bit mode (sigh...)
*/
/*
* Helps in debugging by giving us the fault address.
*
* Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
*/
/*
* Enable protected-mode, write protect, and alignment mask
*/
/*
* Do a jmp immediately after writing to cr0 when enabling protected
* mode to clear the real mode prefetch queue (per Intel's docs)
*/
/*
* 16-bit protected mode is now active, so prepare to turn on long
* mode.
*
* Note that we currently assume that if we're attempting to run a
* kernel compiled with (__amd64) #defined, the target CPU has long
* mode support.
*/
#if 0
/*
* If there's a chance this might not be true, the following test should
* be done, with the no_long_mode branch then doing something
* appropriate:
*/
cpuid /* get extended feature flags */
#endif
/*
* Add any initial cr4 bits
*/
/*
* Enable PAE mode (CR4.PAE)
*/
/*
* Point cr3 to the 64-bit long mode page tables.
*
* Note that these MUST exist in 32-bit space, as we don't have
* a way to load %cr3 with a 64-bit base address for the page tables
* until the CPU is actually executing in 64-bit long mode.
*/
/*
* Set long mode enable in EFER (EFER.LME = 1)
*/
/*
* Finally, turn on paging (CR0.PG = 1) to activate long mode.
*/
/*
* The instruction after enabling paging in CR0 MUST be a branch.
*/
/*
* Long mode is now active but since we're still running with the
 * original 16-bit CS we're actually in 16-bit compatibility mode.
*
* We have to load an intermediate GDT and IDT here that we know are
* in 32-bit space before we can use the kernel's GDT and IDT, which
 * may be in the 64-bit address space, and since we're in compatibility
* mode, we only have access to 16 and 32-bit instructions at the
* moment.
*/
/*
* Do a far transfer to 64-bit mode. Set the CS selector to a 64-bit
* long mode selector (CS.L=1) in the temporary 32-bit GDT and jump
* to the real mode platter address of long_mode 64 as until the 64-bit
* CS is in place we don't have access to 64-bit instructions and thus
* can't reference a 64-bit %rip.
*/
/*
* We are now running in long mode with a 64-bit CS (EFER.LMA=1,
* CS.L=1) so we now have access to 64-bit instructions.
*
* First, set the 64-bit GDT base.
*/
/*
* Save the CPU number in %r11; get the value here since it's saved in
* the real mode platter.
*/
/*
* Add rm_platter_pa to %rsp to point it to the same location as seen
* from 64-bit mode.
*/
/*
* Now do an lretq to load CS with the appropriate selector for the
* kernel's 64-bit GDT and to start executing 64-bit setup code at the
* virtual address where boot originally loaded this code rather than
* the copy in the real mode platter's rm_code array as we've been
* doing so far.
*/
/*
* Complete the balance of the setup we need to before executing
* 64-bit kernel code (namely init rsp, TSS, LGDT, FS and GS).
*/
/*
* Set GS to the address of the per-cpu structure as contained in
* cpu[cpu_number].
*
* Unfortunately there's no way to set the 64-bit gsbase with a mov,
* so we have to stuff the low 32 bits in %eax and the high 32 bits in
* %edx, then call wrmsr.
*/
/*
* Init FS and KernelGSBase.
*
* Based on code in mlsetup(), set them both to 8G (which shouldn't be
* valid until some 64-bit processes run); this will then cause an
* exception in any code that tries to index off them before they are
* properly setup.
*/
/*
* Init %rsp to the exception stack set in tss_ist1 and create a legal
* AMD64 ABI stack frame
*/
pushq $0 /* null return address */
pushq $0 /* null frame pointer terminates stack trace */
/*
* Before proceeding, enable usage of the page table NX bit if that's
* how the page tables are set up.
*/
/*
* Complete the rest of the setup and call mp_startup().
*/
/* not reached */
int $20 /* whoops, returned somehow! */
#else /* __GNUC_AS__ */
/*
* NOTE: The GNU assembler automatically does the right thing to
* generate data size operand prefixes based on the code size
* generation mode (e.g. .code16, .code32, .code64) and as such
* prefixes need not be used on instructions EXCEPT in the case
* of address prefixes for code for which the reference is not
* automatically of the default operand size.
*/
/*
* Helps in debugging by giving us the fault address.
*
* Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
*/
/*
* Enable protected-mode, write protect, and alignment mask
*/
/*
* Do a jmp immediately after writing to cr0 when enabling protected
* mode to clear the real mode prefetch queue (per Intel's docs)
*/
/*
* 16-bit protected mode is now active, so prepare to turn on long
* mode.
*
* Note that we currently assume that if we're attempting to run a
* kernel compiled with (__amd64) #defined, the target CPU has long
* mode support.
*/
#if 0
/*
* If there's a chance this might not be true, the following test should
* be done, with the no_long_mode branch then doing something
* appropriate:
*/
cpuid /* get extended feature flags */
#endif
/*
* Add any initial cr4 bits
*/
/*
* Enable PAE mode (CR4.PAE)
*/
/*
* Point cr3 to the 64-bit long mode page tables.
*
* Note that these MUST exist in 32-bit space, as we don't have
* a way to load %cr3 with a 64-bit base address for the page tables
* until the CPU is actually executing in 64-bit long mode.
*/
/*
* Set long mode enable in EFER (EFER.LME = 1)
*/
/*
* Finally, turn on paging (CR0.PG = 1) to activate long mode.
*/
/*
* The instruction after enabling paging in CR0 MUST be a branch.
*/
/*
* Long mode is now active but since we're still running with the
 * original 16-bit CS we're actually in 16-bit compatibility mode.
*
* We have to load an intermediate GDT and IDT here that we know are
* in 32-bit space before we can use the kernel's GDT and IDT, which
 * may be in the 64-bit address space, and since we're in compatibility
* mode, we only have access to 16 and 32-bit instructions at the
* moment.
*/
/*
* Do a far transfer to 64-bit mode. Set the CS selector to a 64-bit
* long mode selector (CS.L=1) in the temporary 32-bit GDT and jump
* to the real mode platter address of long_mode 64 as until the 64-bit
* CS is in place we don't have access to 64-bit instructions and thus
* can't reference a 64-bit %rip.
*/
/*
* We are now running in long mode with a 64-bit CS (EFER.LMA=1,
* CS.L=1) so we now have access to 64-bit instructions.
*
* First, set the 64-bit GDT base.
*/
/*
* Save the CPU number in %r11; get the value here since it's saved in
* the real mode platter.
*/
/*
* Add rm_platter_pa to %rsp to point it to the same location as seen
* from 64-bit mode.
*/
/*
* Now do an lretq to load CS with the appropriate selector for the
* kernel's 64-bit GDT and to start executing 64-bit setup code at the
* virtual address where boot originally loaded this code rather than
* the copy in the real mode platter's rm_code array as we've been
* doing so far.
*/
/*
* Complete the balance of the setup we need to before executing
* 64-bit kernel code (namely init rsp, TSS, LGDT, FS and GS).
*/
/*
* Set GS to the address of the per-cpu structure as contained in
* cpu[cpu_number].
*
* Unfortunately there's no way to set the 64-bit gsbase with a mov,
* so we have to stuff the low 32 bits in %eax and the high 32 bits in
* %edx, then call wrmsr.
*/
/*
* Init FS and KernelGSBase.
*
* Based on code in mlsetup(), set them both to 8G (which shouldn't be
* valid until some 64-bit processes run); this will then cause an
* exception in any code that tries to index off them before they are
* properly setup.
*/
/*
* Init %rsp to the exception stack set in tss_ist1 and create a legal
* AMD64 ABI stack frame
*/
pushq $0 /* null return address */
pushq $0 /* null frame pointer terminates stack trace */
/*
* Before proceeding, enable usage of the page table NX bit if that's
* how the page tables are set up.
*/
/*
* Complete the rest of the setup and call mp_startup().
*/
/* not reached */
int $20 /* whoops, returned somehow! */
#endif /* !__GNUC_AS__ */
#if !defined(__GNUC_AS__)
/*
* Helps in debugging by giving us the fault address.
*
* Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
*/
/*
* Enable protected-mode, paging, write protect, and alignment mask
*/
/*
* At this point we are with kernel's cs and proper eip.
*
* We will be executing not from the copy in real mode platter,
* but from the original code where boot loaded us.
*
* By this time GDT and IDT are loaded as is cr3.
*/
/*
* Before going any further, enable usage of page table NX bit if
* that's how our page tables are set up.
*/
/* not reached */
int $20 /* whoops, returned somehow! */
#else
/*
* Helps in debugging by giving us the fault address.
*
* Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
*/
/*
* Enable protected-mode, paging, write protect, and alignment mask
*/
/*
* At this point we are with kernel's cs and proper eip.
*
* We will be executing not from the copy in real mode platter,
* but from the original code where boot loaded us.
*
* By this time GDT and IDT are loaded as is cr3.
*/
/*
 * Before going any further, enable usage of page table NX bit if
* that's how our page tables are set up.
*/
/* not reached */
int $20 /* whoops, returned somehow! */
#endif
#endif /* __amd64 */
#endif /* lint */
/*
 * Lint stub for return_instr(); the actual implementation is assembly
 * in the non-lint branch below.  Empty body — provided only so lint has
 * a C prototype to check against.
 */
void
return_instr(void)
{}
/*ARGSUSED*/
void
{}
#else /* lint */
/*
* Same for amd64 and i386.
*/
/* AMD Software Optimization Guide - Section 6.2 */
#if defined(__amd64)
/*
* jump indirect to the send_dirint
* function. Set to return if machine
 * type module doesn't redirect it.
*/
#endif /* __amd64 */
#endif /* lint */