locore.s revision ae115bc77f6fcde83175c75b4206dc2e50747966
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#if defined(__lint)
#endif /* __lint */
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <amd64/machregs.h>
#include <assym.h>
#if defined(__lint)
/*ARGSUSED*/
void
{}
#else /* __lint */
.data
/*
 * NOTE(review): purpose of this word is not visible in this excerpt;
 * presumably a one-time initialization flag -- confirm against the
 * full locore.s.
 */
.long 1
/*
* stash current i386 state in i386_machregs
*/
#ifdef DEBUG
#endif /* DEBUG */
/*
* Fetch the argument, and switch to it as a stack;
* the new stack contains an amd64_machregs on it,
* just sitting there waiting for us to restore it.
*/
/*NOTREACHED*/
#endif /* __lint */
#if defined(__lint)
#define VTRAP_STUB_BEGIN(opname) \
#define VTRAP_STUB_END(opname) \
#define VTRAP_STUB(symname) \
#else
#define VTRAP_STUB_BEGIN(symname) \
symname: \
#define VTRAP_STUB_END(symname) \
.code32; \
/*
* callbacks from the amd64 kernel to the i386 world are handled
* as calls into a virtual amd64 boot program as if they were
* virtual traps i.e. we save the machine state, switch to i386 mode,
* then decode and interpret the request in C code (amd64_vtrap)
*/
#define VTRAP_STUB(opname) \
call 1f; \
/*
 * put the state of the amd64 machine onto the stack
 *
 * Build a skeleton save area by pushing zero placeholders (the real
 * values are captured/filled in elsewhere).  NOTE(review): several
 * pushes appear to be elided from this excerpt, so the layout below
 * may be incomplete -- compare against the amd64_machregs layout
 * from assym.h.
 */
push $0 /* %ss */
pushf /* rflags */
push $0 /* %cs */
push $0 /* err */
push $0 /* %es */
push $0 /* %ds */
/*
 * (that was the 'struct regs' part; now for the somewhat trickier
 * parts of the machine state -- registers that carry implicit,
 * hidden state along with their architecturally visible values)
 */
/* XX64 need to do some compile-time assert here to check this! */
/*
 * Placeholder slots; the fields they correspond to are not
 * identifiable from this excerpt -- TODO confirm against the
 * amd64_machregs definition.
 */
push $0
push $0
push $0
push $0
#define PUSH_SEG_BASE(msr) \
rdmsr; \
/*
 * save the segment registers individually (a simple push of a
 * segment register does not capture the state needed here)
 */
/*
* Back to i386 mode
*/
/*
* reload %ds here so we can refer to i386_machregs below
*/
/*
* 1. Switch to compatibility mode at CPL=0
*
* We seem forced to do this -- which is a complicated
* way to do:
*
* ljmp $KCS32SEL, $__amd64_compat_mode
* __amd64_compat_mode:
*
* which unfortunately isn't legal in long mode.
*
* You'd think this would work, but it doesn't.
*
* push $KCS32SEL
* push %rax
* lret
*
* Perhaps there's a better way?
*/
call 9f
/*
* 2. Deactivate long mode by clearing CR0.PG
*/
/*
* 2a. Disable PAE
*/
/*
* 3. Load CR3 with physical base address of page tables
*
* (Note we loaded %ds above)
*/
/*
* 4. Disable long mode by clearing EFER.LME to 0
*/
/*
* 5. Enable legacy page-translation
*/
/*
* Reconstruct our life as an i386 processor from the
* exitto save area.
*/
/*
* %cr2 is the page fault address; we have no need to restore it
*/
/*
* Need to clear busy bit in our tss descriptor
*/
/ clrtss:
/
/*
* As long as the transition from long mode to i386 mode
* simply truncated %rsp -> %esp, we now have a struct amd64_machregs
* sitting on the top of the stack.
*/
/*
* let's go long ..
*/
/*
* Disable paging
*/
/*
* 2a. enable PAE
*/
/*
* 2b. load CR3 with PML4 base address
*/
/*
* 2c. enable long mode
*/
/*
* 2d. enable paging
*/
/*
* we are now in compatibility mode
* move to the 64 bit descriptor tables so that
* we find ourselves in a sane place when we lret
* and switch to 64 bit mode ..
*/
/*
* switch to 64-bit mode
*/
call 1f
/*
* the following descriptor table loads fetch the full
* 64-bit values expected by the client.
*/
/*
* fix up the selectors for long mode
*/
#define RESTORE_SEG_BASE(seg) \
#define RESTORE_CR(num) \
/*
 * Restore the saved control registers.  (The RESTORE_CR macro body is
 * not visible in this excerpt; presumably it reloads %crN from the
 * save area -- confirm against the macro definition.)
 */
RESTORE_CR(0)
/* don't restore %cr2 */
RESTORE_CR(3)
RESTORE_CR(4)
/*
 * Only restore %cr8 if it's nonzero or if we have not yet initialized
 * it (if it's zero, that means it's not safe to restore it -- we're
 * either using the local APIC TPR or no TPR at all). We only test the
 * non-reserved bits. The %cr8 initialization is done only on the first
 * transfer from the booter to the loaded image.
 */
/*
 * NOTE(review): the test/branch instructions implementing the
 * conditional %cr8 restore described above are elided from this
 * excerpt; 1: and 2: are their branch targets.
 */
1:
RESTORE_CR(8)
2:
/*
* and %es.
*
* Meanwhile %rbp, %r11, err and trapno don't get restored at all.
*/
/*
* The bottom five arguments in the struct amd64_machregs on the
* stack (starting with r_rip) are positioned such that they can be
* used as-is by iretq to return to the caller, switch interrupts
* back on if needed, and restore the proper %rsp.
*
* HOWEVER, we need the %rbp and %rip sitting in the return frame
* on the stack, so grab them from beyond the end of the amd64_machregs
* structure on the stack so that everything will be restored properly
* by the iretq.
*
* The stack after the addq below will be:
*
* 0 amd64_machregs %rip
* +8 amd64_machregs %cs
* +0x10 amd64_machregs rflags
* +0x18 amd64_machregs %rsp
* +0x20 amd64_machregs %ss
* +0x28 return %rbp from bootops 'call' insn
* +0x30 return %rip from bootops 'call' insn
*/
#endif /* __lint */