/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_MACHPRIVREGS_H
#define _SYS_MACHPRIVREGS_H
#include <sys/hypervisor.h>
/*
* Platform dependent instruction sequences for manipulating
* privileged state
*/
#ifdef __cplusplus
extern "C" {
#endif
/*
* CLI and STI are quite complex to virtualize!
*/
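/*
 * For orientation, here is a rough C-level view of what the assembler
 * macros below manipulate.  Each virtual CPU shares a vcpu_info
 * structure with the hypervisor (see the Xen public headers pulled in
 * via <sys/hypervisor.h>); its evtchn_upcall_mask byte plays the role
 * that PS_IE plays on bare metal.  This is an illustrative sketch only,
 * with a made-up helper name; the real work is done by the macros below.
 */
#if 0	/* illustration only, not compiled */
static inline void
xpv_cli_sketch(volatile struct vcpu_info *vi)
{
	vi->evtchn_upcall_mask = 1;	/* mask event delivery: the PV "cli" */
}
#endif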
#if defined(__amd64)

/* CPU_VCPU_INFO (assumed name): assym offset of the per-CPU vcpu_info pointer */
#define	CURVCPU(r)	movq	%gs:CPU_VCPU_INFO, r
#define	CURTHREAD(r)	movq	%gs:CPU_THREAD, r

#elif defined(__i386)

#define	CURVCPU(r)	movl	%gs:CPU_VCPU_INFO, r
#define	CURTHREAD(r)	movl	%gs:CPU_THREAD, r

#endif	/* __i386 */
/* VCPU_INFO_EVTCHN_UPCALL_PENDING (assumed name, by analogy with the MASK offset) */
#define	XEN_TEST_EVENT_PENDING(r)	\
	testb	$0xff, VCPU_INFO_EVTCHN_UPCALL_PENDING(r)
#define	XEN_SET_UPCALL_MASK(r)		\
	movb	$1, VCPU_INFO_EVTCHN_UPCALL_MASK(r)
#define	XEN_GET_UPCALL_MASK(r, mask)	\
	movb	VCPU_INFO_EVTCHN_UPCALL_MASK(r), mask
#define	XEN_TEST_UPCALL_MASK(r)		\
	testb	$0xff, VCPU_INFO_EVTCHN_UPCALL_MASK(r)
#define	XEN_CLEAR_UPCALL_MASK(r)	\
	movb	$0, VCPU_INFO_EVTCHN_UPCALL_MASK(r)
#ifdef DEBUG
/*
* Much logic depends on the upcall mask being set at
* various points in the code; use this macro to validate.
*
* Need to use CURVCPU(r) to establish the vcpu pointer.
*/
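/*
 * In C terms the assertion amounts to roughly the following sketch
 * (illustration only; stistipanic is the debug knob tested below, and
 * the helper name is made up):
 */
#if 0	/* illustration only, not compiled */
static inline void
assert_upcall_mask_is_set_sketch(volatile struct vcpu_info *vi)
{
	extern int stistipanic;

	if (vi->evtchn_upcall_mask == 0 && stistipanic > 0)
		panic("upcall mask unexpectedly clear");
}
#endif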
#if defined(__amd64)

#define	ASSERT_UPCALL_MASK_IS_SET			\
	XEN_TEST_UPCALL_MASK(%r11);			\
	jne	6f;					\
	cmpl	$0, stistipanic(%rip);			\
	jle	6f;					\
	movq	stistimsg(%rip), %rdi;	call	panic;	/* stistimsg assumed */ \
6:

#define	SAVE_CLI_LOCATION

#elif defined(__i386)

#define	ASSERT_UPCALL_MASK_IS_SET			\
	XEN_TEST_UPCALL_MASK(%ecx);			\
	jne	6f;					\
	cmpl	$0, stistipanic;			\
	jle	6f;					\
	pushl	stistimsg;	call	panic;		/* stistimsg assumed */ \
6:

#define	SAVE_CLI_LOCATION

#endif	/* __i386 */
#else /* DEBUG */
#define ASSERT_UPCALL_MASK_IS_SET /* empty */
#define SAVE_CLI_LOCATION /* empty */
#endif /* DEBUG */
#define	KPREEMPT_DISABLE(t)		\
	addb	$1, T_PREEMPT(t)	/* t_preempt++ */

#define	KPREEMPT_ENABLE_NOKP(t)		\
	subb	$1, T_PREEMPT(t)	/* t_preempt--, no preemption check */

#define	CLI(r)				\
	CURTHREAD(r);			\
	KPREEMPT_DISABLE(r);		\
	CURVCPU(r);			\
	XEN_SET_UPCALL_MASK(r);		\
	SAVE_CLI_LOCATION;		\
	CURTHREAD(r);			\
	KPREEMPT_ENABLE_NOKP(r)

#define	CLIRET(ret, r)	/* "CLIRET" name assumed: CLI returning the old mask in ret */ \
	CURTHREAD(r);			\
	KPREEMPT_DISABLE(r);		\
	CURVCPU(r);			\
	XEN_GET_UPCALL_MASK(r, ret);	\
	XEN_SET_UPCALL_MASK(r);		\
	SAVE_CLI_LOCATION;		\
	CURTHREAD(r);			\
	KPREEMPT_ENABLE_NOKP(r)
/*
* We use the fact that HYPERVISOR_block will clear the upcall mask
* for us and then give us an upcall if there is a pending event
* to achieve getting a callback on this cpu without the danger of
* being preempted and migrating to another cpu between the upcall
* enable and the callback delivery.
*/
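/*
 * Roughly, in C (an illustrative sketch only: the helper name is made
 * up, the exact HYPERVISOR_block() prototype is assumed, and the
 * race-free sequencing is really done by the assembler below):
 */
#if 0	/* illustration only, not compiled */
static inline void
xpv_sti_sketch(volatile struct vcpu_info *vi)
{
	if (vi->evtchn_upcall_pending) {
		/*
		 * Let the hypervisor clear the upcall mask and deliver
		 * the pending upcall in a single operation, so we cannot
		 * be preempted and migrated in between.
		 */
		(void) HYPERVISOR_block();
	} else {
		vi->evtchn_upcall_mask = 0;	/* the PV "sti" */
	}
}
#endif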
#if defined(__amd64)
#define	STI_CLOBBER	/* clobbers %rax, %rdi, %r11 */	\
	lock;						\
	TRAP_INSTR;	/* clear upcall mask, force upcall */ \
7:

/* STI preserves the registers that STI_CLOBBER trashes */
#define	STI						\
	pushq	%r11;	pushq	%rdi;	pushq	%rax;	\
	STI_CLOBBER;	/* clobbers %r11, %rax, %rdi */	\
	popq	%rax;	popq	%rdi;	popq	%r11

#elif defined(__i386)

#define	STI_CLOBBER	/* clobbers %eax, %ebx, %ecx */	\
	lock;						\
	TRAP_INSTR;	/* clear upcall mask, force upcall */ \
7:

#define	STI						\
	pushl	%eax;	pushl	%ebx;	pushl	%ecx;	\
	STI_CLOBBER;	/* clobbers %eax, %ebx, %ecx */	\
	popl	%ecx;	popl	%ebx;	popl	%eax
#endif /* __i386 */
/*
* Map the PS_IE bit to the hypervisor's event mask bit
* To -set- the event mask, we have to do a CLI
* To -clear- the event mask, we have to do a STI
* (with all the accompanying pre-emption and callbacks, ick)
*
* And vice versa.
*/
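/*
 * Roughly, in C (an illustrative sketch only; PS_IE is the psw.h flag
 * bit and the helper names are made up):
 */
#if 0	/* illustration only, not compiled */
static inline ulong_t
event_mask_to_ie_sketch(volatile struct vcpu_info *vi, ulong_t rfl)
{
	rfl &= ~PS_IE;
	if (vi->evtchn_upcall_mask == 0)
		rfl |= PS_IE;		/* events unmasked <=> PS_IE set */
	return (rfl);
}

static inline void
ie_to_event_mask_sketch(volatile struct vcpu_info *vi, ulong_t rfl)
{
	if (rfl & PS_IE)
		vi->evtchn_upcall_mask = 0;	/* "STI" (simplified) */
	else
		vi->evtchn_upcall_mask = 1;	/* "CLI" */
}
#endif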
#if defined(__amd64)
jnz 4f; \
jmp 5f; \
4: STI; \
5:
	jnz	1f;					\
1:

#elif defined(__i386)
jnz 4f; \
jmp 5f; \
4: STI; \
5:
jnz 1f; \
1:
#endif /* __i386 */
/*
* Used to re-enable interrupts in the body of exception handlers
*/
#if defined(__amd64)

#define	ENABLE_INTR_FLAGS	\
	pushq	$F_ON;	popfq;	STI	/* F_ON (assumed): flags word with PS_IE set */

#elif defined(__i386)

#define	ENABLE_INTR_FLAGS	\
	pushl	$F_ON;	popfl;	STI

#endif	/* __i386 */
/*
* Virtualize IRET and SYSRET
*/
#if defined(__amd64)
#if defined(DEBUG)
/*
* Die nastily with a #ud trap if we are about to switch to user
* mode in HYPERVISOR_IRET and RUPDATE_PENDING is set.
*/
#define	__ASSERT_NO_RUPDATE_PENDING	\
	je	1f;			\
	je	1f;			\
	ud2;				\
1:
#else /* DEBUG */
#define __ASSERT_NO_RUPDATE_PENDING
#endif /* DEBUG */
/*
* Switching from guest kernel to user mode.
* flag == VGCF_IN_SYSCALL => return via sysret
* flag == 0 => return via iretq
*
* See definition in public/arch-x86_64.h. Stack going in must be:
* rax, r11, rcx, flags, rip, cs, rflags, rsp, ss.
*/
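/*
 * That frame corresponds to struct iret_context in the public header
 * named above; an equivalent C layout for reference (illustration
 * only):
 */
#if 0	/* illustration only, not compiled */
struct iret_context_sketch {
	uint64_t rax, r11, rcx, flags;		/* set up by HYPERVISOR_IRET */
	uint64_t rip, cs, rflags, rsp, ss;	/* from the trap frame */
};
#endif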
#define	HYPERVISOR_IRET(flag)			\
	__ASSERT_NO_RUPDATE_PENDING;		\
	pushq	$flag;	pushq	%rcx;	pushq	%r11;	pushq	%rax; \
	movl	$__HYPERVISOR_iret, %eax;	\
	syscall;				\
	ud2	/* die nastily if we return! */
#define IRET HYPERVISOR_IRET(0)
/*
* XXPV: Normally we would expect to use sysret to return from kernel to
* user mode when using the syscall instruction. The iret hypercall
* does support both iret and sysret semantics. For us to use sysret
* style would require that we use the hypervisor's private descriptors
 * that obey the segment selector ordering imposed by the syscall instruction.
* With iret we can use whatever %cs value we choose. We should fix
* this to use sysret one day.
*/
#define SYSRETQ HYPERVISOR_IRET(0)
#define	SWAPGS	/* empty - handled in hypervisor */

#elif defined(__i386)
/*
* Switching from guest kernel to user mode.
* See definition in public/arch-x86_32.h. Stack going in must be:
* eax, flags, eip, cs, eflags, esp, ss.
*/
#define	HYPERVISOR_IRET				\
	movl	$__HYPERVISOR_iret, %eax;	\
	int	$0x82;				\
	ud2	/* die nastily if we return! */
#define IRET HYPERVISOR_IRET
#endif /* __i386 */
/*
 * Xen 3.x wedges the current value of upcall_mask into an unused byte of
 * the saved %cs on the stack at the time of passing through a trap or
 * interrupt gate.  Since Xen also updates PS_IE in %[e,r]flags as well,
 * the saved %cs is cleaned up so that we are not confused by bits set in
 * the reserved portions of the %cs slot.
*/
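/*
 * Conceptually, before the saved state is used (for example by /proc or
 * a debugger) the %cs slot is cleaned up along these lines (illustration
 * only; amd64 struct regs shown, helper name made up):
 */
#if 0	/* illustration only, not compiled */
static inline void
clean_saved_cs_sketch(struct regs *rp)
{
	rp->r_cs &= 0xffff;	/* keep the selector, drop the stashed mask */
}
#endif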
#if defined(__amd64)
#endif /* __i386 */
/*
* All exceptions for amd64 have %r11 and %rcx on the stack.
* Just pop them back into their appropriate registers and
 * let the frame get saved as if we were running native.
*/
#if defined(__amd64)
#define	XPV_TRAP_POP	popq	%rcx;	popq	%r11

#define	XPV_TRAP_PUSH	pushq	%r11;	pushq	%rcx
#endif /* __amd64 */
/*
* Macros for saving the original segment registers and restoring them
* for fast traps.
*/
#if defined(__amd64)
/*
* Smaller versions of INTR_PUSH and INTR_POP for fast traps.
* The following registers have been pushed onto the stack by
* hardware at this point:
*
* greg_t r_rip;
* greg_t r_cs;
* greg_t r_rfl;
* greg_t r_rsp;
* greg_t r_ss;
*
* This handler is executed both by 32-bit and 64-bit applications.
* 64-bit applications allow us to treat the set (%rdi, %rsi, %rdx,
* %rcx, %r8, %r9, %r10, %r11, %rax) as volatile across function calls.
* However, 32-bit applications only expect (%eax, %edx, %ecx) to be volatile
* across a function call -- in particular, %esi and %edi MUST be saved!
*
* We could do this differently by making a FAST_INTR_PUSH32 for 32-bit
* programs, and FAST_INTR_PUSH for 64-bit programs, but it doesn't seem
* particularly worth it.
*
*/
#define FAST_INTR_PUSH \
#define FAST_INTR_POP \
#define FAST_INTR_RETURN \
#define FAST_INTR_PUSH \
cld; \
#define FAST_INTR_POP \
#define FAST_INTR_RETURN \
#endif /* __i386 */
/*
* Handling the CR0.TS bit for floating point handling.
*
* When the TS bit is *set*, attempts to touch the floating
* point hardware will result in a #nm trap.
*/
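/*
 * A paravirtualized guest cannot write CR0 directly, so TS is flipped
 * through the hypervisor instead.  Roughly (illustration only; the
 * HYPERVISOR_fpu_taskswitch() wrapper name is assumed from the Xen
 * fpu_taskswitch hypercall):
 */
#if 0	/* illustration only, not compiled */
static inline void
xpv_stts_sketch(void)
{
	(void) HYPERVISOR_fpu_taskswitch(1);	/* set CR0.TS */
}

static inline void
xpv_clts_sketch(void)
{
	(void) HYPERVISOR_fpu_taskswitch(0);	/* clear CR0.TS */
}
#endif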
#if defined(__amd64)
#define CLTS \
#define STTS(r) \
pushl $1; \
#define CLTS \
pushl $0; \
#endif /* __i386 */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_MACHPRIVREGS_H */