/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
/* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */
/* All Rights Reserved */
/* Copyright (c) 1987, 1988 Microsoft Corporation */
/* All Rights Reserved */
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/x86_archext.h>
#include <sys/machbrand.h>
#include <sys/privregs.h>
#if defined(__lint)
#else /* __lint */
#include <sys/segments.h>
#include <sys/traptrace.h>
#include "assym.h"
#endif /* __lint */
/*
* We implement two flavours of system call entry points
*
* - {int,lcall}/iret (i386)
*
* The basic pattern used in the handlers is to check to see if we can
* do fast (simple) version of the system call; if we can't we use various
* C routines that handle corner cases and debugging.
*
* To reduce the amount of assembler replication, yet keep the system call
* implementations vaguely comprehensible, the common code in the body
* of the handlers is broken up into a set of preprocessor definitions
* below.
*/
/*
* When we have SYSCALLTRACE defined, we sneak an extra
* predicate into a couple of tests.
*/
#if defined(SYSCALLTRACE)
#else
#endif
/*
* This check is false whenever we want to go fast i.e.
*
* if (code >= NSYSCALL ||
* t->t_pre_sys || (t->t_proc_flag & TP_WATCHPT) != 0)
* do full version
* #ifdef SYSCALLTRACE
* if (syscalltrace)
* do full version
* #endif
*
* Preconditions:
* - t curthread
* - code contains the syscall number
* Postconditions:
* - %ecx and %edi are smashed
* - condition code flag ZF is cleared if pre-sys is too complex
*/
/*
* Check if a brand_mach_ops callback is defined for the specified callback_id
* type. If so invoke it with the user's %gs value loaded and the following
* data on the stack:
* --------------------------------------
* | user's %ss |
* | | user's %esp |
* | | EFLAGS register |
* | | user's %cs |
* | | user's %eip (user return address) |
* | | 'scratch space' |
* | | user's %ebx |
* | | user's %gs selector |
* v | lwp pointer |
* | callback wrapper return addr |
* --------------------------------------
*
* If the brand code returns, we assume that we are meant to execute the
* normal system call path.
*
* The interface to the brand callbacks on the 32-bit kernel assumes %ebx
* is available as a scratch register within the callback. If the callback
* returns within the kernel then this macro will restore %ebx. If the
* callback is going to return directly to userland then it should restore
* %ebx before returning to userland.
*/
je 1f ;\
/*
* aka CPU_STATS_ADDQ(CPU, sys.syscall, 1)
* This must be called with interrupts or preemption disabled.
*/
#define CPU_STATS_SYS_SYSCALL_INC \
#if !defined(__lint)
/*
* ASSERT(lwptoregs(lwp) == rp);
*
* this may seem obvious, but very odd things happen if this
* assertion is false
*
* Preconditions:
* -none-
* Postconditions (if assertion is true):
* %esi and %edi are smashed
*/
#if defined(DEBUG)
.string "syscall_asm.s:%d lwptoregs(%p) [%p] != rp [%p]"
je 7f; \
pushl $__lwptoregs_msg; \
7:
#else
#endif
#endif /* __lint */
/*
* This is an assembler version of this fragment:
*
* lwp->lwp_state = LWP_SYS;
* lwp->lwp_ru.sysc++;
* lwp->lwp_eosys = NORMALRETURN;
* lwp->lwp_ap = argp;
*
* Preconditions:
* -none-
* Postconditions:
* -none-
*/
/*
* Set up the thread, lwp, find the handler, and copy
* in the arguments from userland to the kernel stack.
*
* Preconditions:
* - %eax contains the syscall number
* Postconditions:
* - %eax contains a pointer to the sysent structure
* - %ecx is zeroed
* - %esi, %edi are smashed
* - %esp is SYS_DROPped ready for the syscall
*/
jz 4f; \
rep; \
smovl; \
4:
/*
* Check to see if a simple return is possible i.e.
*
* if ((t->t_post_sys_ast | syscalltrace) != 0)
* do full version;
*
* Preconditions:
* - t is curthread
* Postconditions:
* - condition code NE is set if post-sys is too complex
* - rtmp is zeroed if it isn't (we rely on this!)
*/
ORL_SYSCALLTRACE(rtmp); \
/*
* Fix up the lwp, thread, and eflags for a successful return
*
* Preconditions:
* - zwreg contains zero
* Postconditions:
* - %esp has been unSYS_DROPped
* - %esi is smashed (points to lwp)
*/
/*
* System call handler. This is the destination of both the call
* gate (lcall 0x27) _and_ the interrupt gate (int 0x91). For our purposes,
* there are two significant differences between an interrupt gate and a call
* gate:
*
* 1) An interrupt gate runs the handler with interrupts disabled, whereas a
* call gate runs the handler with whatever EFLAGS settings were in effect at
* the time of the call.
*
* 2) An interrupt gate pushes the contents of the EFLAGS register at the time
* of the interrupt onto the stack, whereas a call gate does not.
*
* Because we use the following code sequence to handle system calls made from
* _both_ a call gate _and_ an interrupt gate, these two differences must be
* respected. In regards to number 1) above, the handler must ensure that a sane
* EFLAGS snapshot is stored on the stack so that when the kernel returns back
* to the user via iret (which returns to user with the EFLAGS value saved on
* the stack), interrupts are re-enabled.
*
* In regards to number 2) above, the handler must always put a current snapshot
* of EFLAGS onto the stack in the appropriate place. If we came in via an
* interrupt gate, we will be clobbering the EFLAGS value that was pushed by
* the interrupt gate. This is OK, as the only bit that was changed by the
* hardware was the IE (interrupt enable) bit, which for an interrupt gate is
* now off. If we were to do nothing, the stack would contain an EFLAGS with
* IE off, resulting in us eventually returning back to the user with interrupts
* disabled. The solution is to turn on the IE bit in the EFLAGS value saved on
* the stack.
*
* Another subtlety which deserves mention is the difference between the two
* descriptors. The call gate descriptor is set to instruct the hardware to copy
* one parameter from the user stack to the kernel stack, whereas the interrupt
* gate descriptor doesn't use the parameter passing mechanism at all. The
* kernel doesn't actually use the parameter that is copied by the hardware; the
* only reason it does this is so that there is a space on the stack large
* enough to hold an EFLAGS register value, which happens to be in the correct
* place for use by iret when we go back to userland. How convenient.
*
* Stack frame description in syscall() and callees.
*
* |------------|
* | regs | +(8*4)+4 registers
* |------------|
* | 8 args | <- %esp MAXSYSARGS (currently 8) arguments
* |------------|
*
*/
#if defined(__lint)
/*ARGSUSED*/
void
sys_call()
{}

/*
 * Lint stub for the branded-zone flavour of the syscall entry point.
 * The identifier line was missing here; SET_SIZE(brand_sys_call) in the
 * assembler arm below identifies this stub's name.
 */
void
brand_sys_call()
{}
#else /* __lint */
#ifdef TRAPTRACE
#endif
/ doesn't migrate off the CPU while it updates the CPU stats.
/
/ XXX This is only true if we got here via call gate thru the LDT for
/ old style syscalls. Perhaps this preempt++-- will go away soon?
movl %gs:CPU_THREAD, %ebx	/ %ebx = curthread
addb $1, T_PREEMPT(%ebx)	/ t_preempt++: hold off preemption for stats
CPU_STATS_SYS_SYSCALL_INC	/ aka CPU_STATS_ADDQ(CPU, sys.syscall, 1)
subb $1, T_PREEMPT(%ebx)	/ t_preempt--
ENABLE_INTR_FLAGS
pushl %eax / preserve across mstate call
MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)
popl %eax	/ restore the syscall number
movl %gs:CPU_THREAD, %ebx	/ reload curthread; macros above may smash %ebx
ASSERT_LWPTOREGS(%ebx, %esp)	/ DEBUG-only: lwptoregs(lwp) == rp
CHECK_PRESYS_NE(%ebx, %eax)	/ ZF cleared if pre-sys is too complex
jne _full_syscall_presys	/ slow path: full C pre-syscall handling
SIMPLE_SYSCALL_PRESYS(%ebx, _syscall_fault)	/ copy args; faults -> _syscall_fault
_syslcall_call:
call *SY_CALLC(%eax)	/ %eax is the sysent pointer (PRESYS postcondition)
_syslcall_done:
CHECK_POSTSYS_NE(%ebx, %ecx)	/ NE set if post-sys is too complex
jne _full_syscall_postsys	/ slow path: full C post-syscall handling
SIMPLE_SYSCALL_POSTSYS(%ebx, %cx)
movl %eax, REGOFF_EAX(%esp)	/ rval1 into the saved user registers
movl %edx, REGOFF_EDX(%esp)	/ rval2 likewise
MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)
/
/ get back via iret
/
CLI(%edx)	/ interrupts off for the return-to-user path
jmp sys_rtt_syscall
_full_syscall_presys:
movl T_LWP(%ebx), %esi	/ %esi = curthread->t_lwp
subl $SYS_DROP, %esp	/ make room for the syscall arguments
movb $LWP_SYS, LWP_STATE(%esi)	/ lwp->lwp_state = LWP_SYS
pushl %esp	/ arg1: argument-space pointer
pushl %ebx	/ arg0: curthread
call syscall_entry
addl $8, %esp	/ discard the two C args
jmp _syslcall_call
_full_syscall_postsys:
addl $SYS_DROP, %esp	/ undo the presys SYS_DROP
pushl %edx	/ arg2: rval2
pushl %eax	/ arg1: rval1
pushl %ebx	/ arg0: curthread
call syscall_exit
addl $12, %esp	/ discard the three C args
MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)
jmp _sys_rtt
_syscall_fault:
push $0xe / EFAULT
call set_errno
addl $4, %esp	/ discard the errno argument
xorl %eax, %eax / fake syscall_err()
xorl %edx, %edx
jmp _syslcall_done
SET_SIZE(sys_call)
SET_SIZE(brand_sys_call)
#endif /* __lint */
/*
* System call handler via the sysenter instruction
*
*
*
*
 * In the int/lcall cases, the kernel builds the saved-register frame
 * with pusha's {%eax, %ecx, %edx, %ebx, %esp, %ebp, %esi, %edi}, followed
 * by pushes of the segment registers.
 * Then the kernel sets %ds, %es and %gs to kernel selectors, and finally
* extracts %efl and puts it into r_efl (which happens to live at the offset
* that <top-of-stack> was copied into). Note that the value in r_efl has
* the IF (interrupt enable) flag turned on. (The int instruction into the
* interrupt gate does essentially the same thing, only instead of
* <top-of-stack> we get eflags - see comment above.)
*
* In the sysenter case, things are a lot more primitive.
*
* The caller in userland has arranged that:
*
* - %eax contains the syscall number
* - %ecx contains the user %esp
* - %edx contains the return %eip
* - the user stack contains the args to the syscall
*
* e.g.
* <args on the stack>
* mov $SYS_callnum, %eax
* mov $1f, %edx / return %eip
* mov %esp, %ecx / return %esp
* sysenter
* 1:
*
* Hardware and (privileged) initialization code have arranged that by
* the time the sysenter instructions completes:
*
* - %eip is pointing to sys_sysenter (below).
* - %cs and %ss are set to kernel text and stack (data) selectors.
* - %esp is pointing at the lwp's stack
*
 *
 * The handler must then:
 * - do the normal work of a syscall
* - execute the system call epilogue, use sysexit to return to userland.
*
* Note that we are unable to return both "rvals" to userland with this
* call, as %edx is used by the sysexit instruction.
*
* One final complication in this routine is its interaction with
* single-stepping in a debugger. For most of the system call mechanisms,
* the CPU automatically clears the single-step flag before we enter the
 * kernel. The sysenter mechanism does not clear the flag, so a user
 * single-stepping through a libc routine may suddenly find themselves
 * single-stepping through the kernel. To detect this, kmdb compares the
* trap %pc to the [brand_]sys_enter addresses on each single-step trap.
* If it finds that we have single-stepped to a sysenter entry point, it
* explicitly clears the flag and executes the sys_sysenter routine.
*
* One final complication in this final complication is the fact that we
* have two different entry points for sysenter: brand_sys_sysenter and
* sys_sysenter. If we enter at brand_sys_sysenter and start single-stepping
* through the kernel with kmdb, we will eventually hit the instruction at
* sys_sysenter. kmdb cannot distinguish between that valid single-step
* and the undesirable one mentioned above. To avoid this situation, we
* simply add a jump over the instruction at sys_sysenter to make it
* impossible to single-step to it.
*/
#if defined(__lint)
/* Lint stub for the sysenter-instruction syscall entry point below. */
void
sys_sysenter()
{}
#else /* __lint */
ENTRY_NP(brand_sys_sysenter)
pushl %edx	/ save user return %eip across the brand callback
BRAND_CALLBACK(BRAND_CB_SYSENTER)	/ branded-zone hook; may return direct to user
popl %edx
/*
 * Jump over sys_sysenter to allow single-stepping as described
 * above.
 */
ja 1f	/ NOTE(review): conditional jump; relies on flags state from above -- verify
ALTENTRY(sys_sysenter)
nop	/ landing pad kmdb single-steps onto (see discussion above)
1:
/
/ hand-build the return frame; sysenter itself saves nothing for us
/ (see the "more primitive" note above)
/
pushl %ecx / userland makes this a copy of %esp
pushfl
orl $PS_IE, (%esp) / turn interrupts on when we return to user
pushl $UCS_SEL
pushl %edx / userland makes this a copy of %eip
/
/ done. finish building the stack frame
/
subl $8, %esp / leave space for ERR and TRAPNO
SYSENTER_PUSH
#ifdef TRAPTRACE
TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_SYSENTER) / uses labels 8 and 9
TRACE_REGS(%edi, %esp, %ebx, %ecx) / uses label 9
pushl %eax
TRACE_STAMP(%edi) / clobbers %eax, %edx, uses label 9
popl %eax
movl %eax, TTR_SYSNUM(%edi)
#endif
movl %esp, %ebp	/ %ebp = pointer to the register frame
CPU_STATS_SYS_SYSCALL_INC	/ aka CPU_STATS_ADDQ(CPU, sys.syscall, 1)
ENABLE_INTR_FLAGS
pushl %eax / preserve across mstate call
MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)
popl %eax	/ restore the syscall number
movl %gs:CPU_THREAD, %ebx	/ %ebx = curthread
ASSERT_LWPTOREGS(%ebx, %esp)	/ DEBUG-only: lwptoregs(lwp) == rp
CHECK_PRESYS_NE(%ebx, %eax)	/ ZF cleared if pre-sys is too complex
jne _full_syscall_presys	/ shares the slow paths with sys_call above
SIMPLE_SYSCALL_PRESYS(%ebx, _syscall_fault)
_sysenter_call:
call *SY_CALLC(%eax)	/ %eax is the sysent pointer (PRESYS postcondition)
_sysenter_done:
CHECK_POSTSYS_NE(%ebx, %ecx)	/ NE set if post-sys is too complex
jne _full_syscall_postsys
SIMPLE_SYSCALL_POSTSYS(%ebx, %cx)
/
/ NOTE(review): the sysexit return sequence appears to be elided in this
/ excerpt; only one rval can go back, since sysexit uses %edx (see above)
/
/*
 * Declare a uintptr_t which covers the entire pc range of syscall
 * handlers for the stack walkers that need this.
 */
.NWORD . - _allsyscalls	/ _allsyscalls label is defined outside this excerpt
#endif /* __lint */
/*
*/
#if defined(__lint)
/*
 * Lint stubs for the sysenter kernel-stack-pointer save/restore pair.
 * NOTE(review): the first stub's identifier line was missing; restored
 * as sep_save to match its counterpart sep_restore (cf. the save/restore
 * comments in the non-lint arm below) -- confirm against the assembler
 * entry points.
 */
/*ARGSUSED*/
void
sep_save(void *ksp)
{}

/*ARGSUSED*/
void
sep_restore(void *ksp)
{}
#else /* __lint */
/*
* setting this value to zero as we switch away causes the
* stack-pointer-on-sysenter to be NULL, ensuring that we
* don't silently corrupt another (preempted) thread stack
* when running an lwp that (somehow) didn't get sep_restore'd
*/
/*
* Update the kernel stack pointer as we resume onto this cpu.
*/
#endif /* __lint */
/*
* Call syscall(). Called from trap() on watchpoint at lcall 0,7
*/
#if defined(__lint)
/*
 * Lint stub; per the comment above, the real routine calls syscall()
 * from trap() on a watchpoint at lcall 0,7.
 */
void
watch_syscall(void)
{}
#else /* __lint */
#endif /* __lint */