/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
/* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */
/* All Rights Reserved */
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/ucontext.h>
#include <sys/debugreg.h>
#include <sys/privregs.h>
#include <vm/seg_kmem.h>
#include <sys/tuneable.h>
#include <sys/bootconf.h>
#include <sys/systeminfo.h>
#include <sys/contract_impl.h>
#include <sys/x86_archext.h>
#include <sys/segments.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
/*
 * Compare the version of boot that boot says it is against
 * the version of boot the kernel expects.
 *
 * NOTE(review): this extraction is incomplete -- the function name and
 * parameter line (presumably check_boot_version(int boots_version) --
 * verify against the full source) and the prom_printf() argument
 * continuation line are missing.  Do not edit from this view.
 */
int
{
if (boots_version == BO_VERSION)
return (0);
prom_printf("Wrong boot interface - kernel needs v%d found v%d\n",
prom_panic("halting");
/*NOTREACHED*/
}
/*
 * Process the physical installed list for boot.
 * Finds:
 * 1) the pfn of the highest installed physical page,
 * 2) the number of pages installed
 * 3) the number of distinct contiguous regions these pages fall into.
 * 4) the number of contiguous memory ranges
 *
 * NOTE(review): the function name and all parameter lines except the
 * final out-parameter are missing from this extraction, as is the body
 * of the memlist scan loop; only the range counter increment survives.
 * Verify against the full source before editing.
 */
void
int *ranges) /* return ptr for the count of contig. ranges */
{
int cnt = 0;
++cnt;
}
}
/*
 * NOTE(review): the function name line is missing from this extraction.
 * Given the local `ranges' discard variable, this appears to be a wrapper
 * around the extended variant above that ignores the contiguous-range
 * count -- confirm against the full source.
 */
void
{
int ranges;
}
/*
 * No-op on this platform: nothing needs to be done when the list of
 * installed physical memory changes.
 */
void
phys_install_has_changed(void)
{
}
/*
 * Copy in a memory list from boot to kernel, with a filter function
 * to remove pages. The filter function can increase the address and/or
 * decrease the size to filter out pages. It will also align addresses and
* sizes to PAGESIZE.
*/
/*
 * NOTE(review): the function name/parameters and the loop headers are
 * missing from this extraction; only the tail of the inner filtering
 * loop is visible, so the brace structure below is unbalanced in this
 * view.  Do not edit without the full source.
 */
void
{
/*
 * Move through the memlist applying a filter against
 * each range of memory. Note that we may apply the
 * filter multiple times against each memlist entry.
 */
if (size == 0)
break;
dst++;
} else {
dst++;
prev++;
}
}
}
}
/*
 * Kernel setup code, called from startup().
 *
 * NOTE(review): lines are missing from this extraction -- `pp' is used
 * below with no visible declaration or assignment (presumably it points
 * at proc 0; verify against the full source), and the process-0 setup
 * the comments refer to is absent.
 */
void
kern_setup1(void)
{
proc_sched = pp;
/*
 * Initialize process 0 data structures
 */
/*
 * XXX - we assume that the u-area is zeroed out except for
 * ttolwp(curthread)->lwp_regs.
 */
thread_init(); /* init thread_free list */
pid_init(); /* initialize pid (proc) table */
contract_init(); /* initialize contracts */
}
/*
 * Load a procedure into a thread.
 *
 * NOTE(review): this extraction is incomplete -- the function name and
 * parameter lines, the initial stack-pointer computation that `sp'
 * refers to, the copy of the object `arg' points at into the frame, the
 * push of the arg pointer, and the final statement that sets the thread
 * to resume at thread_start() are all missing.  Do not edit from this
 * view.
 */
void
{
long *p;
extern void thread_start();
/*
 * Push a "c" call frame onto the stack to represent
 * the caller of "start".
 */
if (len != 0) {
/*
 * the object that arg points at is copied into the
 * caller's frame.
 */
}
/*
 * Set up arguments (arg and len) on the caller's stack frame.
 */
p = (long *)sp;
*--p = 0; /* fake call */
*--p = 0; /* null frame pointer terminates stack trace */
*--p = (long)len;
/*
 * initialize thread to resume at thread_start() which will
 * turn around and invoke (*start)(arg, len).
 */
}
/*
 * load user registers into lwp.
 *
 * NOTE(review): this extraction is heavily truncated -- the function
 * signature, the code that copies the supplied gregs into the lwp, the
 * data-model test that pairs with the `} else {' below, and the
 * lwp_setprivate() calls the comments discuss are all missing, leaving
 * the #ifdef structure unbalanced in this view.  Do not edit without
 * the full source.
 */
/*ARGSUSED2*/
void
{
/*
 * For 64-bit lwps, we allow one magic %fs selector value, and one
 * magic %gs selector to point anywhere in the address space using
 * %fsbase and %gsbase behind the scenes. libc uses %fs to point
 * at the ulwp_t structure.
 *
 * For 32-bit lwps, libc wedges its lwp thread pointer into the
 * ucontext ESP slot (which is otherwise irrelevant to setting a
 * ucontext) and LWPGS_SEL value into gregs[REG_GS]. This is so
 * syslwp_create() can atomically setup %gs.
 *
 * See setup_context() in libc.
 */
#ifdef _SYSCALL32_IMPL
} else {
/*
 * See lwp_setprivate in kernel and setup_context in libc.
 *
 * Currently libc constructs a ucontext from whole cloth for
 * every new (not main) lwp created. For 64 bit processes
 * %fsbase is directly set to point to current thread pointer.
 * In the past (solaris 10) %fs was also set LWPFS_SEL to
 * indicate %fsbase. Now we use the null GDT selector for
 * this purpose. LWP[FS|GS]_SEL are only intended for 32 bit
 * processes. To ease transition we support older libcs in
 * the newer kernel by forcing %fs or %gs selector to null
 * by calling lwp_setprivate if LWP[FS|GS]_SEL is passed in
 * the ucontext. This should be ripped out at some future
 * date. Another fix would be for libc to do a getcontext
 * and inherit the null %fs/%gs from the current context but
 * that means an extra system call and could hurt performance.
 */
}
#else
#endif
}
/*
 * set syscall()'s return values for a lwp.
 *
 * NOTE(review): the function name/parameter line and the entire body are
 * missing from this extraction (presumably lwp_setrval() -- verify
 * against the full source).
 */
void
{
}
/*
 * set syscall()'s return values for a lwp.
 *
 * NOTE(review): the function name/parameter line and the body are
 * missing from this extraction.  The header comment duplicates the one
 * on the previous function, which suggests it was garbled as well --
 * verify both against the full source.
 */
void
{
}
/*
 * Copy regs from parent to child.
 *
 * NOTE(review): incomplete extraction -- the function name/parameter
 * line, the declaration/initialization of `pcb', the actual register
 * copy, and the body of the pcb_rupdate == 0 branch are all missing.
 * Do not edit from this view.
 */
void
{
#if defined(__amd64)
if (pcb->pcb_rupdate == 0) {
}
#endif
}
/*
 * This function is currently unused on x86.
 *
 * NOTE(review): the function name/parameter line is missing from this
 * extraction -- verify the identifier against the full source.
 */
/*ARGSUSED*/
void
{}
/*
 * Per-lwp PCB teardown hook; currently unused (a no-op) on x86.
 */
void
lwp_pcb_exit(void)
{
}
/*
* Lwp context ops for segment registers.
*/
/*
* Every time we come into the kernel (syscall, interrupt or trap
* but not fast-traps) we capture the current values of the user's
* segment registers into the lwp's reg structure. This includes
* lcall for i386 generic system call support since it is handled
* as a segment-not-present trap.
*
* Here we save the current values from the lwp regs into the pcb
* and set pcb->pcb_rupdate to 1 to tell the rest of the kernel
* that the pcb copy of the segment registers is the current one.
* This ensures the lwp's next trip to user land via update_sregs.
* Finally we set t_post_sys to ensure that no system call fast-path's
* its way out of the kernel via sysret.
*
* (This means that we need to have interrupts disabled when we test
* t->t_post_sys in the syscall handlers; if the test fails, we need
* to keep interrupts disabled until we return to userland so we can't
* be switched away.)
*
* As a result of all this, we don't really have to do a whole lot if
* the thread is just mucking about in the kernel, switching on and
* off the cpu for whatever reason it feels like. And yet we still
* preserve fast syscalls, cause if we -don't- get descheduled,
* we never come here either.
*/
/*
 * NOTE(review): incomplete extraction -- the function name/parameter
 * line (presumably lwp_segregs_save, per the block comment above), the
 * declarations of `pcb', the statements that copy %ds/%es/%fs/%gs from
 * the lwp regs into the pcb and set pcb_rupdate/t_post_sys, and the
 * body of the !__xpv section are all missing.  Do not edit from this
 * view.
 */
/*ARGSUSED*/
void
{
#if defined(__amd64)
if (pcb->pcb_rupdate == 0) {
/*
 * If there's no update already pending, capture the current
 * %ds/%es/%fs/%gs values from lwp's regs in case the user
 * changed them; %fsbase and %gsbase are privileged so the
 * kernel versions of these registers in pcb_fsbase and
 * pcb_gsbase are always up-to-date.
 */
}
#endif /* __amd64 */
#if !defined(__xpv) /* XXPV not sure if we can re-read gdt? */
#endif
}
#if defined(__amd64)
/*
* Update the segment registers with new values from the pcb.
*
* We have to do this carefully, and in the following order,
* in case any of the selectors points at a bogus descriptor.
* If they do, we'll catch trap with on_trap and return 1.
* returns 0 on success.
*
* This is particularly tricky for %gs.
* This routine must be executed under a cli.
*/
/*
 * NOTE(review): this extraction is heavily truncated -- the function
 * name/parameter line, the on_trap() protection setup, and the actual
 * segment loads (the hypervisor segment-base hypercalls on __xpv, and
 * the native __set_ds/__set_es/__set_fs/__set_gs plus gsbase wrmsr
 * sequence) are all missing; the surviving brace/#if structure is
 * unbalanced in this view.  Do not edit without the full source.
 */
int
{
int rc = 0;
#if defined(__xpv)
/*
 * On the hypervisor this is easy. The hypercall below will
 * swapgs and load %gs with the user selector. If the user
 * selector is bad the hypervisor will catch the fault and
 * load %gs with the null selector instead. Either way the
 * kernel's gsbase is not damaged.
 */
no_trap();
return (1);
}
#else /* __xpv */
/*
 * A little more complicated running native.
 */
/*
 * If __set_gs fails it's because the new %gs is a bad %gs,
 * we'll be taking a trap but with the original %gs and %gsbase
 * undamaged (i.e. pointing at curcpu).
 *
 * We've just mucked up the kernel's gsbase. Oops. In
 * particular we can't take any traps at all. Make the newly
 * computed gsbase be the hidden gs via __swapgs, and fix
 * the kernel's gsbase back again. Later, when we return to
 * userland we'll swapgs again restoring gsbase just loaded
 * above.
 */
__swapgs();
/*
 * restore kernel's gsbase
 */
#endif /* __xpv */
/*
 * Only override the descriptor base address if
 * r_gs == LWPGS_SEL or if r_gs == NULL. A note on
 * NULL descriptors -- 32-bit programs take faults
 * if they dereference NULL descriptors; however,
 * when 64-bit programs load them into %fs or %gs,
 * they DON'T fault -- only the base address remains
 * whatever it was from the last load. Urk.
 *
 * XXX - note that lwp_setprivate now sets %fs/%gs to the
 * null selector for 64 bit processes. Whereas before
 * %fs/%gs were set to LWP(FS|GS)_SEL regardless of
 * the process's data model. For now we check for both
 * values so that the kernel can also support the older
 * libc. This should be ripped out at some point in the
 * future.
 */
#if defined(__xpv)
pcb->pcb_gsbase)) {
no_trap();
return (1);
}
#else
#endif
}
/*
 * Same as for %gs
 */
#if defined(__xpv)
pcb->pcb_fsbase)) {
no_trap();
return (1);
}
#else
#endif
}
} else {
cli();
rc = 1;
}
no_trap();
return (rc);
}
/*
* Make sure any stale selectors are cleared from the segment registers
* by putting KDS_SEL (the kernel's default %ds gdt selector) into them.
* This is necessary because the kernel itself does not use %es, %fs, nor
* %ds. (%cs and %ss are necessary, and are set up by the kernel - along with
* %gs - to point to the current cpu struct.) If we enter kmdb while in the
* kernel and resume with a stale ldt or brandz selector sitting there in a
* segment register, kmdb will #gp fault if the stale selector points to,
* for example, an ldt in the context of another process.
*
* WARNING: Intel and AMD chips behave differently when storing
* the null selector into %fs and %gs while in long mode. On AMD
* chips fsbase and gsbase are not cleared. But on Intel chips, storing
* a null selector into %fs or %gs has the side effect of clearing
* fsbase or gsbase. For that reason we use KDS_SEL, which has
 * consistent behavior between AMD and Intel.
*
* Caller responsible for preventing cpu migration.
*/
/*
 * NOTE(review): incomplete extraction -- the actual segment-register
 * loads (storing KDS_SEL into %ds/%es/%fs per the block comment above)
 * and the gsbase restore inside the #if/#else arms (hypervisor call vs.
 * native wrmsr, presumably) are missing.  Only the cli/sti bracketing
 * survives; do not edit without the full source.
 */
void
reset_sregs(void)
{
cli();
/*
 * restore kernel gsbase
 */
#if defined(__xpv)
#else
#endif
sti();
}
#endif /* __amd64 */
#ifdef _SYSCALL32_IMPL
/*
* Make it impossible for a process to change its data model.
* We do this by toggling the present bits for the 32 and
* 64-bit user code descriptors. That way if a user lwp attempts
* to change its data model (by using the wrong code descriptor in
* %cs) it will fault immediately. This also allows us to simplify
* assertions and checks in the kernel.
*/
/*
 * NOTE(review): the function name/parameter line (a static helper taking
 * a model_t, per the `model' test below -- verify name against the full
 * source) and the descriptor present-bit toggles in both branches are
 * missing from this extraction.
 */
static void
{
if (model == DATAMODEL_NATIVE) {
} else {
}
}
#endif /* _SYSCALL32_IMPL */
/*
* Restore lwp private fs and gs segment descriptors
* on current cpu's GDT.
*/
/*
 * NOTE(review): the function name/parameter line (presumably
 * lwp_segregs_restore, per the block comment above) and the GDT
 * descriptor-writing statements are missing from this extraction.
 */
static void
{
#ifdef _SYSCALL32_IMPL
#endif
}
#ifdef _SYSCALL32_IMPL
/*
 * NOTE(review): the function name/parameter line and the entire body
 * (apart from a lint suppression) are missing from this extraction --
 * presumably the 32-bit counterpart of the preceding restore routine;
 * verify against the full source.
 */
static void
{
/*LINTED*/
}
#endif /* _SYSCALL32_IMPL */
/*
* If this is a process in a branded zone, then we want it to use the brand
* syscall entry points instead of the standard Solaris entry points. This
* routine must be called when a new lwp is created within a branded zone
* or when an existing lwp moves into a branded zone via a zone_enter()
* operation.
*/
/*
 * NOTE(review): incomplete extraction -- the function name/parameter
 * line, the call whose trailing argument list
 * (brand_interpositioning_disable, NULL) appears dangling below
 * (presumably an installctx/removectx wrapped in an ASSERT), and the
 * body of the curthread branch are missing.  Do not edit from this
 * view.
 */
void
{
brand_interpositioning_disable, NULL) == 0);
if (t == curthread) {
}
}
/*
* If this is a process in a branded zone, then we want it to disable the
* brand syscall entry points. This routine must be called when the last
* lwp in a process is exiting in proc_exit().
*/
/*
 * NOTE(review): incomplete extraction -- the function name/parameter
 * line, the statement controlled by the first `if (t == curthread)', and
 * the call whose trailing argument list (brand_interpositioning_disable,
 * NULL) appears dangling below (presumably a removectx wrapped in a
 * VERIFY/ASSERT) are missing, as is the curthread cleanup body.  Do not
 * edit from this view.
 */
void
{
if (t == curthread)
/* Remove the original context handlers */
brand_interpositioning_disable, NULL) != 0);
if (t == curthread) {
/* Cleanup our MSR and IDT entries. */
}
}
/*
* Add any lwp-associated context handlers to the lwp at the beginning
* of the lwp's useful life.
*
* All paths which create lwp's invoke lwp_create(); lwp_create()
* invokes lwp_stk_init() which initializes the stack, sets up
* lwp_regs, and invokes this routine.
*
* All paths which destroy lwp's invoke lwp_exit() to rip the lwp
* apart and put it on 'lwp_deathrow'; if the lwp is destroyed it
* ends up in thread_free() which invokes freectx(t, 0) before
* invoking lwp_stk_fini(). When the lwp is recycled from death
* row, lwp_stk_fini() is invoked, then thread_free(), and thus
* freectx(t, 0) as before.
*
* In the case of exec, the surviving lwp is thoroughly scrubbed
* clean; exec invokes freectx(t, 1) to destroy associated contexts.
* On the way back to the new image, it invokes setregs() which
* in turn invokes this routine.
*/
/*
 * NOTE(review): incomplete extraction -- the function name/parameter
 * line, the selection of save/restore handlers in the #ifdef arms, the
 * installctx() calls' argument lists, the GDT updates in the curthread
 * branches, and the sysenter stack-pointer MSR write are all missing;
 * the brace structure is unbalanced in this view.  Do not edit without
 * the full source.
 */
void
{
#ifdef _SYSCALL32_IMPL
#else
#endif
/*
 * Install the basic lwp context handlers on each lwp.
 *
 * On the amd64 kernel, the context handlers are responsible for
 * virtualizing %ds, %es, %fs, and %gs to the lwp. The register
 * values are only ever changed via sys_rtt when the
 * pcb->pcb_rupdate == 1. Only sys_rtt gets to clear the bit.
 *
 * On the i386 kernel, the context handlers are responsible for
 * virtualizing %gs/%fs to the lwp by updating the per-cpu GDTs
 */
if (thisthread)
if (thisthread) {
/*
 * Since we're the right thread, set the values in the GDT
 */
}
/*
 * (NOTE(review): the opening line of this comment was lost in the
 * extraction; it concerns processors with sysenter/sysexit support.)
 * to ensure that the hardware mechanism is kept up-to-date with the
 * lwp's kernel stack pointer across context switches.
 *
 * sep_save zeros the sysenter stack pointer msr; sep_restore sets
 * it to the lwp's kernel stack pointer (kstktop).
 */
#if defined(__amd64)
#endif
if (thisthread)
installctx(t, kstktop,
if (thisthread) {
/*
 * We're the right thread, so set the stack pointer
 * for the first sysenter instruction to use
 */
}
}
if (PROC_IS_BRANDED(ttoproc(t)))
}
/*
* Clear registers on exec(2).
*/
/*
 * NOTE(review): incomplete extraction -- the function name/parameter
 * line (the "Clear registers on exec(2)" routine), the register
 * initialization, the data-model branch that pairs with `} else {'
 * below, the descriptor/thrptr setup statements the comments describe,
 * the fpu initialization, and the closing installctx call are all
 * missing; the brace structure is unbalanced in this view.  Do not edit
 * without the full source.
 */
void
{
/*
 * Initialize user registers
 */
(void) save_syscall_args(); /* copy args from registers first */
#if defined(__amd64)
/*
 * Only allow 64-bit user code descriptor to be present.
 */
/*
 * Arrange that the virtualized %fs and %gs GDT descriptors
 * have a well-defined initial state (present, ring 3
 * and of type data).
 */
/*
 * thrptr is either NULL or a value used by DTrace.
 * 64-bit processes use %fs as their "thread" register.
 */
} else {
/*
 * only allow 32-bit user code selector to be present.
 */
/*
 * thrptr is either NULL or a value used by DTrace.
 * 32-bit processes use %gs as their "thread" register.
 */
}
/*
 * Arrange that the virtualized %fs and %gs GDT descriptors
 * have a well-defined initial state (present, ring 3
 * and of type data).
 */
/*
 * For %gs we need to reset LWP_GSBASE in pcb and the
 * per-cpu GDT descriptor. thrptr is either NULL
 * or a value used by DTrace.
 */
#endif
t->t_post_sys = 1;
/*
 * Here we initialize minimal fpu state.
 * The rest is done at the first floating
 * point instruction that a process executes.
 */
/*
 * Add the lwp context handlers that virtualize segment registers,
 */
}
/*
 * NOTE(review): the return-type line (presumably a pointer to the
 * current cpu's GDT) and the body's return statement are missing from
 * this extraction -- verify against the full source.
 */
cpu_get_gdt(void)
{
}
#if !defined(lwp_getdatamodel)
/*
* Return the datamodel of the given lwp.
*/
/*
 * NOTE(review): the signature line (per the guard above, the default
 * lwp_getdatamodel() implementation, presumably returning a model_t) and
 * the body are missing from this extraction.
 */
/*ARGSUSED*/
{
}
#endif /* !lwp_getdatamodel */
#if !defined(get_udatamodel)
/*
 * NOTE(review): the return-type line (presumably model_t, per the guard
 * above) and the body's return statement are missing from this
 * extraction.
 */
get_udatamodel(void)
{
}
#endif /* !get_udatamodel */