/* swtch.s, revision 49dc33e37f0b57cc47ce0a40a5dffaf6627bae4d */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
*/
/*
* Process switching routines.
*/
#if defined(__lint)
#include <sys/thread.h>
#include <sys/time.h>
#else /* __lint */
#include "assym.h"
#endif /* __lint */
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/privregs.h>
#include <sys/segments.h>
/*
* resume(thread_id_t t);
*
 * A thread can only run on one processor at a time. There
 * exists a window on MPs where the current thread on one
 * processor is capable of being dispatched by another processor.
 * Some overlap between outgoing and incoming threads can happen
 * when they are the same thread; in that case, resume() on one
 * processor will spin on the incoming thread until resume() on
 * the other processor has finished with the outgoing thread.
*
* The MMU context changes when the resuming thread resides in a different
* process. Kernel threads are known by resume to reside in process 0.
* The MMU context, therefore, only changes when resuming a thread in
* a process different from curproc.
*
* resume_from_intr() is called when the thread being resumed was not
* passivated by resume (e.g. was interrupted). This means that the
* resume lock is already held and that a restore context is not needed.
* Also, the MMU context is not changed on the resume in this case.
*
* resume_from_zombie() is the same as resume except the calling thread
* is a zombie and must be put on the deathrow list after the CPU is
* off the stack.
*/
#if !defined(__lint)

#if LWP_PCB_FPU != 0
#error LWP_PCB_FPU MUST be defined as 0 for code in swtch.s to work
#endif /* LWP_PCB_FPU != 0 */

#endif /* !__lint */
#if defined(__amd64)
/*
* Save non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
*
* The stack frame must be created before the save of %rsp so that tracebacks
* of swtch()ed-out processes show the process as having last called swtch().
*/
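
/*
 * A reconstruction sketch of the macro this comment describes. The T_*
 * slot names are assumed to be assym.h constants for the label_t save
 * area, and the trailing register conventions (%r12 = new thread,
 * %r13 = curthread) are assumptions carried through the sketches below.
 */
#define	SAVE_REGS(thread_t, retaddr)			\
	movq	%rbp, T_RBP(thread_t);			\
	movq	%rbx, T_RBX(thread_t);			\
	movq	%r12, T_R12(thread_t);			\
	movq	%r13, T_R13(thread_t);			\
	movq	%r14, T_R14(thread_t);			\
	movq	%r15, T_R15(thread_t);			\
	movq	retaddr, T_PC(thread_t);		\
	movq	%rdi, %r12;	/* %r12 = t (new thread) */ \
	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */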
/*
* Restore non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
*
* We load up %rsp from the label_t as part of the context switch, so
* we don't repeat that here.
*
* We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
* already has the effect of putting the stack back the way it was when
* we came in.
*/
/* Body below is a reconstruction sketch; T_* offsets assumed from assym.h. */
#define	RESTORE_REGS(scratch_reg)			\
	movq	%gs:CPU_THREAD, scratch_reg;		\
	movq	T_RBP(scratch_reg), %rbp;		\
	movq	T_RBX(scratch_reg), %rbx;		\
	movq	T_R12(scratch_reg), %r12;		\
	movq	T_R13(scratch_reg), %r13;		\
	movq	T_R14(scratch_reg), %r14;		\
	movq	T_R15(scratch_reg), %r15
/*
* Get pointer to a thread's hat structure
*/
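/*
 * A sketch of the hat lookup: thread -> proc -> address space -> hat.
 * T_PROCP, P_AS, and A_HAT are assumed assym.h offsets.
 */
#define	GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movq	T_PROCP(thread_t), hatp;		\
	movq	P_AS(hatp), scratch_reg;		\
	movq	A_HAT(scratch_reg), hatp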
/* Sketch: tsc_read() returns the timestamp in %rax; we park it in %r14. */
#define	TSC_READ()					\
	call	tsc_read;				\
	movq	%rax, %r14
/*
* If we are resuming an interrupt thread, store a timestamp in the thread
* structure. If an interrupt occurs between tsc_read() and its subsequent
* store, the timestamp will be stale by the time it is stored. We can detect
* this by doing a compare-and-swap on the thread's timestamp, since any
* interrupt occurring in this window will put a new timestamp in the thread's
* t_intr_start field.
*/
/*
 * Completed as a sketch: the interrupt-thread test and the load/cmpxchg
 * pair are assumptions. If an interrupt updates t_intr_start between the
 * load into %rax and the cmpxchgq, the compare fails and we retry.
 */
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
0:							\
	TSC_READ();					\
	movq	T_INTR_START(thread_t), %rax;		\
	cmpxchgq %r14, T_INTR_START(thread_t);		\
	jnz	0b;					\
1:

#elif defined(__i386)
/*
* Save non-volatile registers (%ebp, %esi, %edi and %ebx)
*
* The stack frame must be created before the save of %esp so that tracebacks
* of swtch()ed-out processes show the process as having last called swtch().
*/
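
/*
 * A reconstruction sketch of the i386 macro. The T_* offsets are assumed
 * assym.h constants; the argument t is fetched from the caller's frame,
 * and %edi/%esi carry the new thread and curthread by assumption.
 */
#define	SAVE_REGS(thread_t, retaddr)			\
	movl	%ebp, T_EBP(thread_t);			\
	movl	%ebx, T_EBX(thread_t);			\
	movl	%esi, T_ESI(thread_t);			\
	movl	%edi, T_EDI(thread_t);			\
	movl	%esp, T_SP(thread_t);			\
	movl	retaddr, T_PC(thread_t);		\
	movl	8(%ebp), %edi;	/* %edi = t (new thread) */ \
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */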
/*
* Restore non-volatile registers (%ebp, %esi, %edi and %ebx)
*
 * We don't do a 'leave,' because reloading %esp/%ebp from the label_t
* already has the effect of putting the stack back the way it was when
* we came in.
*/
/* Body below is a reconstruction sketch; T_* offsets assumed from assym.h. */
#define	RESTORE_REGS(scratch_reg)			\
	movl	%gs:CPU_THREAD, scratch_reg;		\
	movl	T_EBP(scratch_reg), %ebp;		\
	movl	T_EBX(scratch_reg), %ebx;		\
	movl	T_ESI(scratch_reg), %esi;		\
	movl	T_EDI(scratch_reg), %edi
/*
* Get pointer to a thread's hat structure
*/
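/* A sketch mirroring the amd64 lookup; offsets assumed from assym.h. */
#define	GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movl	T_PROCP(thread_t), hatp;		\
	movl	P_AS(hatp), scratch_reg;		\
	movl	A_HAT(scratch_reg), hatp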
/*
* If we are resuming an interrupt thread, store a timestamp in the thread
* structure. If an interrupt occurs between tsc_read() and its subsequent
* store, the timestamp will be stale by the time it is stored. We can detect
* this by doing a compare-and-swap on the thread's timestamp, since any
* interrupt occurring in this window will put a new timestamp in the thread's
* t_intr_start field.
*/
/*
 * Completed as a sketch: the 64-bit timestamp needs cmpxchg8b on i386,
 * which compares %edx:%eax with memory and stores %ecx:%ebx on a match.
 * The exact register shuffle is an assumption.
 */
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
0:							\
	call	tsc_read;	/* timestamp in %edx:%eax */ \
	movl	%eax, %ebx;				\
	movl	%edx, %ecx;				\
	movl	T_INTR_START(thread_t), %eax;		\
	movl	T_INTR_START+4(thread_t), %edx;		\
	cmpxchg8b T_INTR_START(thread_t);		\
	jnz	0b;					\
1:
#endif /* __i386 */
#if defined(__lint)
/* ARGSUSED */
void
resume(kthread_t *t)
{}
#else /* __lint */
#if defined(__amd64)
/*
* Save non-volatile registers, and set return address for current
* thread to resume_return.
*
* %r12 = t (new thread) when done
*/
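/*
 * A minimal sketch of the entry sequence, assuming the SAVE_REGS sketch
 * above: the frame is built first (for tracebacks), then resume_return
 * is recorded as the point at which this thread will eventually resume.
 */
	ENTRY(resume)
	movq	%gs:CPU_THREAD, %rax	/* %rax = curthread */
	pushq	%rbp			/* frame first, for tracebacks */
	movq	%rsp, %rbp
	leaq	resume_return(%rip), %r11
	SAVE_REGS(%rax, %r11)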
/*
* Call savectx if thread has installed context ops.
*
* Note that if we have floating point context, the save op
* (either fpsave_begin or fpxsave_begin) will issue the
* async save instruction (fnsave or fxsave respectively)
* that we fwait for below.
*/
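/*
 * Sketch of the check-and-call, assuming %r13 = curthread per the
 * SAVE_REGS sketch and the T_CTX offset from assym.h.
 */
	cmpq	$0, T_CTX(%r13)		/* should current thread savectx? */
	je	.nosavectx		/* skip call when zero */
	movq	%r13, %rdi		/* arg = thread pointer */
	call	savectx			/* call ctx ops */
.nosavectx: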
/*
* Call savepctx if process has installed context ops.
*/
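/*
 * Same pattern for per-process context ops; the T_PROCP and P_PCTX
 * offsets are assumptions.
 */
	movq	T_PROCP(%r13), %r14	/* %r14 = curproc */
	cmpq	$0, P_PCTX(%r14)	/* should current process savepctx? */
	je	.nosavepctx
	movq	%r14, %rdi		/* arg = process pointer */
	call	savepctx
.nosavepctx: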
/*
* Temporarily switch to the idle thread's stack
*/
/*
* Set the idle thread as the current thread
*/
/*
 * Switch in the hat context for the new thread
 */
/*
* Clear and unlock previous thread's t_lock
* to allow it to be dispatched by another processor.
*/
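/*
 * A sketch of the sequence the four comments above describe. The
 * CPU_IDLE_THREAD offset, the hat_switch() call pattern, and saving
 * %rsp just before leaving the old stack are assumptions.
 */
	movq	%rsp, T_SP(%r13)		/* save curthread's sp */
	movq	%gs:CPU_IDLE_THREAD, %rax	/* idle thread pointer */
	movq	T_SP(%rax), %rsp		/* get onto idle thread stack */
	movq	%rax, %gs:CPU_THREAD		/* set idle as curthread */
	GET_THREAD_HATP(%rdi, %r12, %r11)	/* hat of the incoming thread */
	call	hat_switch
	movb	$0, T_LOCK(%r13)		/* clear old thread's t_lock */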
/*
* IMPORTANT: Registers at this point must be:
* %r12 = new thread
*
* Here we are in the idle thread, have dropped the old thread.
*/
/*
 * Spin until the dispatched thread's mutex has
 * been unlocked. This mutex is unlocked when
 * it becomes safe for the thread to run.
*/
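/*
 * Sketch of the acquisition, assuming t_lock is a byte spinlock cleared
 * by the other CPU's resume(): lock btsl is the atomic test-and-set,
 * and pause eases the bus while we spin.
 */
.lock_thread_mutex:
	lock
	btsl	$0, T_LOCK(%r12)	/* attempt to lock new thread's mutex */
	jnc	.thread_mutex_locked	/* got it */
.spin_thread_mutex:
	pause
	cmpb	$0, T_LOCK(%r12)	/* check mutex status */
	jz	.lock_thread_mutex	/* clear now, retry the lock */
	jmp	.spin_thread_mutex	/* still locked, spin */
.thread_mutex_locked: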
/*
* Fix CPU structure to indicate new running thread.
* Set pointer in new thread to the CPU structure.
*/
/* cp->cpu_stats.sys.cpumigrate++ */
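/*
 * Sketch under assumed assym offsets (CPU_SELF, T_CPU, CPU_THREAD); the
 * cpumigrate increment noted above would sit in a "thread migrated"
 * branch that this sketch elides.
 */
	movq	%gs:CPU_SELF, %r13	/* %r13 = this cpu_t */
	movq	%r13, T_CPU(%r12)	/* new thread's t_cpu = this CPU */
	movq	%r12, CPU_THREAD(%r13)	/* this CPU now runs the new thread */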
/*
* Setup rsp0 (kernel stack) in TSS to curthread's stack.
* (Note: Since we don't have saved 'regs' structure for all
* the threads we can't easily determine if we need to
* change rsp0. So, we simply change the rsp0 to bottom
* of the thread stack and it will work for all cases.)
*
* XX64 - Is this correct?
*/
#if !defined(__xpv)
	/* Sketch: CPU_TSS, T_STACK, and TSS_RSP0 assumed from assym.h. */
	movq	CPU_TSS(%r13), %r14	/* %r14 = this CPU's TSS */
	movq	T_STACK(%r12), %rax	/* base of the thread's stack */
	addq	$REGSIZE+MINFRAME, %rax	/* bottom of the stack, as noted above */
	movq	%rax, TSS_RSP0(%r14)	/* kernel rsp on ring transition */
#else
	/* Sketch: under Xen the hypervisor tracks the kernel stack instead. */
	movq	T_STACK(%r12), %rsi
	addq	$REGSIZE+MINFRAME, %rsi
	movl	$KDS_SEL, %edi
	call	HYPERVISOR_stack_switch
#endif /* __xpv */
mfence /* synchronize with mutex_exit() */
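/*
 * Sketch: with the CPU structure updated, step onto the resuming
 * thread's stack. T_SP/T_PC are assumed to be the label_t slots that
 * were filled in when this thread switched out.
 */
	movq	T_SP(%r12), %rsp	/* switch to resuming thread's stack */
	movq	T_PC(%r12), %r13	/* saved return addr */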
/*
* Call restorectx if context ops have been installed.
*/
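/* Sketch, mirroring savectx above; %r12 = resuming thread. */
	cmpq	$0, T_CTX(%r12)		/* should resumed thread restorectx? */
	je	.norestorectx		/* skip call when zero */
	movq	%r12, %rdi		/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
.norestorectx: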
/*
* Call restorepctx if context ops have been installed for the proc.
*/
/*
* Restore non-volatile registers, then have spl0 return to the
* resuming thread's PC after first setting the priority as low as
* possible and blocking all interrupt threads that may be active.
*/
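/*
 * Sketch of the hand-off: the saved PC is copied out before RESTORE_REGS
 * reloads %r13, then pushed so spl0()'s eventual ret lands there. The
 * DTrace on-cpu probe call matches the pattern used elsewhere in
 * illumos, but is an assumption here.
 */
	movq	%r13, %rax		/* saved return addr */
	RESTORE_REGS(%r11)
	pushq	%rax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0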
/*
* Remove stack frame created in SAVE_REGS()
*/
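/*
 * Sketch: resume_return pops the one-word frame the entry sequence
 * pushed before SAVE_REGS, then returns to resume()'s caller.
 */
resume_return:
	addq	$CLONGSIZE, %rsp	/* pop the traceback frame */
	ret
	SET_SIZE(resume)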
#elif defined(__i386)

/*
 * Save non-volatile registers, and set return address for current
 * thread to resume_return.
 *
 * %edi = t (new thread) when done.
 */
#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
/*
* Call savectx if thread has installed context ops.
*
* Note that if we have floating point context, the save op
* (either fpsave_begin or fpxsave_begin) will issue the
* async save instruction (fnsave or fxsave respectively)
* that we fwait for below.
*/
/*
* Call savepctx if process has installed context ops.
*/
/*
* Temporarily switch to the idle thread's stack
*/
/*
* Set the idle thread as the current thread
*/
/* switch in the hat context for the new thread */
/*
* Clear and unlock previous thread's t_lock
* to allow it to be dispatched by another processor.
*/
/*
* IMPORTANT: Registers at this point must be:
* %edi = new thread
*
* Here we are in the idle thread, have dropped the old thread.
*/
/*
 * Spin until the dispatched thread's mutex has
 * been unlocked. This mutex is unlocked when
 * it becomes safe for the thread to run.
*/
.L4:
/*
* Fix CPU structure to indicate new running thread.
* Set pointer in new thread to the CPU structure.
*/
/* Pentium. Used a few lines below. */
.L5_1:
/*
* Setup esp0 (kernel stack) in TSS to curthread's stack.
* (Note: Since we don't have saved 'regs' structure for all
* the threads we can't easily determine if we need to
* change esp0. So, we simply change the esp0 to bottom
* of the thread stack and it will work for all cases.)
*/
#if !defined(__xpv)
	/*
	 * Sketch: CPU_TSS, T_STACK, and TSS_ESP0 assumed from assym.h;
	 * %ecx is assumed free and %edi to still hold the new thread.
	 */
	movl	CPU_TSS(%esi), %ecx	/* %ecx = this CPU's TSS */
	movl	T_STACK(%edi), %eax	/* base of the thread's stack */
	addl	$REGSIZE+MINFRAME, %eax	/* bottom of the stack, as noted above */
	movl	%eax, TSS_ESP0(%ecx)	/* kernel esp on ring transition */
#else
	/* Sketch: under Xen the hypervisor tracks the kernel stack instead. */
	movl	T_STACK(%edi), %eax
	addl	$REGSIZE+MINFRAME, %eax
	pushl	%eax
	pushl	$KDS_SEL
	call	HYPERVISOR_stack_switch
	addl	$8, %esp
#endif /* __xpv */
mfence /* synchronize with mutex_exit() */
/*
* Call restorectx if context ops have been installed.
*/
/*
* Call restorepctx if context ops have been installed for the proc.
*/
/*
* Restore non-volatile registers, then have spl0 return to the
* resuming thread's PC after first setting the priority as low as
* possible and blocking all interrupt threads that may be active.
*/
/*
* Remove stack frame created in SAVE_REGS()
*/
.L4_2:
.L5_2:
/* cp->cpu_stats.sys.cpumigrate++ */
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
resume_from_zombie(kthread_t *t)
{}
#else /* __lint */
#if defined(__amd64)
/*
* Save non-volatile registers, and set return address for current
* thread to resume_from_zombie_return.
*
* %r12 = t (new thread) when done
*/
/* clean up the fp unit. It might be left enabled */
#if defined(__xpv) /* XXPV XXtclayton */
/*
* Remove this after bringup.
* (Too many #gp's for an instrumented hypervisor.)
*/
#else
fninit /* init fpu & discard pending error */
#endif /* __xpv */
/*
* Temporarily switch to the idle thread's stack so that the zombie
* thread's stack can be reclaimed by the reaper.
*/
/*
* Sigh. If the idle thread has never run thread_start()
* then t_sp is mis-aligned by thread_load().
*/
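/*
 * Sketch of the stack switch described above; the andq supplies the
 * realignment the "Sigh" comment calls for, assuming sys/stack.h's
 * STACK_ALIGN is visible here.
 */
	movq	%gs:CPU_IDLE_THREAD, %rax	/* idle thread pointer */
	movq	T_SP(%rax), %rsp		/* get onto idle thread stack */
	andq	$-STACK_ALIGN, %rsp		/* correct stack alignment */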
/*
* Set the idle thread as the current thread.
*/
/* switch in the hat context for the new thread */
/*
* Put the zombie on death-row.
*/
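/*
 * Sketch: reapq_add() is the deathrow interface; %r13 is assumed to
 * still hold the outgoing (zombie) thread, and we are already off its
 * stack per the switch above.
 */
	movq	%r13, %rdi		/* the zombie thread */
	call	reapq_add		/* place it on deathrow */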
/*
* Remove stack frame created in SAVE_REGS()
*/
#elif defined(__i386)

/*
 * Save non-volatile registers, and set return address for current
 * thread to resume_from_zombie_return.
 *
 * %edi = t (new thread) when done.
 */
#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
/* clean up the fp unit. It might be left enabled */
fninit /* init fpu & discard pending error */
/*
* Temporarily switch to the idle thread's stack so that the zombie
* thread's stack can be reclaimed by the reaper.
*/
/*
* Set the idle thread as the current thread.
*/
/*
* switch in the hat context for the new thread
*/
/*
* Put the zombie on death-row.
*/
/*
* Remove stack frame created in SAVE_REGS()
*/
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
resume_from_intr(kthread_t *t)
{}
#else /* __lint */
#if defined(__amd64)
/*
* Save non-volatile registers, and set return address for current
* thread to resume_from_intr_return.
*
* %r12 = t (new thread) when done
*/
mfence /* synchronize with mutex_exit() */
/*
 * Unlock the outgoing thread's mutex so it can be dispatched
 * by another processor.
*/
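/*
 * Sketch: an xchgb both clears t_lock and orders the store, assuming
 * %r13 still holds the outgoing thread.
 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%r13)	/* clear outgoing thread's t_lock */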
/*
* Restore non-volatile registers, then have spl0 return to the
* resuming thread's PC after first setting the priority as low as
* possible and blocking all interrupt threads that may be active.
*/
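/*
 * Sketch: the saved PC must be fetched before RESTORE_REGS below
 * reloads %r12; %rax survives until the push that follows.
 */
	movq	T_PC(%r12), %rax	/* resuming thread's return addr */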
RESTORE_REGS(%r11);
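	/* Sketch, continued: hand the saved PC to spl0()'s eventual ret. */
	pushq	%rax			/* push return address for spl0() */
	jmp	spl0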
/*
* Remove stack frame created in SAVE_REGS()
*/
#elif defined(__i386)

/*
 * Save non-volatile registers, and set return address for current
 * thread to resume_from_intr_return.
 *
 * %edi = t (new thread) when done.
 */
#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
mfence /* synchronize with mutex_exit() */
/*
 * Unlock the outgoing thread's mutex so it can be dispatched
 * by another processor.
*/
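/*
 * Sketch of the i386 unlock, assuming %esi still holds the outgoing
 * thread.
 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%esi)	/* clear outgoing thread's t_lock */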
/*
* Restore non-volatile registers, then have spl0 return to the
* resuming thread's PC after first setting the priority as low as
* possible and blocking all interrupt threads that may be active.
*/
/*
* Remove stack frame created in SAVE_REGS()
*/
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
void
thread_start(void)
{}
#else /* __lint */
#if defined(__amd64)
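/*
 * Sketch of the amd64 thread_start trampoline: the synthesized stack
 * built by thread_load() is assumed to hold start(), arg, and len, in
 * that order, at the initial %rsp.
 */
	ENTRY(thread_start)
	popq	%rax			/* start() */
	popq	%rdi			/* arg */
	popq	%rsi			/* len */
	movq	%rsp, %rbp
	call	*%rax
	call	thread_exit		/* destroy thread if it returns */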
/*NOTREACHED*/
	SET_SIZE(thread_start)

#elif defined(__i386)

	/*NOTREACHED*/
#endif /* __i386 */
#endif /* __lint */