/* swtch.s revision 1e2e7a75ddb1eedcefa449ce98fd5862749b72ee */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Process switching routines.
*/
#if !defined(lint)
#include "assym.h"
#else /* lint */
#endif /* lint */
#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/privregs.h>
#include <vm/hat_sfmmu.h>
/*
* resume(kthread_id_t)
*
* a thread can only run on one processor at a time. there
* exists a window on MPs where the current thread on one
* processor is capable of being dispatched by another processor.
* some overlap between outgoing and incoming threads can happen
* when they are the same thread. in this case where the threads
* are the same, resume() on one processor will spin on the incoming
* thread until resume() on the other processor has finished with
* the outgoing thread.
*
* The MMU context changes when the resuming thread resides in a different
* process. Kernel threads are known by resume to reside in process 0.
* The MMU context, therefore, only changes when resuming a thread in
* a process different from curproc.
*
* resume_from_intr() is called when the thread being resumed was not
* passivated by resume (e.g. was interrupted). This means that the
* resume lock is already held and that a restore context is not needed.
* Also, the MMU context is not changed on the resume in this case.
*
* resume_from_zombie() is the same as resume except the calling thread
* is a zombie and must be put on the deathrow list after the CPU is
* off the stack.
*/
#if defined(lint)
/* ARGSUSED */
/*
 * NOTE(review): the lint prototype below is missing its function name and
 * argument list in this extract; from the block comment above and the
 * ALTENTRY(_resume_from_idle) that follows, this should be
 * resume(kthread_id_t) -- confirm against the full source.
 */
void
{}
#else /* lint */
!
! NOTE(review): the ENTRY(resume) directive, the register-window save/flush,
! and several instruction runs appear to have been elided from this extract
! (bare "!" lines and dangling numeric labels below). Comments here describe
! only the instructions that are visible.
!
!
!
!
0:
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
#endif
ldn [THREAD_REG + T_CTX], %g3 ! get ctx pointer
ldn [%o4 + LWP_FPU], %o0 ! fp pointer
rd %gsr, %g5 ! %g5 = graphics status register (fp state to preserve)
1:
!
!
2:
!
! Move onto the idle thread's stack so the outgoing thread's stack can be
! given up; %i1 is the cpu pointer (see the register map comment below).
!
ldn [%i1 + CPU_IDLE_THREAD], %o0 ! idle thread pointer
ldn [%o0 + T_SP], %o1 ! get onto idle thread stack
sub %o1, SA(MINFRAME), %sp ! save room for ins and locals
clr %fp ! no valid frame pointer on the temporary stack
!
! Set the idle thread as the current thread
!
mov THREAD_REG, %l3 ! save %g7 (current thread)
mov %o0, THREAD_REG ! set %g7 to idle
!
! Release the outgoing thread's dispatcher lock
! to allow it to be dispatched by another processor.
!
clrb [%l3 + T_LOCK] ! clear tp->t_lock
!
! IMPORTANT: Registers at this point must be:
! %i0 = new thread
! %i1 = cpu pointer
! %i2 = old proc pointer
! %i3 = new proc pointer
!
! Here we are in the idle thread, have dropped the old thread.
!
ALTENTRY(_resume_from_idle)
!
! Common tail of resume: we run on the idle thread's stack with the old
! thread already dropped. Register contract (set up by all callers):
! %i0 = new thread, %i1 = cpu pointer,
! %i2 = old proc pointer, %i3 = new proc pointer.
!
! SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
SET_KCONTEXTREG(%o0, %g1, %g2, %g3, %o3, l1, l2, l3)
cmp %i2, %i3 ! resuming the same process?
be,pt %xcc, 5f ! yes - no MMU context work needed
nop
ldx [%i3 + P_AS], %o0 ! load p->p_as
ldx [%o0 + A_HAT], %i5 ! %i5 = new proc hat
!
! update cpusran field
!
ld [%i1 + CPU_ID], %o4
add %i5, SFMMU_CPUSRAN, %o5
CPU_INDEXTOSET(%o5, %o4, %g1) ! %o5 = word of cpusran for this cpu
ldx [%o5], %o2 ! %o2 = cpusran field
mov 1, %g2
sllx %g2, %o4, %o4 ! %o4 = bit for this cpu
andcc %o4, %o2, %g0
bnz,pn %xcc, 0f ! bit already set, go to 0
nop
! NOTE(review): the atomic set of this cpu's cpusran bit (normally a casx
! retry loop between the 3: and 0: labels) appears elided in this extract,
! as does the MMU context switch code below -- confirm against full source.
3:
0:
!
!
!
!
!
!
!
3:
4:
5:
!
! Spin until the new thread's dispatcher mutex (t_lock) has
! been unlocked. this mutex is unlocked when
! it becomes safe for the thread to run.
!
ldstub [%i0 + T_LOCK], %o0 ! lock curthread's t_lock
6:
!
! Fix CPU structure to indicate new running thread.
! Set pointer in new thread to the CPU structure.
! XXX - Move migration statistic out of here
!
ldx [%i0 + T_CPU], %g2 ! last CPU to run the new thread
cmp %g2, %i1 ! test for migration
be,pt %xcc, 4f ! no migration
ldn [%i0 + T_LWP], %o1 ! delay - get associated lwp (if any)
ldx [%i1 + CPU_STATS_SYS_CPUMIGRATE], %g2 ! thread migrated: bump counter
inc %g2
stx %g2, [%i1 + CPU_STATS_SYS_CPUMIGRATE]
4:
membar #StoreLoad ! synchronize with mutex_exit()
mov %i0, THREAD_REG ! update global thread register
!
! NOTE(review): the stores linking the new thread to the CPU and the load
! that is expected to leave the new thread's mpcb/stack pointer in %o0
! appear elided here -- %o0 is used below as that pointer.
!
#ifdef CPU_MPCB_PA
! NOTE(review): this ldx overwrites %o0, which is then used below as the
! stack base; a scratch destination register is expected here. Likely a
! transcription artifact -- confirm against the full source.
ldx [%o0 + MPCB_PA], %o0
stx %o0, [%i1 + CPU_MPCB_PA]
#endif
! Switch to new thread's stack
sub %o0, SA(MINFRAME), %sp ! in case of intr or trap before restore
mov %o0, %fp
!
!
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
#endif
9:
!
! Remove all possibility of using the fp regs as a "covert channel".
!
call fp_zero ! zero the floating point registers
wr %g0, %g0, %gsr ! delay - clear graphics status register
ldx [%i0 + T_CTX], %i5 ! should resumed thread restorectx?
ba,pt %icc, 2f
wr %g0, %g0, %fprs ! delay - disable fprs
1:
#ifdef CPU_MPCB_PA
! Resuming a kernel thread: invalidate the cached mpcb physical address.
mov -1, %o1
stx %o1, [%i1 + CPU_MPCB_PA]
#endif
!
! kernel thread
! i0 = new thread
!
! Switch to new thread's stack
!
sub %o0, SA(MINFRAME), %sp ! in case of intr or trap before restore
mov %o0, %fp
!
!
2:
!
! i5 = ctx ptr
!
brz,a,pt %i5, 8f ! skip restorectx() when zero
ld [%i1 + CPU_BASE_SPL], %o0 ! annulled delay - executed only if taken
call restorectx ! thread can not sleep on temp stack
mov THREAD_REG, %o0 ! delay slot - arg = thread pointer
!
! Set priority as low as possible, blocking all interrupt threads
! that may be active.
!
ld [%i1 + CPU_BASE_SPL], %o0
8:
wrpr %o0, 0, %pil ! drop PIL to this CPU's base SPL
wrpr %g0, WSTATE_KERN, %wstate
!
! If we are resuming an interrupt thread, store a starting timestamp
! in the thread structure.
!
lduh [THREAD_REG + T_FLAGS], %o0
andcc %o0, T_INTR_THREAD, %g0
bnz,pn %xcc, 0f
nop
5:
call __dtrace_probe___sched_on__cpu ! DTrace probe
nop
ret ! resume curthread
restore
0:
! Interrupt thread: atomically store the current %tick into t_intr_start,
! retrying if an interrupt updates the field underneath us.
add THREAD_REG, T_INTR_START, %o2
1:
ldx [%o2], %o1 ! %o1 = expected old t_intr_start
rdpr %tick, %o0
sllx %o0, 1, %o0
srlx %o0, 1, %o0 ! shift off NPT bit
casx [%o2], %o1, %o0 ! swap in new timestamp if field unchanged
cmp %o0, %o1 ! casx returned the old value: equal = success
be,pt %xcc, 5b ! stored - go fire DTrace probe and return
nop
! If an interrupt occurred while we were attempting to store
! the timestamp, try again.
ba,pt %xcc, 1b
nop
!
! lock failed - spin with regular load to avoid cache-thrashing.
!
7:
brnz,a,pt %o0, 7b ! spin while locked
ldub [%i0 + T_LOCK], %o0 ! annulled delay - re-read lock byte
! NOTE(review): "ba %xcc, 6b" looks malformed -- plain "ba" takes no
! condition-code operand; the BPcc form would be "ba,pt %xcc, 6b".
! Likely a transcription artifact; confirm against the full source.
ba %xcc, 6b
ldstub [%i0 + T_LOCK], %o0 ! delay - lock curthread's mutex
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
/*
 * NOTE(review): this lint prototype is missing its name/arguments in the
 * extract; SET_SIZE(resume_from_zombie) below identifies the function as
 * resume_from_zombie(kthread_id_t) -- confirm against the full source.
 */
void
{}
#else /* lint */
!
! NOTE(review): the ENTRY(resume_from_zombie) label, the window flush, and
! the code that puts the zombie caller on deathrow (see block comment at
! the top of this file) appear elided; only the tail that branches into
! the common _resume_from_idle path is visible.
!
#ifdef CPU_MPCB_PA
#endif
!
! resume_from_idle args:
! %i0 = new thread
! %i1 = cpu
! %i2 = old proc
! %i3 = new proc
!
b _resume_from_idle ! finish job of resume
ldn [%i0 + T_PROCP], %i3 ! delay - new process
SET_SIZE(resume_from_zombie)
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
void
resume_from_intr(kthread_id_t t)
{}
#else /* lint */
/*
 * resume_from_intr() - the thread being resumed was interrupted rather
 * than passivated by resume(), so its t_lock is already held and no
 * context restore is needed (see block comment at top of file).
 *
 * NOTE(review): several instruction runs appear elided in this extract
 * (bare "!" lines below); in particular, the switch of THREAD_REG to the
 * new thread, the actual restore of %sp, and the release of the old
 * thread's t_lock are not visible. Comments describe only visible code.
 */
ENTRY(resume_from_intr)
save %sp, -SA(MINFRAME), %sp ! save ins and locals
flushw ! flushes all but this window
stn %fp, [THREAD_REG + T_SP] ! delay - save sp
stn %i7, [THREAD_REG + T_PC] ! save return address
ldn [%i0 + T_PC], %i7 ! restore resuming thread's pc
!
!
mov THREAD_REG, %l3 ! save old thread
!
!
ldn [THREAD_REG + T_SP], %o0 ! restore resuming thread's sp
!
! If we are resuming an interrupt thread, store a timestamp in the
! thread structure.
!
! NOTE(review): the lduh below clobbers the sp value just loaded into %o0
! without it ever reaching %sp -- the intervening code is likely elided.
!
lduh [THREAD_REG + T_FLAGS], %o0
andcc %o0, T_INTR_THREAD, %g0
bnz,pn %xcc, 0f
! NOTE(review): no delay-slot instruction is visible after the bnz above;
! likely elided in this extract -- confirm against the full source.
!
1:
0:
!
! We're an interrupt thread. Update t_intr_start and cpu_intrcnt
!
add THREAD_REG, T_INTR_START, %o2
2:
ldx [%o2], %o1 ! %o1 = expected old t_intr_start
rdpr %tick, %o0
sllx %o0, 1, %o0
srlx %o0, 1, %o0 ! shift off NPT bit
casx [%o2], %o1, %o0 ! swap in new timestamp if field unchanged
cmp %o0, %o1 ! casx returned old value: equal = success
bne,pn %xcc, 2b ! interrupted mid-update - retry
ldn [THREAD_REG + T_INTR], %l1 ! delay
2:
3:
!
! We're a non-interrupt thread and cpu_kprunrun is set. call kpreempt.
!
call kpreempt
mov KPREEMPT_SYNC, %o0 ! delay - arg = synchronous preemption
ba,pt %xcc, 1b
nop
SET_SIZE(resume_from_intr)
#endif /* lint */
/*
* thread_start()
*
* the current register window was crafted by thread_run() to contain
* an address of a procedure (in register %i7), and its args in registers
* %i0 through %i5. a stack trace of this thread will show the procedure
* that thread_start() invoked at the bottom of the stack. an exit routine
* is stored in %l0 and called when started thread returns from its called
* procedure.
*/
#if defined(lint)
void
thread_start(void)
{}
#else /* lint */
ENTRY(thread_start)
! Per the block comment above, the window was crafted by thread_run():
! the procedure's address is in %i7, its args in %i0-%i5, and an exit
! routine in %l0.
mov %i0, %o0 ! move first in-arg into place as first out-arg
! NOTE(review): the call through %i7, the invocation of the exit routine
! in %l0, and SET_SIZE(thread_start) appear elided from this extract; as
! visible, "unimp 0" traps (illegal instruction) if control ever reaches
! this point.
unimp 0
#endif /* lint */