/* swtch.s revision bd28a477274db2b836577dfd6f223c696ad00720 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/*
* Process switching routines.
*/
#if !defined(lint)
#include "assym.h"
#else /* lint */
#endif /* lint */
#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machclock.h>
#include <sys/privregs.h>
#include <vm/hat_sfmmu.h>
/*
* resume(kthread_id_t)
*
* a thread can only run on one processor at a time. there
* exists a window on MPs where the current thread on one
* processor is capable of being dispatched by another processor.
* some overlap between outgoing and incoming threads can happen
* when they are the same thread. in this case where the threads
* are the same, resume() on one processor will spin on the incoming
* thread until resume() on the other processor has finished with
* the outgoing thread.
*
* The MMU context changes when the resuming thread resides in a different
* process. Kernel threads are known by resume to reside in process 0.
* The MMU context, therefore, only changes when resuming a thread in
* a process different from curproc.
*
* resume_from_intr() is called when the thread being resumed was not
* passivated by resume (e.g. was interrupted). This means that the
* resume lock is already held and that a restore context is not needed.
* Also, the MMU context is not changed on the resume in this case.
*
* resume_from_zombie() is the same as resume except the calling thread
* is a zombie and must be put on the deathrow list after the CPU is
* off the stack.
*/
#if defined(lint)
/* ARGSUSED */
void
{}
#else /* lint */
! resume(kthread_id_t) -- switch this CPU from the current thread
! (THREAD_REG, i.e. %g7) onto the new thread.
! NOTE(review): this copy of the file is incomplete -- ENTRY(resume)
! and several stretches of code between the numeric labels below are
! missing, so some branch targets here have no visible bodies.
0:
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
#endif
ldn [THREAD_REG + T_CTX], %g3 ! get ctx pointer
ldn [%o4 + LWP_FPU], %o0 ! fp pointer
rd %gsr, %g5
1:
! (fragment) the code that belonged between labels 1 and 2 is
! missing from this copy of the file.
2:
! Switch onto the idle thread's stack so the outgoing thread's
! stack may be handed off safely.
ldn [%i1 + CPU_IDLE_THREAD], %o0 ! idle thread pointer
ldn [%o0 + T_SP], %o1 ! get onto idle thread stack
sub %o1, SA(MINFRAME), %sp ! save room for ins and locals
clr %fp
!
! Set the idle thread as the current thread
!
mov THREAD_REG, %l3 ! save %g7 (current thread)
mov %o0, THREAD_REG ! set %g7 to idle
! Release the outgoing thread's dispatcher lock
! to allow it to be dispatched by another processor.
!
clrb [%l3 + T_LOCK] ! clear tp->t_lock
!
! IMPORTANT: Registers at this point must be:
! %i0 = new thread
! %i1 = cpu pointer
! %i2 = old proc pointer
! %i3 = new proc pointer
!
! Here we are in the idle thread, have dropped the old thread.
!
ALTENTRY(_resume_from_idle)
! SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
SET_KCONTEXTREG(%o0, %g1, %g2, %g3, %o3, l1, l2, l3)
cmp %i2, %i3 ! resuming the same process?
be,pt %xcc, 5f ! yes.
nop
ldx [%i3 + P_AS], %o0 ! load p->p_as
ldx [%o0 + A_HAT], %i5 ! %i5 = new proc hat
!
! update cpusran field
!
ld [%i1 + CPU_ID], %o4
add %i5, SFMMU_CPUSRAN, %o5
CPU_INDEXTOSET(%o5, %o4, %g1)
ldx [%o5], %o2 ! %o2 = cpusran field
mov 1, %g2
sllx %g2, %o4, %o4 ! %o4 = bit for this cpu
andcc %o4, %o2, %g0
bnz,pn %xcc, 0f ! bit already set, go to 0
nop
3:
0:
! NOTE(review): code is missing here in this copy; presumably it
! set the cpusran bit computed above (the %o4 bit) with an atomic
! update, retrying at 3b, before falling through.
! Below: if the new HAT (%i5) participates in a shared context
! domain (sfmmu_scdp != NULL), allocate a context for the shared
! sfmmu via sfmmu_alloc_ctx, then program the MMU registers with
! sfmmu_load_mmustate.
3:
nop
ldn [%i5 + SFMMU_SCDP], %o0 ! using shared contexts?
brz,a,pt %o0, 4f
nop
ldn [%o0 + SCD_SFMMUP], %o0 ! %o0 = scdp->scd_sfmmup
mov %i1, %o2 ! %o2 = CPU
set SFMMU_SHARED, %o3 ! %o3 = sfmmu shared flag
call sfmmu_alloc_ctx
mov 1, %o1 ! %o1 = allocate flag = 1
4:
call sfmmu_load_mmustate ! program MMU registers
mov %i5, %o0
wrpr %g0, %i4, %pstate ! enable interrupts
5:
! (fragment) missing here: presumably the acquisition of the
! incoming thread's t_lock; label 6 retries and 7f handles failure.
6:
brnz,pn %o0, 7f ! lock failed
ldx [%i0 + T_PC], %i7 ! delay - restore resuming thread's pc
! NOTE(review): the load of %o1 (the incoming thread's lwp
! pointer, tested just below) is not visible in this copy.
4:
brz,a,pn %o1, 1f ! if no lwp, branch and clr mpcb
stx %g0, [%i1 + CPU_MPCB]
!
! user thread
! o1 = lwp
! i0 = new thread
!
ldx [%i0 + T_STACK], %o0
#ifdef CPU_MPCB_PA
#endif
ldx [%i0 + T_SP], %o0 ! restore resuming thread's sp
!
! Note that the ld to the gsr register ensures that the loading of
! the floating point saved state has completed without necessity
! of a membar #Sync.
!
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
sethi %hi(fpu_exists), %g3
ld [%g3 + %lo(fpu_exists)], %g3
brz,pn %g3, 2f
ldx [%i0 + T_CTX], %i5 ! should resumed thread restorectx?
#endif
ldx [%o1 + LWP_FPU], %o0 ! fp pointer
ld [%o0 + FPU_FPRS], %g5 ! get fpu_fprs
andcc %g5, FPRS_FEF, %g0 ! is FPRS_FEF set?
bz,a,pt %icc, 9f ! no, skip fp_restore
wr %g0, FPRS_FEF, %fprs ! enable fprs so fp_zero works
ldx [THREAD_REG + T_CPU], %o4 ! cpu pointer
call fp_restore
wr %g5, %g0, %fprs ! enable fpu and restore fprs
ldx [%o0 + FPU_GSR], %g5 ! load saved GSR data
wr %g5, %g0, %gsr ! restore %gsr data
ba,pt %icc,2f
ldx [%i0 + T_CTX], %i5 ! should resumed thread restorectx?
9:
! (fragment) fp-restore skip path; the code following label 9 is
! missing from this copy of the file.
1:
#ifdef CPU_MPCB_PA
#endif
! Kernel thread (no lwp): restore the stack pointer and reload any
! kernel fp state that was saved at the base of the thread's stack
! (see the FPRS_FEF check below).
ldx [%i0 + T_SP], %o0 ! restore resuming thread's sp
!
! Note that the ld to the gsr register ensures that the loading of
! the floating point saved state has completed without necessity
! of a membar #Sync.
!
ldx [%i0 + T_STACK], %o0
ld [%o0 + SA(MINFRAME) + FPU_FPRS], %g5 ! load fprs
ldx [%i0 + T_CTX], %i5 ! should thread restorectx?
andcc %g5, FPRS_FEF, %g0 ! did we save fp in stack?
bz,a,pt %icc, 2f
wr %g0, %g0, %fprs ! clr fprs
wr %g5, %g0, %fprs ! enable fpu and restore fprs
call fp_restore
add %o0, SA(MINFRAME), %o0 ! o0 = kpu_t ptr
ldx [%o0 + FPU_GSR], %g5 ! load saved GSR data
wr %g5, %g0, %gsr ! restore %gsr data
2:
!
! Restore resuming thread's context
!
! (fragment) the restorectx call driven by %i5 (the t_ctx value
! loaded above) is missing from this copy of the file.
8:
! (fragment) the code between the remaining labels (8, 5, 0, 1)
! is missing from this copy of the file.
5:
0:
1:
! 7: lock-failure target (see the "lock failed" branch above);
! its body is not visible in this copy.
7:
SET_SIZE(_resume_from_idle)
SET_SIZE(resume)
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
void
resume_from_zombie(kthread_id_t t)
{}
#else /* lint */
! resume_from_zombie(t): like resume(), but the calling thread is a
! zombie; it is handed to the reaper (reapq_add) once this CPU is
! safely off its stack (see the block comment at the top of file).
ENTRY(resume_from_zombie)
save %sp, -SA(MINFRAME), %sp ! save ins and locals
call __dtrace_probe___sched_off__cpu ! DTrace probe
mov %i0, %o0 ! arg for DTrace probe
ldn [THREAD_REG + T_CPU], %i1 ! cpu pointer
flushw ! flushes all but this window
ldn [THREAD_REG + T_PROCP], %i2 ! old procp for mmu ctx
! Temporarily switch to the idle thread's stack so the zombie's
! stack can be reclaimed out from under us.
ldn [%i1 + CPU_IDLE_THREAD], %o2 ! idle thread pointer
ldn [%o2 + T_SP], %o1 ! get onto idle thread stack
sub %o1, SA(MINFRAME), %sp ! save room for ins and locals
clr %fp
!
! Set the idle thread as the current thread.
! Put the zombie on death-row.
!
mov THREAD_REG, %o0 ! save %g7 = curthread for arg
mov %o2, THREAD_REG ! set %g7 to idle
stn %g0, [%i1 + CPU_MPCB] ! clear mpcb
#ifdef CPU_MPCB_PA
mov -1, %o1
stx %o1, [%i1 + CPU_MPCB_PA]
#endif
call reapq_add ! reapq_add(old_thread);
! NOTE(review): the delay-slot instruction for the call above and
! the rest of this routine (including its SET_SIZE) are missing here.
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
void
{}
#else /* lint */
! Save kernel fp state to the stack if in use, since a thread
! doesn't retain their floating-point registers when
! switching out of the interrupt context.
! NOTE(review): ENTRY(resume_from_intr) and the lint prototype are
! missing from this copy of the file.
rd %fprs, %g4
ldn [THREAD_REG + T_STACK], %i2
andcc %g4, FPRS_FEF, %g0 ! is FPRS_FEF set
bz,pt %icc, 4f
st %g4, [%i2 + SA(MINFRAME) + FPU_FPRS] ! save fprs
! save kernel fp state in stack
add %i2, SA(MINFRAME), %o0 ! %o0 = kfpu_t ptr
rd %gsr, %g5
call fp_save
stx %g5, [%o0 + FPU_GSR] ! store GSR
4:
flushw ! flushes all but this window
stn %fp, [THREAD_REG + T_SP] ! delay - save sp
stn %i7, [THREAD_REG + T_PC] ! save return address
ldn [%i0 + T_PC], %i7 ! restore resuming thread's pc
! (fragment) some intervening code is missing from this copy of
! the file.
mov THREAD_REG, %l3 ! save old thread
! NOTE(review): missing here -- presumably THREAD_REG is switched
! to the incoming thread (%i0) before its t_sp is read below.
ldn [THREAD_REG + T_SP], %o0 ! restore resuming thread's sp
!
! If we are resuming an interrupt thread, store a timestamp in the
! thread structure.
!
lduh [THREAD_REG + T_FLAGS], %o0
andcc %o0, T_INTR_THREAD, %g0
bnz,pn %xcc, 0f
! NOTE(review): the delay-slot instruction for the branch above and
! the non-interrupt-thread fall-through code are missing here.
1:
0:
!
! We're an interrupt thread. Update t_intr_start and cpu_intrcnt
!
add THREAD_REG, T_INTR_START, %o2
2:
ldx [%o2], %o1
RD_CLOCK_TICK(%o0,%o3,%l1,__LINE__)
casx [%o2], %o1, %o0
cmp %o0, %o1
bne,pn %xcc, 2b
ldn [THREAD_REG + T_INTR], %l1 ! delay
2:
3:
!
! We're a non-interrupt thread and cpu_kprunrun is set. call kpreempt.
!
call kpreempt
mov KPREEMPT_SYNC, %o0
ba,pt %xcc, 1b
nop
SET_SIZE(resume_from_intr)
#endif /* lint */
/*
* thread_start()
*
* the current register window was crafted by thread_run() to contain
* an address of a procedure (in register %i7), and its args in registers
* %i0 through %i5. a stack trace of this thread will show the procedure
* that thread_start() invoked at the bottom of the stack. an exit routine
* is stored in %l0 and called when started thread returns from its called
* procedure.
*/
#if defined(lint)
void
thread_start(void)
{}
#else /* lint */
ENTRY(thread_start)
! NOTE(review): this routine is truncated in this copy -- the call
! through the procedure address placed in %i7 and the exit-routine
! path via %l0 described in the block comment above are not visible;
! only the first argument move and a trap guard remain.
mov %i0, %o0
unimp 0 ! illegal-instruction trap if control ever reaches here
#endif /* lint */