/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_THREAD_H
#define _SYS_THREAD_H
#endif
#ifdef __cplusplus
extern "C" {	/* give these declarations C linkage when included from C++ */
#endif
/*
* The thread object, its states, and the methods by which it
* is accessed.
*/
/*
* Values that t_state may assume. Note that t_state cannot have more
* than one of these flags set at a time.
*/
typedef struct ctxop {
	/*
	 * NOTE(review): the member list was elided in this stripped header
	 * (upstream this struct carries per-thread context-switch callback
	 * operations — TODO confirm against the full sys/thread.h).  An
	 * empty struct is a compiler extension, not ISO C.
	 */
} ctxop_t;
/*
* The active file descriptor table.
* Each member of a_fd[] not equalling -1 represents an active fd.
* The structure is initialized on first use; all zeros means uninitialized.
*/
typedef struct {
	/*
	 * NOTE(review): members elided in this stripped header; the comment
	 * above says the struct holds the a_fd[] active-fd table and is
	 * zero-initialized until first use.
	 */
} afd_t;
/*
* An lwpchan provides uniqueness when sleeping on user-level
* synchronization primitives. The lc_wchan member is used
* for sleeping on kernel synchronization primitives.
*/
typedef struct {
	/*
	 * NOTE(review): members elided in this stripped header; per the
	 * comment above, this is the sleep-channel identity used for
	 * user-level synchronization primitives.
	 */
} lwpchan_t;
/*
 * Opaque forward declarations: the thread code references these types
 * only through pointers, so their full definitions are not needed here.
 */
struct turnstile;
struct panic_trap_info;
struct upimutex;
struct kproject;
struct on_trap_data;
struct waitq;
struct _kcpc_ctx;
struct _kcpc_set;
/* Definition for kernel thread identifier type */
typedef struct _kthread {
	/*
	 * NOTE(review): this is a stripped skeleton of the upstream
	 * kthread_t — nearly every member has been elided, leaving only
	 * the nested union/struct scaffolding and the #ifdef __amd64
	 * sections.  Do not rely on its size or layout.
	 */
	volatile char t_preempt_lk;
	/*
	 * non swappable part of the lwp state.
	 */
	/* point at right of high-order bit */
	/*
	 * Pointer to the dispatcher lock protecting t_state and state-related
	 * flags. This pointer can change during waits on the lock, so
	 * it should be grabbed only by thread_lock().
	 */
	/*
	 * Post-syscall / post-trap flags.
	 * No lock is required to set these.
	 * These must be cleared only by the thread itself.
	 *
	 * t_astflag indicates that some post-trap processing is required,
	 * possibly a signal or a preemption. The thread will not
	 * return to user with this set.
	 * t_post_sys indicates that some unusual post-system call
	 * handling is required, such as an error or tracing.
	 * t_sig_check indicates that some condition in ISSIG() must be
	 * checked, but doesn't prevent returning to user.
	 * t_post_sys_ast is a way of checking whether any of these three
	 * flags are set.
	 */
	union __tu {
		struct __ts {
		} _ts;
	} _tu;
	/*
	 * Real time microstate profiling.
	 */
	/* possible 4-byte filler */
	struct rprof {
	} *t_rprof;
	/*
	 * There is a turnstile inserted into the list below for
	 * every priority inverted synchronization object that
	 * this thread holds.
	 */
	/*
	 * Pointer to the turnstile attached to the synchronization
	 * object where this thread is blocked.
	 */
	/*
	 * kernel thread specific data
	 * Borrowed from userland implementation of POSIX tsd
	 */
	struct tsd_thread {
	} *t_tsd;
	/* was added to an lgroup's load */
	/* on this thread's behalf */
	union __tdu {
		struct __tds {
#ifdef __amd64
#endif
		} _tds;
	} _tdu;
#ifdef __amd64
#endif
#ifdef __amd64
#endif
} kthread_t;
/*
* Thread flag (t_flag) definitions.
* These flags must be changed only for the current thread,
* and not during preemption code, since the code being
* preempted could be modifying the flags.
*
* For the most part these flags do not need locking.
* The following flags will only be changed while the thread_lock is held,
 * to give assurance that they are consistent with t_state:
* T_WAKEABLE
*/
/*
* Flags in t_proc_flag.
* These flags must be modified only when holding the p_lock
* for the associated process.
*/
/*
* Thread scheduler flag (t_schedflag) definitions.
* The thread must be locked via thread_lock() or equiv. to change these.
*/
#define TS_ALLSTART \
/*
* Thread binding types
*/
#define TB_ALLHARD 0
/*
* No locking needed for AST field.
*/
/* True if thread is stopped on an event of interest */
/*
 * NOTE(review): only the tail of this macro survived stripping, leaving a
 * bare expression fragment at file scope (a syntax error).  Head restored
 * per upstream thread.h; the visible tail matches it exactly.
 */
#define	ISTOPPED(t) ((t)->t_state == TS_STOPPED && \
	!((t)->t_schedflag & TS_PSTART))
/* True if thread is asleep and wakeable */
/*
 * NOTE(review): macro head restored per upstream thread.h; the stripped
 * file kept only the final line of the definition.
 */
#define	ISWAKEABLE(t) (((t)->t_state == TS_SLEEP && \
	((t)->t_flag & T_WAKEABLE)))
/* True if thread is on the wait queue */
/* similar to ISTOPPED except the event of interest is CPR */
/*
 * NOTE(review): macro head restored per upstream thread.h; the stripped
 * file kept only the final line of the definition.
 */
#define	CPR_ISTOPPED(t) ((t)->t_state == TS_STOPPED && \
	!((t)->t_schedflag & TS_RESUME))
/*
* True if thread is virtually stopped (is or was asleep in
* one of the lwp_*() system calls and marked to stop by /proc.)
*/
/* similar to VSTOPPED except the point of interest is CPR */
/*
 * NOTE(review): as stripped, this macro had unbalanced parentheses (an
 * extra trailing ")") and was missing its leading conditions.  Restored
 * per upstream thread.h: true iff the thread is asleep with an lwp, is
 * wakeable, and has been marked for a CPR checkpoint stop.
 */
#define	CPR_VSTOPPED(t) \
	((t)->t_state == TS_SLEEP && \
	(t)->t_lwp != NULL && \
	((t)->t_flag & T_WAKEABLE) && \
	((t)->t_proc_flag & TP_CHKPT))
/* True if thread has been stopped by hold*() or was created stopped */
/* True if thread possesses an inherited priority */
/* The dispatch priority of a thread */
/* The assigned priority of a thread */
/*
* Macros to determine whether a thread can be swapped.
* If t_lock is held, the thread is either on a processor or being swapped.
*/
/*
* proctot(x)
* convert a proc pointer to a thread pointer. this only works with
* procs that have only one lwp.
*
* proctolwp(x)
* convert a proc pointer to a lwp pointer. this only works with
* procs that have only one lwp.
*
* ttolwp(x)
* convert a thread pointer to its lwp pointer.
*
* ttoproc(x)
* convert a thread pointer to its proc pointer.
*
* ttoproj(x)
* convert a thread pointer to its project pointer.
*
* ttozone(x)
* convert a thread pointer to its zone pointer.
*
* lwptot(x)
* convert a lwp pointer to its thread pointer.
*
* lwptoproc(x)
* convert a lwp to its proc pointer.
*/
#ifdef _KERNEL
/*
* thread_free_lock is used by the tick accounting thread to keep a thread
* from being freed while it is being examined.
*
* Thread structures are 32-byte aligned structures. That is why we use the
* following formula.
*/
/*
 * NOTE(review): the original "#define THREAD_FREE_SHIFT(t) \" ended in a
 * backslash that spliced the typedef below into the macro definition, and
 * the struct was never closed.  Macro body and typedef closing restored
 * per upstream thread.h.  The companion THREAD_FREE_NUM / THREAD_FREE_1 /
 * THREAD_FREE_2 defines and the struct members (upstream: a kmutex_t
 * padded to a cache line) were elided from this stripped header — restore
 * them before using THREAD_FREE_SHIFT.
 */
#define	THREAD_FREE_SHIFT(t)	\
	(((ulong_t)(t) >> THREAD_FREE_1) ^ ((ulong_t)(t) >> THREAD_FREE_2))

typedef struct thread_free_lock {
	/* members elided in this stripped header */
} thread_free_lock_t;

/* Keep a thread from being freed while it is being examined (see above). */
extern void thread_free_prevent(kthread_t *);
extern void thread_free_allow(kthread_t *);
/*
* Routines to change the priority and effective priority
* of a thread-locked thread, whatever its state.
*/
/*
* Routines that manipulate the dispatcher lock for the thread.
 * The locking hierarchy is as follows:
* cpu_lock > sleepq locks > run queue locks
*/
extern int default_binding_mode;
#endif /* _KERNEL */
/*
* Macros to indicate that the thread holds resources that could be critical
* to other kernel threads, so this thread needs to have kernel priority
* if it blocks or is preempted. Note that this is not necessary if the
* resource is a mutex or a writer lock because of priority inheritance.
*
* The only way one thread may legally manipulate another thread's t_kpri_req
* is to hold the target thread's thread lock while that thread is asleep.
* (The rwlock code does this to implement direct handoff to waiting readers.)
*/
/*
* Macro to change a thread's priority.
*/
/*
 * NOTE(review): only the last two lines of this macro survived stripping,
 * leaving invalid file-scope statements.  Restored per upstream thread.h.
 * The thread must be thread_lock()ed by the caller.
 */
#define	THREAD_CHANGE_PRI(t, pri) {					\
	pri_t __new_pri = (pri);					\
	DTRACE_SCHED2(change__pri, kthread_t *, (t), pri_t, __new_pri);	\
	(t)->t_pri = __new_pri;						\
	schedctl_set_cidpri(t);						\
}
/*
* Macro to indicate that a thread's priority is about to be changed.
*/
}
/*
* Macros to change thread state and the associated lock.
*/
/*
* Point it at the transition lock, which is always held.
 * The previously held lock is dropped.
*/
/*
* Set the thread's lock to be the transition lock, without dropping
 * previously held lock.
*/
/*
* Put thread in run state, and set the lock pointer to the dispatcher queue
* lock pointer provided. This lock should be held.
*/
/*
* Put thread in wait state, and set the lock pointer to the wait queue
* lock pointer provided. This lock should be held.
*/
/*
* Put thread in run state, and set the lock pointer to the dispatcher queue
* lock pointer provided (i.e., the "swapped_lock"). This lock should be held.
*/
/*
* Put the thread in zombie state and set the lock pointer to NULL.
* The NULL will catch anything that tries to lock a zombie.
*/
/*
* Set the thread into ONPROC state, and point the lock at the CPUs
* lock for the onproc thread(s). This lock should be held, so the
 * thread does not become unlocked, since these stores can be reordered.
*/
/*
* Set the thread into the TS_SLEEP state, and set the lock pointer to
 * some sleep queue's lock. The new lock should already be held.
*/
/*
 * NOTE(review): only the macro tail survived stripping, leaving an
 * invalid file-scope declaration.  Restored per upstream thread.h: puts
 * the thread in TS_SLEEP pointing at the given sleep-queue lock, which
 * the caller must already hold (per the comment above).
 */
#define	THREAD_SLEEP(tp, lp)	{					\
	disp_lock_t	*tlp;						\
	tlp = (lp);							\
	THREAD_SET_STATE(tp, TS_SLEEP, tlp);				\
}
/*
* Interrupt threads are created in TS_FREE state, and their lock
* points at the associated CPU's lock.
*/
/* if tunable kmem_stackinfo is set, fill kthread stack with a pattern */
/*
* If tunable kmem_stackinfo is set, log the latest KMEM_LOG_STK_USAGE_SIZE
* dead kthreads that used their kernel stack the most.
*/
/*
* stackinfo logged data.
*/
typedef struct kmem_stkinfo {
#ifdef __cplusplus
}
#endif
#endif /* _SYS_THREAD_H */