thread.h revision 454ab20244cd84c2b93aa273b462eab1166cf539
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_THREAD_H
#define _SYS_THREAD_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* The thread object, its states, and the methods by which it
* is accessed.
*/
/*
* Values that t_state may assume. Note that t_state cannot have more
* than one of these flags set at a time.
*/
typedef struct ctxop {
void (*save_op)(void *); /* function to invoke to save context */
void (*restore_op)(void *); /* function to invoke to restore ctx */
void (*fork_op)(void *, void *); /* invoke to fork context */
void (*lwp_create_op)(void *, void *); /* lwp_create context */
void (*exit_op)(void *); /* invoked during {thread,lwp}_exit() */
void (*free_op)(void *, int); /* function which frees the context */
void *arg; /* argument to above functions, ctx pointer */
} ctxop_t;
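/*
 * Illustrative sketch (not part of this header): a subsystem with
 * per-thread hardware state can register save/restore hooks through a
 * ctxop registration routine such as installctx(); the hook names and
 * state pointer below are assumptions for illustration only.
 *
 *	static void my_save(void *arg);		save state into arg
 *	static void my_restore(void *arg);	reload state from arg
 *
 *	installctx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, NULL, NULL);
 */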
/*
* The active file descriptor table.
 * Each member of a_fd[] not equal to -1 represents an active fd.
* The structure is initialized on first use; all zeros means uninitialized.
*/
typedef struct _afd {
int *a_fd; /* pointer to list of fds */
short a_nfd; /* number of entries in *a_fd */
short a_stale; /* one of the active fds is being closed */
} afd_t;
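/*
 * Illustrative sketch (not part of this header): scanning the table for
 * active fds; the t_activefd field name is an assumption of this example.
 *
 *	afd_t *afd = &curthread->t_activefd;
 *	for (int i = 0; i < afd->a_nfd; i++) {
 *		if (afd->a_fd[i] != -1)
 *			...	afd->a_fd[i] is an active fd
 *	}
 */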
/*
* An lwpchan provides uniqueness when sleeping on user-level
* synchronization primitives. The lc_wchan member is used
* for sleeping on kernel synchronization primitives.
*/
typedef struct {
	caddr_t lc_wchan0;	/* user-level sync object identity */
	caddr_t lc_wchan;	/* kernel sync object address */
} lwpchan_t;
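/*
 * Illustrative sketch (not part of this header): two sleepers share a
 * sleep queue only if both words of their lwpchans match, e.g.:
 *
 *	int same = (a.lc_wchan0 == b.lc_wchan0 &&
 *	    a.lc_wchan == b.lc_wchan);
 */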
typedef struct _kthread *kthread_id_t;
struct turnstile;
struct panic_trap_info;
struct upimutex;
struct kproject;
struct on_trap_data;
struct waitq;
struct _kcpc_ctx;
struct _kcpc_set;
/* Definition for kernel thread identifier type */
typedef uint64_t kt_did_t;
typedef struct _kthread {
void (*t_startpc)(void); /* PC where thread started */
short t_affinitycnt; /* nesting level of kernel affinity-setting */
short t_bind_cpu; /* user-specified CPU binding (-1 if none) */
volatile char t_preempt; /* don't preempt thread if set */
volatile char t_preempt_lk;
char t_writer; /* sleeping in lwp_rwlock_lock(RW_WRITE_LOCK) */
struct _sobj_ops *t_sobj_ops;	/* sync object ops vector */
void *t_cldata; /* per scheduling class specific data */
char t_nomigrate; /* do not migrate if set */
/*
 * non-swappable part of the lwp state.
*/
int t_dslot; /* index in proc's thread directory */
	uint_t t_pctcpu;	/* %cpu at last clock_tick(), binary */
				/* point at right of high-order bit */
short t_sysnum; /* system call number */
/*
* Pointer to the dispatcher lock protecting t_state and state-related
* flags. This pointer can change during waits on the lock, so
* it should be grabbed only by thread_lock().
*/
	disp_lock_t *t_lockp;	/* pointer to the dispatcher lock */
	volatile char t_pre_sys;	/* pre-syscall work needed */
/*
* Post-syscall / post-trap flags.
* No lock is required to set these.
* These must be cleared only by the thread itself.
*
* t_astflag indicates that some post-trap processing is required,
* possibly a signal or a preemption. The thread will not
* return to user with this set.
 * t_post_sys indicates that some unusual post-system call
* handling is required, such as an error or tracing.
* t_sig_check indicates that some condition in ISSIG() must be
* checked, but doesn't prevent returning to user.
* t_post_sys_ast is a way of checking whether any of these three
* flags are set.
*/
union __tu {
struct __ts {
volatile char _t_astflag; /* AST requested */
volatile char _t_sig_check; /* ISSIG required */
volatile char _t_post_sys; /* post_syscall req */
volatile char _t_trapret; /* call CL_TRAPRET */
} _ts;
volatile int _t_post_sys_ast; /* OR of these flags */
} _tu;
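/*
 * Illustrative sketch (not part of this header): the union lets the
 * trap-return path test all of the flags with a single load; accessor
 * macros normally defined alongside this union are elided here, so the
 * raw member paths are spelled out:
 *
 *	if (curthread->_tu._t_post_sys_ast == 0)
 *		return;		fast path: no post-syscall work
 *	if (curthread->_tu._ts._t_sig_check)
 *		...		run ISSIG() processing
 */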
/*
* Real time microstate profiling.
*/
	/* possible 4-byte filler */
	hrtime_t t_waitrq;	/* timestamp for run queue wait time */
int t_mstate; /* current microstate */
struct rprof {
int rp_anystate; /* set if any state non-zero */
} *t_rprof;
/*
 * There is a turnstile inserted into the list below for
 * every priority inverted synchronization object that
 * this thread holds.
 */
	struct turnstile *t_prioinv;

/*
 * Pointer to the turnstile attached to the synchronization
 * object where this thread is blocked.
 */
	struct turnstile *t_ts;
/*
* kernel thread specific data
* Borrowed from userland implementation of POSIX tsd
*/
struct tsd_thread {
	struct tsd_thread *ts_next;	/* threads with TSD */
	struct tsd_thread *ts_prev;	/* threads with TSD */
	uint_t	ts_nkeys;		/* entries in value array */
	void	**ts_value;		/* array of value/key */
} *t_tsd;
int t_bind_pset; /* processor set binding */
int *t_lgrp_affinity; /* lgroup affinity */
void *t_taskq; /* for threads belonging to taskq */
	hrtime_t t_anttime;	/* most recent time anticipatory load */
				/* was added to an lgroup's load */
				/* on this thread's behalf */
char *t_pdmsg; /* privilege debugging message */
union __tdu {
	struct __tds {
		uint8_t	_t_dtrace_on;	/* hit a fasttrap tracepoint */
		uint8_t	_t_dtrace_step;	/* about to return to kernel */
		uint8_t	_t_dtrace_ret;	/* handling a return probe */
		uint8_t	_t_dtrace_ast;	/* saved ast flag */
#ifdef __amd64
		uint8_t	_t_dtrace_reg;	/* modified register */
#endif
	} _tds;
	ulong_t	_t_dtrace_ft;	/* bitwise or of these flags */
} _tdu;
#ifdef __amd64
	uint64_t t_dtrace_regv;	/* DTrace saved reg from fasttrap */
#endif
} kthread_t;
/*
* Thread flag (t_flag) definitions.
* These flags must be changed only for the current thread,
* and not during preemption code, since the code being
* preempted could be modifying the flags.
*
* For the most part these flags do not need locking.
* The following flags will only be changed while the thread_lock is held,
 * to give assurance that they are consistent with t_state:
* T_WAKEABLE
*/
/*
* Flags in t_proc_flag.
* These flags must be modified only when holding the p_lock
* for the associated process.
*/
/*
* Thread scheduler flag (t_schedflag) definitions.
* The thread must be locked via thread_lock() or equiv. to change these.
*/
#define TS_ALLSTART \
	(TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
/*
* Thread binding types
*/
#define TB_ALLHARD 0
#define TB_CPU_IS_HARD(t) (!TB_CPU_IS_SOFT(t))
/*
* No locking needed for AST field.
*/
/* True if thread is stopped on an event of interest */
#define ISTOPPED(t) ((t)->t_state == TS_STOPPED && \
	!((t)->t_schedflag & TS_PSTART))

/* True if thread is asleep and wakeable */
#define ISWAKEABLE(t) (((t)->t_state == TS_SLEEP && \
	((t)->t_flag & T_WAKEABLE)))

/* True if thread is on the wait queue */
#define ISWAITING(t) ((t)->t_state == TS_WAIT)

/* similar to ISTOPPED except the event of interest is CPR */
#define CPR_ISTOPPED(t) ((t)->t_state == TS_STOPPED && \
	!((t)->t_schedflag & TS_RESUME))
/*
* True if thread is virtually stopped (is or was asleep in
* one of the lwp_*() system calls and marked to stop by /proc.)
*/
/* similar to VSTOPPED except the point of interest is CPR */
#define CPR_VSTOPPED(t) \
	((t)->t_state == TS_SLEEP && \
	((t)->t_flag & T_WAKEABLE) && \
	((t)->t_proc_flag & TP_CHKPT))
/* True if thread has been stopped by hold*() or was created stopped */
/* True if thread possesses an inherited priority */
#define INHERITED(t)	((t)->t_epri != 0)

/* The dispatch priority of a thread */
#define DISP_PRIO(t)	((t)->t_epri > (t)->t_pri ? (t)->t_epri : (t)->t_pri)

/* The assigned priority of a thread */
#define ASSIGNED_PRIO(t) ((t)->t_pri)
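/*
 * Illustrative example (not part of this header): a thread assigned
 * t_pri 60 that is lent t_epri 99 through priority inheritance has
 * DISP_PRIO() 99 while ASSIGNED_PRIO() still reports 60.
 */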
/*
* Macros to determine whether a thread can be swapped.
* If t_lock is held, the thread is either on a processor or being swapped.
*/
/*
* proctot(x)
* convert a proc pointer to a thread pointer. this only works with
* procs that have only one lwp.
*
* proctolwp(x)
* convert a proc pointer to a lwp pointer. this only works with
* procs that have only one lwp.
*
* ttolwp(x)
* convert a thread pointer to its lwp pointer.
*
* ttoproc(x)
* convert a thread pointer to its proc pointer.
*
* ttoproj(x)
* convert a thread pointer to its project pointer.
*
* ttozone(x)
* convert a thread pointer to its zone pointer.
*
* lwptot(x)
* convert a lwp pointer to its thread pointer.
*
* lwptoproc(x)
* convert a lwp to its proc pointer.
*/
#define lwptot(x) ((x)->lwp_thread)
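/*
 * Illustrative sketch (not part of this header): typical conversions,
 * assuming the companion macros elided from this excerpt are defined
 * as usual:
 *
 *	kthread_t *t = curthread;
 *	proc_t *p = ttoproc(t);		thread -> process
 *	klwp_t *lwp = ttolwp(t);	thread -> lwp (NULL for pure
 *					kernel threads)
 *	kthread_t *t2 = lwptot(lwp);	lwp -> thread, per the macro above
 */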
#ifdef _KERNEL
/*
* thread_free_lock is used by the tick accounting thread to keep a thread
* from being freed while it is being examined.
*/
#define THREAD_FREE_NUM 1024
#define THREAD_FREE_SHIFT_BITS 5
typedef struct thread_free_lock {
	kmutex_t	tf_lock;
	uchar_t		tf_pad[64 - sizeof (kmutex_t)];	/* pad to cache line */
} thread_free_lock_t;
extern void thread_free_prevent(kthread_t *);
extern void thread_free_allow(kthread_t *);
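/*
 * Illustrative sketch (not part of this header): the tick accountant
 * brackets its examination of another thread so the thread cannot be
 * freed underneath it:
 *
 *	thread_free_prevent(t);
 *	...	read t's accounting fields safely
 *	thread_free_allow(t);
 */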
/*
* Routines to change the priority and effective priority
* of a thread-locked thread, whatever its state.
*/
/*
* Routines that manipulate the dispatcher lock for the thread.
 * The locking hierarchy is as follows:
* cpu_lock > sleepq locks > run queue locks
*/
extern int default_binding_mode;
#endif /* _KERNEL */
/*
* Macros to indicate that the thread holds resources that could be critical
* to other kernel threads, so this thread needs to have kernel priority
* if it blocks or is preempted. Note that this is not necessary if the
* resource is a mutex or a writer lock because of priority inheritance.
*
* The only way one thread may legally manipulate another thread's t_kpri_req
* is to hold the target thread's thread lock while that thread is asleep.
* (The rwlock code does this to implement direct handoff to waiting readers.)
*/
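/*
 * Illustrative sketch (not part of this header): a holder of such a
 * critical resource typically brackets the hold with the
 * THREAD_KPRI_REQUEST()/THREAD_KPRI_RELEASE()-style macros (elided
 * from this excerpt), which adjust t_kpri_req so the dispatcher runs
 * the thread at kernel priority until the resource is released.
 */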
/*
* Macro to change a thread's priority.
*/
#define THREAD_CHANGE_PRI(t, pri) { \
	(t)->t_pri = (pri); \
	schedctl_set_cidpri(t); \
}
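/*
 * Illustrative usage (not part of this header): per the comment above
 * about thread-locked threads, the caller holds the thread's
 * dispatcher lock around the change:
 *
 *	thread_lock(t);
 *	THREAD_CHANGE_PRI(t, new_pri);
 *	thread_unlock(t);
 */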
/*
* Macro to indicate that a thread's priority is about to be changed.
*/
#define THREAD_WILLCHANGE_PRI(t, pri) { \
}
/*
* Macros to change thread state and the associated lock.
*/
/*
* Point it at the transition lock, which is always held.
 * The previously held lock is dropped.
*/
/*
* Set the thread's lock to be the transition lock, without dropping
 * the previously held lock.
*/
/*
* Put thread in run state, and set the lock pointer to the dispatcher queue
* lock pointer provided. This lock should be held.
*/
/*
* Put thread in wait state, and set the lock pointer to the wait queue
* lock pointer provided. This lock should be held.
*/
/*
* Put thread in run state, and set the lock pointer to the dispatcher queue
* lock pointer provided (i.e., the "swapped_lock"). This lock should be held.
*/
/*
* Put the thread in zombie state and set the lock pointer to NULL.
* The NULL will catch anything that tries to lock a zombie.
*/
/*
 * Set the thread into ONPROC state, and point the lock at the CPU's
 * lock for the onproc thread(s). This lock should be held, so the
 * thread does not become unlocked, since these stores can be reordered.
*/
/*
 * Set the thread into the TS_SLEEP state, and set the lock pointer
 * to some sleep queue's lock. The new lock should already be held.
*/
#define THREAD_SLEEP(t, lp)	{ \
	disp_lock_t *tlp; \
	tlp = (t)->t_lockp; \
	(t)->t_state = TS_SLEEP; \
	(t)->t_lockp = (lp); \
	disp_lock_exit_high(tlp); \
}
/*
* Interrupt threads are created in TS_FREE state, and their lock
* points at the associated CPU's lock.
*/
/* if tunable kmem_stackinfo is set, fill kthread stack with a pattern */
#define KMEM_STKINFO_PATTERN 0xbadcbadcbadcbadcULL
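/*
 * Illustrative sketch (not part of this header): since kernel stacks
 * grow down, a dead thread's stack high-water mark is the first word,
 * scanning upward from the base, that no longer holds the fill
 * pattern (variable names here are assumptions):
 *
 *	uint64_t *p = (uint64_t *)stk_base;
 *	while (p < (uint64_t *)stk_top && *p == KMEM_STKINFO_PATTERN)
 *		p++;
 *	used = (size_t)((caddr_t)stk_top - (caddr_t)p);
 */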
/*
* If tunable kmem_stackinfo is set, log the latest KMEM_LOG_STK_USAGE_SIZE
* dead kthreads that used their kernel stack the most.
*/
#define KMEM_STKINFO_LOG_SIZE 16
#define KMEM_STKINFO_STR_SIZE 64
/*
* stackinfo logged data.
*/
typedef struct kmem_stkinfo {
	caddr_t	kthread;	/* kthread pointer */
	caddr_t	t_startpc;	/* where kthread started */
	caddr_t	start;		/* kthread stack start address */
	size_t	stksz;		/* kthread stack size */
	size_t	percent;	/* kthread stack high water mark */
	char	cmd[KMEM_STKINFO_STR_SIZE];	/* owner thread name */
} kmem_stkinfo_t;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_THREAD_H */