/* cpuvar.h revision b8fac8e162eda7e98db13dfa3e439e43f90f41d9 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_CPUVAR_H
#define _SYS_CPUVAR_H
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/processor.h>
#include <sys/machcpuvar.h>
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
struct squeue_set_s; /* opaque; defined elsewhere */
/*
 * Presumed cache-coherence (cache-line) granularity in bytes, used to
 * pad per-CPU data against false sharing -- TODO confirm per platform.
 */
#define CPU_CACHE_COHERENCE_SIZE 64
#define S_LOADAVG_SZ 11 /* system load-average ring size -- TODO confirm consumers */
#define S_MOVAVG_SZ 10 /* moving-average window size -- TODO confirm consumers */
/*
 * Per-CPU load-average bookkeeping.
 * NOTE(review): this copy looks truncated -- canonical versions of this
 * header also carry the recorded load samples here; confirm no fields
 * were dropped before use.
 */
struct loadavg_s {
int lg_cur; /* current loadavg entry */
unsigned int lg_len; /* number entries recorded */
};
/*
 * For fast event tracing.
 */
struct ftrace_record; /* opaque; defined elsewhere */
/*
 * NOTE(review): this typedef is truncated in this copy -- the closing
 * brace/typedef name and remaining members are missing, so the two
 * forward declarations below are lexically swallowed into the struct
 * as written.  Restore from the canonical header before use.
 */
typedef struct ftrace_data {
int ftd_state; /* ftrace flags */
struct cyc_cpu;
struct nvlist;
/*
* Per-CPU data.
*
* Be careful adding new members: if they are not the same in all modules (e.g.
* change size depending on a #define), CTF uniquification can fail to work
* properly. Furthermore, this is transitive in that it applies recursively to
* all types pointed to by cpu_t.
*/
/*
 * NOTE(review): this copy of cpu_t is heavily elided -- several members
 * described by the comments below (the CPU linkage lists, cpu_disp,
 * cpu_dispthread, the statistics block, the device interrupt weight
 * sum) are missing.  Restore them from the canonical header before use.
 */
typedef struct cpu {
int cpu_cache_offset; /* see kmem.c for details */
/*
 * Links to other CPUs. It is safe to walk these lists if
 * one of the following is true:
 * - cpu_lock held
 * - preemption disabled via kpreempt_disable
 * - PIL >= DISP_LEVEL
 * - acting thread is an interrupt thread
 * - all other CPUs are paused
 */
struct cpu *cpu_prev_lpl; /* NOTE(review): only the "prev" link survives in this copy */
/*
 * Scheduling variables.
 */
/*
 * Note that cpu_disp is set before the CPU is added to the system
 * and is never modified. Hence, no additional locking is needed
 * beyond what's necessary to access the cpu_t structure.
 * NOTE(review): the cpu_disp member itself is absent from this copy.
 */
char cpu_runrun; /* scheduling flag - set to preempt */
char cpu_kprunrun; /* force kernel preemption */
/* was chosen for scheduling */
/* NOTE(review): the member the preceding comment fragment described is missing. */
/*
 * The following field is updated whenever cpu_dispthread changes,
 * and also wherever the current thread's (cpu_dispthread's) priority
 * changes.  This is used in disp_lowpri_cpu().
 * NOTE(review): the field itself is absent from this copy.
 */
/*
 * Interrupt data.
 */
int cpu_base_spl; /* priority for highest interrupt ("rupt") active */
/*
 * Statistics.
 */
/*
 * Configuration information for the processor_info system call.
 */
char cpu_cpr_flags; /* CPR (suspend/resume) related info */
char *cpu_idstr; /* for printing and debugging */
char *cpu_brandstr; /* for printing */
/*
 * Sum of all device interrupt weights that are currently directed at
 * this cpu. Cleared at start of interrupt redistribution.
 * NOTE(review): the weight member itself is absent from this copy.
 */
void *cpu_vm_data; /* opaque per-CPU VM-layer data (see cpu_vm_data_init()) */
/*
 * New members must be added /before/ this member, as the CTF tools
 * rely on this being the last field before cpu_m, so they can
 * correctly calculate the offset when synthetically adding the cpu_m
 * member in objects that do not have it. This fixup is required for
 * uniquification to work correctly.
 */
/*
 * NOTE(review): unmatched #endif below -- the matching #if guarding
 * the machine-dependent cpu_m member was lost from this copy.
 */
#endif
} cpu_t;
/*
* The cpu_core structure consists of per-CPU state available in any context.
* On some architectures, this may mean that the page(s) containing the
* NCPU-sized array of cpu_core structures must be locked in the TLB -- it
* is up to the platform to assure that this is performed properly. Note that
* the structure is sized to avoid false sharing.
*/
sizeof (kmutex_t))
/*
 * NOTE(review): the member list of cpu_core is missing from this copy;
 * as written this is an empty struct, which is not valid C.  Per the
 * comment above, it holds per-CPU state available in any context and
 * is sized to avoid false sharing; restore the members before use.
 */
typedef struct cpu_core {
} cpu_core_t;
#ifdef _KERNEL
extern cpu_core_t cpu_core[]; /* NCPU-sized array; see comment above */
#endif /* _KERNEL */
/*
* CPU_ON_INTR() macro. Returns non-zero if currently on interrupt stack.
* Note that this isn't a test for a high PIL. For example, cpu_intr_actv
* does not get updated when we go through sys_trap from TL>0 at high PIL.
* getpil() should be used instead to check for PIL levels.
*/
/* MEMBERS PROTECTED BY "atomicity": cpu_flags */
/*
* Flags in the CPU structure.
*
* These are protected by cpu_lock (except during creation).
*
* Offlined-CPUs have three stages of being offline:
*
* CPU_ENABLE indicates that the CPU is participating in I/O interrupts
* that can be directed at a number of different CPUs. If CPU_ENABLE
* is off, the CPU will not be given interrupts that can be sent elsewhere,
* but will still get interrupts from devices associated with that CPU only,
* and from other CPUs.
*
* CPU_OFFLINE indicates that the dispatcher should not allow any threads
* other than interrupt threads to run on that CPU. A CPU will not have
* CPU_OFFLINE set if there are any bound threads (besides interrupts).
*
* CPU_QUIESCED is set if p_offline was able to completely turn idle the
* CPU and it will not have to run interrupt threads. In this case it'll
* stay in the idle loop until CPU_QUIESCED is turned off.
*
* CPU_FROZEN is used only by CPR to mark CPUs that have been successfully
* suspended (in the suspend path), or have yet to be resumed (in the resume
* case).
*
* On some platforms CPUs can be individually powered off.
* The following flags are set for powered off CPUs: CPU_QUIESCED,
* CPU_OFFLINE, and CPU_POWEROFF. The following flags are cleared:
* CPU_RUNNING, CPU_READY, CPU_EXISTS, and CPU_ENABLE.
*/
/*
 * Bit-name table for pretty-printing cpu_flags: the leading \20
 * (octal 020 == 16) gives the field width, and each subsequent \nnn
 * octal byte is a 1-based bit position followed by that bit's name,
 * matching the flag descriptions above.  NOTE(review): this follows
 * the conventional "%b" bit-format-string layout -- confirm against
 * the actual consumer.
 */
#define FMT_CPU_FLAGS \
"\20\12fault\11spare\10frozen" \
"\7poweroff\6offline\5enable\4exist\3quiesced\2ready\1run"
/*
* Flags for cpu_offline(), cpu_faulted(), and cpu_spare().
*/
/*
* DTrace flags.
*/
#if defined(__sparc)
#endif
/*
* Dispatcher flags
* These flags must be changed only by the current CPU.
*/
#endif /* _KERNEL || _KMEMUSER */
/*
* Macros for manipulating sets of CPUs as a bitmap. Note that this
* bitmap may vary in size depending on the maximum CPU id a specific
* platform supports. This may be different than the number of CPUs
* the platform supports, since CPU ids can be sparse. We define two
* sets of macros; one for platforms where the maximum CPU id is less
* than the number of bits in a single word (32 in a 32-bit kernel,
* 64 in a 64-bit kernel), and one for platforms that require bitmaps
* of more than one word.
*/
#if CPUSET_WORDS > 1
/*
 * NOTE(review): the bitmap word array that implements the set is
 * missing from this copy; as written this is an empty struct, which
 * is not valid C.  Restore the member before use.
 */
typedef struct cpuset {
} cpuset_t;
/*
 * Private functions for manipulating cpusets that do not fit in a
 * single word. These should not be used directly; instead the
 * CPUSET_* macros should be used so the code will be portable
 * across different definitions of NCPU.
 */
extern void cpuset_all(cpuset_t *); /* fill the set with every CPU id -- TODO confirm */
extern int cpuset_isnull(cpuset_t *); /* non-zero iff no CPU is in the set -- TODO confirm */
/*
* Find one CPU in the cpuset.
* Sets "cpu" to the id of the found CPU, or CPUSET_NOTINSET if no cpu
* could be found. (i.e. empty set)
*/
}
/*
* Determine the smallest and largest CPU id in the set. Returns
* CPUSET_NOTINSET in smallest and largest when set is empty.
*/
}
/*
* Atomic cpuset operations
* These are safe to use for concurrent cpuset manipulations.
* "xdel" and "xadd" are exclusive operations, that set "result" to "0"
* if the add or del was successful, or "-1" if not successful.
* (e.g. attempting to add a cpu to a cpuset that's already there, or
* deleting a cpu that's not in the cpuset)
*/
int _i; \
}
int _i; \
}
#define CPUSET_ZERO(set) { \
int _i; \
}
}
}
#else /* CPUSET_WORDS <= 1 */
#endif /* CPUSET_WORDS */
extern cpuset_t cpu_seqid_inuse;
#endif /* (_KERNEL || _KMEMUSER) && _MACHDEP */
/* cpu_cpr_flags values (CPR suspend/resume support; see CPU_FROZEN comment above). */
#define CPU_CPR_OFFLINE 0x0
#define CPU_CPR_ONLINE 0x1
extern int ncpus; /* number of CPUs present */
extern int ncpus_online; /* number of CPUs not quiesced */
extern int max_ncpus; /* max present before ncpus is known */
extern int boot_max_ncpus; /* like max_ncpus but for real */
#else
#endif
/*
 * CPU_CURRENT indicates to thread_affinity_set to use CPU->cpu_id
 * as the target and to grab cpu_lock instead of requiring the caller
 * to grab it.
 */
#define CPU_CURRENT -3 /* sentinel: negative so it can never collide with a real cpu id */
/*
 * Per-CPU statistics
 *
 * cpu_stats_t contains numerous system and VM-related statistics, in the form
 * of gauges or monotonically-increasing event occurrence counts.
 */
/*
 * Pin the caller to its current CPU while it updates that CPU's
 * statistics: disabling kernel preemption prevents migration
 * mid-update.
 */
#define CPU_STATS_ENTER_K() kpreempt_disable()
#define CPU_STATS_EXIT_K() kpreempt_enable()
/*
 * NOTE(review): the lines below are the remains of the CPU_STATS
 * update macros -- their #define heads and most of their bodies are
 * missing from this copy; restore before use.
 */
{ kpreempt_disable(); /* keep from switching CPUs */\
kpreempt_enable(); \
}
}
#endif /* _KERNEL || _KMEMUSER */
/*
 * CPU support routines.
 */
struct zone; /* opaque; defined elsewhere */
/* CPU list / unit management. */
void cpu_list_init(cpu_t *);
void cpu_add_unit(cpu_t *);
void cpu_del_unit(int cpuid);
void cpu_add_active(cpu_t *);
/* Per-CPU kstat setup and interrupt-statistics bookkeeping. */
void cpu_kstat_init(cpu_t *);
void cpu_create_intrstat(cpu_t *);
void cpu_delete_intrstat(cpu_t *);
int cpu_kstat_intrstat_update(kstat_t *, int);
void cpu_intr_swtch_enter(kthread_t *);
void cpu_intr_swtch_exit(kthread_t *);
void mbox_lock_init(void); /* initialize cross-call locks */
/*
 * values for safe_list. Pause state that CPUs are in.
 */
#define PAUSE_IDLE 0 /* normal state */
/* CPU pause/resume machinery. */
void mach_cpu_pause(volatile char *);
void start_cpus(void);
int cpus_paused(void);
void cpu_pause_init(void);
/*
 * Routines for checking CPU states.
 */
/*
 * The processor_info(2) state of a CPU is a simplified representation suitable
 * for use by an application program. Kernel subsystems should utilize the
 * internal per-CPU state as given by the cpu_flags member of the cpu structure,
 * as this information may include platform- or architecture-specific state
 * critical to a subsystem's disposition of a particular CPU.
 */
/* (Re)configure a CPU by id; int return is presumably an errno-style status -- confirm. */
int cpu_configure(int);
int cpu_unconfigure(int);
/* Thread/CPU affinity control; see CPU_CURRENT above for the special target id. */
extern void thread_affinity_clear(kthread_t *t);
extern void affinity_set(int cpu_id);
extern void affinity_clear(void);
/* Per-CPU mstate (presumably microstate accounting -- confirm) lifecycle. */
extern void init_cpu_mstate(struct cpu *, int);
extern void term_cpu_mstate(struct cpu *);
extern void new_cpu_mstate(int, hrtime_t);
/* Weak binding: prevent/allow migration of the calling thread. */
extern void thread_nomigrate(void);
extern void thread_allowmigrate(void);
extern void weakbinding_stop(void);
extern void weakbinding_start(void);
/*
* The following routines affect the CPUs participation in interrupt processing,
* if that is applicable on the architecture. This only affects interrupts
* which aren't directed at the processor (not cross calls).
*
* cpu_disable_intr returns non-zero if interrupts were previously enabled.
*/
/*
* The mutex cpu_lock protects cpu_flags for all CPUs, as well as the ncpus
* and ncpus_online counts.
*/
/*
 * CPU state-change events delivered to registered cpu_setup_func_t
 * callbacks.
 * NOTE(review): the enumerator list is missing from this copy; as
 * written this is an empty enum, which is not valid C -- restore the
 * event values before use.
 */
typedef enum {
} cpu_setup_t;
/* Callback signature: (event, cpu id, registered arg) -> status -- confirm return convention. */
typedef int cpu_setup_func_t(cpu_setup_t, int, void *);
/*
 * Routines used to register interest in cpu's being added to or removed
 * from the system.
 */
extern void register_cpu_setup_func(cpu_setup_func_t *, void *);
extern void unregister_cpu_setup_func(cpu_setup_func_t *, void *);
extern void cpu_state_change_notify(int, cpu_setup_t);
/*
 * Create various strings that describe the given CPU for the
 * processor_info system call and configuration-related kstats.
 */
#define CPU_IDSTRLEN 100 /* buffer size for cpu_idstr/cpu_brandstr -- TODO confirm */
extern void init_cpu_info(struct cpu *);
/* Set up / tear down the opaque cpu_vm_data member of cpu_t. */
extern void cpu_vm_data_init(struct cpu *);
extern void cpu_vm_data_destroy(struct cpu *);
#endif /* _KERNEL */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CPUVAR_H */