mp_machdep.c revision fa96bd918a96f4ac299dc0816aac8a0d40cf1ee7
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2009, Intel Corporation.
* All rights reserved.
*/
#define PSMI_1_6
#include <sys/smp_impldefs.h>
#include <sys/psm.h>
#include <sys/psm_modctl.h>
#include <sys/pit.h>
#include <sys/cmn_err.h>
#include <sys/strlog.h>
#include <sys/clock.h>
#include <sys/debug.h>
#include <sys/rtc.h>
#include <sys/x86_archext.h>
#include <sys/cpupart.h>
#include <sys/cpuvar.h>
#include <sys/cpu_event.h>
#include <sys/cmt.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/memlist.h>
#include <sys/param.h>
#include <sys/promif.h>
#include <sys/cpu_pm.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#endif
#include <sys/mach_intr.h>
#include <vm/hat_i86.h>
#include <sys/kdi_machimpl.h>
#include <sys/sdt.h>
#include <sys/hpet.h>
/*
* Local function prototypes
*/
static void mach_init();
static void mach_picinit();
static uint64_t mach_getcpufreq(void);
static void mach_fixcpufreq(void);
static int mach_clkinit(int, int *);
static void mach_smpinit(void);
static int mach_softlvl_to_vect(int ipl);
static void mach_get_platform(int owner);
static void mach_construct_info();
static int mach_translate_irq(dev_info_t *dip, int irqno);
static int mach_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
	psm_intr_op_t, int *);
static void mach_notify_error(int level, char *errmsg);
static int machhztomhz(uint64_t cpu_freq_hz);
static int mp_disable_intr(int cpun);
static void mp_enable_intr(int cpun);
static int mach_cpu_create_devinfo(cpu_t *cp, dev_info_t **dipp);
static hrtime_t dummy_hrtime(void);
static void dummy_scalehrtime(hrtime_t *);
void cpu_idle(void);
static void cpu_wakeup(cpu_t *, int);
#ifndef __xpv
void cpu_idle_mwait(void);
static void cpu_wakeup_mwait(cpu_t *, int);
#endif
/*
* External reference functions
*/
extern void return_instr();
extern uint64_t freq_tsc(uint32_t *);
#if defined(__i386)
extern uint64_t freq_notsc(uint32_t *);
#endif
extern void pc_gethrestime(timestruc_t *);
extern int cpuid_get_coreid(cpu_t *);
extern int cpuid_get_chipid(cpu_t *);
/*
* PSM functions initialization
*/
void (*psm_shutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_preshutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_notifyf)(int) = (void (*)(int))return_instr;
void (*psm_set_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psm_unset_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*picinitf)() = return_instr;
int (*clkinitf)(int, int *) = (int (*)(int, int *))return_instr;
int (*ap_mlsetup)() = (int (*)(void))return_instr;
void (*send_dirintf)() = return_instr;
void (*setspl)(int) = (void (*)(int))return_instr;
int (*addspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
int (*delspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
void (*kdisetsoftint)(int, struct av_softinfo *) =
	(void (*)(int, struct av_softinfo *))return_instr;
void (*setsoftint)(int, struct av_softinfo *) =
	(void (*)(int, struct av_softinfo *))return_instr;
int (*slvltovect)(int) = (int (*)(int))return_instr;
int (*setlvl)(int, int *) = (int (*)(int, int *))return_instr;
void (*setlvlx)(int, int) = (void (*)(int, int))return_instr;
int (*psm_disable_intr)(int) = mp_disable_intr;
void (*psm_enable_intr)(int) = mp_enable_intr;
void (*psm_notify_error)(int, char *) = (void (*)(int, char *))NULL;
int (*psm_get_clockirq)(int) = NULL;
int (*psm_get_ipivect)(int, int) = NULL;
int (*psm_clkinit)(int) = NULL;
void (*psm_timer_reprogram)(hrtime_t) = NULL;
void (*psm_timer_enable)(void) = NULL;
void (*psm_timer_disable)(void) = NULL;
void (*psm_post_cyclic_setup)(void *arg) = NULL;
int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t,
	int *) = mach_intr_ops;
int (*psm_state)(psm_state_request_t *) = (int (*)(psm_state_request_t *))
	return_instr;
int (*psm_translate_irq)(dev_info_t *, int) = mach_translate_irq;
void (*notify_error)(int, char *) = (void (*)(int, char *))return_instr;
void (*hrtime_tick)(void) = return_instr;
/*
* True if the generic TSC code is our source of hrtime, rather than whatever
* the PSM can provide.
*/
#ifdef __xpv
int tsc_gethrtime_enable = 0;
#else
int tsc_gethrtime_enable = 1;
#endif
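/*
 * (Under __xpv the domain gets time from the hypervisor's per-vcpu
 * scaled-TSC data -- see mach_getcpufreq() below -- so the generic
 * TSC hrtime path defaults to off there.)
 */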
int tsc_gethrtime_initted = 0;
/*
* True if the hrtime implementation is "hires"; namely, better than microdata.
*/
int gethrtime_hires = 0;
/*
 * Local Static Data
 */
static struct psm_ops mach_ops;
static struct psm_ops *mach_set[4] = {&mach_ops, 0, 0, 0};
static ushort_t mach_ver[4] = {0, 0, 0, 0};
/*
* virtualization support for psm
*/
void *psm_vt_ops = NULL;
/*
* If non-zero, idle cpus will become "halted" when there's
* no work to do.
*/
int idle_cpu_use_hlt = 1;
#ifndef __xpv
/*
* If non-zero, idle cpus will use mwait if available to halt instead of hlt.
*/
int idle_cpu_prefer_mwait = 1;
/*
* Set to 0 to avoid MONITOR+CLFLUSH assertion.
*/
int idle_cpu_assert_cflush_monitor = 1;
/*
* If non-zero, idle cpus will not use power saving Deep C-States idle loop.
*/
int idle_cpu_no_deep_c = 0;
/*
* Non-power saving idle loop and wakeup pointers.
*/
void (*non_deep_idle_cpu)() = cpu_idle;
void (*non_deep_idle_disp_enq_thread)(cpu_t *, int);
/*
* Object for the kernel to access the HPET.
 */
hpet_t hpet;

#endif	/* ifndef __xpv */
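/*
 * In brief, the tunables above combine as follows: idle_cpu_use_hlt == 0
 * leaves idle CPUs spinning; otherwise mach_init() installs cpu_idle()
 * (hlt), or cpu_idle_mwait() when the processor supports MONITOR/MWAIT
 * and idle_cpu_prefer_mwait is set. idle_cpu_no_deep_c keeps the idle
 * loop pinned to non_deep_idle_cpu even if a deep C-state driver loads.
 */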
/*ARGSUSED*/
int
pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		if (x86_feature & (X86_HTT)) {
			/*
			 * Hyper-threading is SMT
			 */
			return (1);
		} else {
			return (0);
		}
	case PGHW_CHIP:
		if (x86_feature & (X86_CMP|X86_HTT))
			return (1);
		else
			return (0);
	case PGHW_CACHE:
		if (cpuid_get_ncpu_sharing_last_lvl_cache(cp) > 1)
			return (1);
		else
			return (0);
	case PGHW_POW_ACTIVE:
		if (cpupm_is_enabled(CPUPM_P_STATES))
			return (1);
		else
			return (0);
	case PGHW_POW_IDLE:
		if (cpupm_is_enabled(CPUPM_C_STATES))
			return (1);
		else
			return (0);
	default:
		return (0);
	}
}
/*
 * Compare two CPUs and see if they have a pghw_type_t sharing relationship
 * If pghw_type_t is an unsupported hardware type, then return -1
 */
int
pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
{
	int pgp_a, pgp_b;

	pgp_a = pg_plat_hw_instance_id(cpu_a, hw);
	pgp_b = pg_plat_hw_instance_id(cpu_b, hw);

	/* An instance id of -1 means the sharing relationship is unknown */
	if (pgp_a == -1 || pgp_b == -1)
		return (-1);

	return (pgp_a == pgp_b);
}
/*
 * Return a physical instance identifier for known hardware sharing
 * relationships
 */
id_t
pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		return (cpuid_get_coreid(cpu));
	case PGHW_CACHE:
		return (cpuid_get_last_lvl_cacheid(cpu));
	case PGHW_CHIP:
		return (cpuid_get_chipid(cpu));
	case PGHW_POW_ACTIVE:
		return (cpupm_domain_id(cpu, CPUPM_DTYPE_ACTIVE));
	case PGHW_POW_IDLE:
		return (cpupm_domain_id(cpu, CPUPM_DTYPE_IDLE));
	default:
		return (-1);
	}
}
/*
 * Express preference for optimizing for sharing relationship
 * hw1 vs hw2
 */
pghw_type_t
pg_plat_hw_rank(pghw_type_t hw1, pghw_type_t hw2)
{
	int i, rank1, rank2;

	static pghw_type_t hw_hier[] = {
		PGHW_IPIPE,
		PGHW_CACHE,
		PGHW_CHIP,
		PGHW_POW_IDLE,
		PGHW_POW_ACTIVE,
		PGHW_NUM_COMPONENTS
	};

	for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
		if (hw_hier[i] == hw1)
			rank1 = i;
		if (hw_hier[i] == hw2)
			rank2 = i;
	}

	if (rank1 > rank2)
		return (hw1);
	else
		return (hw2);
}
/*
* Override the default CMT dispatcher policy for the specified
* hardware sharing relationship
*/
pg_cmt_policy_t
pg_plat_cmt_policy(pghw_type_t hw)
{
/*
* For shared caches, also load balance across them to
* maximize aggregate cache capacity
*/
switch (hw) {
case PGHW_CACHE:
return (CMT_BALANCE|CMT_AFFINITY);
default:
return (CMT_NO_POLICY);
}
}
id_t
pg_plat_get_core_id(cpu_t *cpu)
{
	return ((id_t)cpuid_get_coreid(cpu));
}
void
cmp_set_nosteal_interval(void)
{
/* Set the nosteal interval (used by disp_getbest()) to 100us */
nosteal_nsec = 100000UL;
}
/*
 * Routine to ensure initial callers of hrtime get 0 as a return value
*/
static hrtime_t
dummy_hrtime(void)
{
return (0);
}
/* ARGSUSED */
static void
dummy_scalehrtime(hrtime_t *ticks)
{}
/*
* Supports Deep C-State power saving idle loop.
*/
void
cpu_idle_adaptive(void)
{
	(*CPU->cpu_m.mcpu_idle_cpu)();
}
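/*
 * Note that cpu_idle_adaptive() simply indirects through the per-CPU
 * mcpu_idle_cpu hook, so a platform C-state driver can retarget an
 * individual CPU's idle loop while idle_cpu itself stays pointed here.
 */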
/*
* Function called by CPU idle notification framework to check whether CPU
 * has been awakened. It will be called with interrupts disabled.
* If CPU has been awakened, call cpu_idle_exit() to notify CPU idle
* notification framework.
*/
/*ARGSUSED*/
static void
cpu_idle_check_wakeup(void *arg)
{
/*
* Toggle interrupt flag to detect pending interrupts.
* If interrupt happened, do_interrupt() will notify CPU idle
* notification framework so no need to call cpu_idle_exit() here.
*/
sti();
SMT_PAUSE();
cli();
}
/*
 * Idle the present CPU until awakened via an interrupt
 */
void
cpu_idle(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpu_sid = cpup->cpu_seqid;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;

	/*
	 * If this CPU is online, and there's multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmap
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted CPU bitmap is checked to determine who
	 * (if anyone) should be awakened. We therefore need to first
	 * add ourselves to the bitmap, and then check if there
	 * is any work available. The order is important to prevent a race
	 * that can lead to work languishing on a run queue somewhere while
	 * this CPU remains halted.
	 *
	 * Either the producing CPU will see we're halted and will awaken us,
	 * or this CPU will see the work available in disp_anywork().
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitset)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmap, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 *
	 * This means that the ordering of the poke and the clearing
	 * of the bit by cpu_wakeup is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_idle() must disable interrupts, then check for the bit.
	 */
	cli();

	if (hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid) == 0) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		sti();
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		sti();
		return;
	}

	if (cpu_idle_enter(IDLE_STATE_C1, 0,
	    cpu_idle_check_wakeup, NULL) == 0) {
		mach_cpu_idle();
		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
	}

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}
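/*
 * Illustrative interleaving of the protocol described above:
 *   producer: enqueue work -> sees our haltset bit -> clears it -> pokes
 *   idler:    cli() -> sees bit already cleared -> sti(); return (no halt)
 * If instead the poke arrives after the idler's bit check, interrupts are
 * still disabled, so the interrupt stays pending and wakes the CPU the
 * moment mach_cpu_idle() halts with interrupts re-enabled.
 */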
/*
* If "cpu" is halted, then wake it up clearing its halted bit in advance.
* Otherwise, see if other CPUs in the cpu partition are halted and need to
* be woken up so that they can steal the thread we placed on this CPU.
* This function is only used on MP systems.
*/
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t cpu_found;
	processorid_t cpu_sid;
	cpupart_t *cp;

	cp = cpu->cpu_part;
	cpu_sid = cpu->cpu_seqid;
	if (bitset_in_set(&cp->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if this is for a bound thread.
	 */
	if (bound)
		return;

	/*
	 * The CPU specified for wakeup isn't currently halted, so check
	 * to see if there are any other halted CPUs in the partition,
	 * and if there are then awaken one.
	 */
	do {
		cpu_found = bitset_find(&cp->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0);

	if (cpu_found != CPU->cpu_seqid)
		poke_cpu(cpu_seq[cpu_found]->cpu_id);
}
#ifndef __xpv
/*
 * Function called by CPU idle notification framework to check whether CPU
 * has been awakened. It will be called with interrupts disabled.
 * If CPU has been awakened, call cpu_idle_exit() to notify CPU idle
 * notification framework.
 */
static void
cpu_idle_mwait_check_wakeup(void *arg)
{
	volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;

	ASSERT(arg != NULL);
	if (*mcpu_mwait != MWAIT_HALTED) {
		/*
		 * CPU has been awakened, notify CPU idle notification system.
		 */
		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
	} else {
/*
* Toggle interrupt flag to detect pending interrupts.
* If interrupt happened, do_interrupt() will notify CPU idle
* notification framework so no need to call cpu_idle_exit()
* here.
*/
sti();
SMT_PAUSE();
cli();
}
}
/*
* Idle the present CPU until awakened via touching its monitored line
*/
void
cpu_idle_mwait(void)
{
	volatile uint32_t *mcpu_mwait = CPU->cpu_m.mcpu_mwait;
	cpu_t *cpup = CPU;
	processorid_t cpu_sid = cpup->cpu_seqid;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;

	/*
	 * Set our mcpu_mwait here, so we can tell if anyone tries to
	 * wake us between now and when we call mwait. No other cpu will
	 * attempt to set our mcpu_mwait until we add ourself to the halted
	 * CPU bitmap.
	 */
	*mcpu_mwait = MWAIT_HALTED;

	/*
	 * If this CPU is online, and there's multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmap
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted CPU bitmap is checked to determine who
	 * (if anyone) should be awakened. We therefore need to first
	 * add ourselves to the bitmap, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmap, and a write to our mcpu_mwait.
	 *
	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 * To avoid a lost wakeup, arm the monitor before checking if another
	 * cpu wrote to mcpu_mwait to wake us up.
	 */
	i86_monitor(mcpu_mwait, 0, 0);
	if (*mcpu_mwait == MWAIT_HALTED) {
		if (cpu_idle_enter(IDLE_STATE_C1, 0,
		    cpu_idle_mwait_check_wakeup, (void *)mcpu_mwait) == 0) {
			if (*mcpu_mwait == MWAIT_HALTED) {
				i86_mwait(0, 0);
			}
			cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
		}
	}

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}
/*
* If "cpu" is halted in mwait, then wake it up clearing its halted bit in
* advance. Otherwise, see if other CPUs in the cpu partition are halted and
* need to be woken up so that they can steal the thread we placed on this CPU.
* This function is only used on MP systems.
*/
static void
cpu_wakeup_mwait(cpu_t *cp, int bound)
{
	cpupart_t *cpu_part;
	uint_t cpu_found;
	processorid_t cpu_sid;

	cpu_part = cp->cpu_part;
	cpu_sid = cp->cpu_seqid;

	if (bitset_in_set(&cpu_part->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * woken up in a moment.
		 */
		bitset_atomic_del(&cpu_part->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Waking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 *
		 * However, instead of checking for this case, we simply
		 * write our wakeup value, which is harmless and less
		 * expensive than always checking if we
		 * are waking ourself, which is an uncommon case.
		 */
		MWAIT_WAKEUP(cp);	/* write to monitored line */
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound || ncpus == 1)
		return;

	/*
	 * See if there's any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		cpu_found = bitset_find(&cpu_part->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cpu_part->cp_haltset,
	    cpu_found) < 0);

	/*
	 * Do not check if cpu_found is ourself as monitor/mwait
	 * wakeup is cheap.
	 */
	MWAIT_WAKEUP(cpu_seq[cpu_found]);	/* write to monitored line */
}
#endif
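/*
 * Note the contrast between the two wakeup paths above: cpu_wakeup()
 * must poke_cpu() (an IPI) to pop the target out of hlt, while
 * cpu_wakeup_mwait() merely writes the target's monitored cache line,
 * which is why it does not bother checking whether the chosen CPU is
 * itself -- a monitor/mwait wakeup is cheap.
 */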
void (*cpu_pause_handler)(volatile char *) = NULL;
static int
mp_disable_intr(int cpun)
{
	/*
	 * switch to the offline cpu
	 */
	affinity_set(cpun);
	/*
	 * raise ipl to just below cross call
	 */
	splx(XC_MED_PIL - 1);
	/*
	 * set base spl to prevent the next swtch to idle from
	 * lowering back to ipl 0
	 */
	CPU->cpu_intr_actv |= (1 << (XC_MED_PIL - 1));
	set_base_spl();
	affinity_clear();
	return (DDI_SUCCESS);
}

static void
mp_enable_intr(int cpun)
{
	/*
	 * switch to the online cpu
	 */
	affinity_set(cpun);
	/*
	 * clear the interrupt active mask
	 */
	CPU->cpu_intr_actv &= ~(1 << (XC_MED_PIL - 1));
	set_base_spl();
	(void) spl0();
	affinity_clear();
}
static void
mach_get_platform(int owner)
{
	void **srv_opsp;
	void **clt_opsp;
	int i;
	int total_ops;

	/* fix up psm ops */
	srv_opsp = (void **)mach_set[0];
	clt_opsp = (void **)mach_set[owner];
	if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01)
		total_ops = sizeof (struct psm_ops_ver01) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_1)
		/* no psm_notify_func */
		total_ops = OFFSETOF(struct psm_ops, psm_notify_func) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_2)
		/* no psm_timer funcs */
		total_ops = OFFSETOF(struct psm_ops, psm_timer_reprogram) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_3)
		/* no psm_preshutdown function */
		total_ops = OFFSETOF(struct psm_ops, psm_preshutdown) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_4)
		/* no psm_intr_ops function */
		total_ops = OFFSETOF(struct psm_ops, psm_intr_ops) /
		    sizeof (void (*)(void));
	else
		total_ops = sizeof (struct psm_ops) / sizeof (void (*)(void));

	/*
	 * Save the version of the PSM module, in case we need to
	 * behave differently based on version.
	 */
	mach_ver[0] = mach_ver[owner];

	for (i = 0; i < total_ops; i++)
		if (clt_opsp[i] != NULL)
			srv_opsp[i] = clt_opsp[i];
}
static void
mach_construct_info()
{
	struct psm_sw *swp;
	int mach_cnt[PSM_OWN_OVERRIDE+1] = {0};
	int conflict_owner = 0;

	if (psmsw->psw_forw == psmsw)
		panic("No valid PSM modules found");
	mutex_enter(&psmsw_lock);
	for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
		if (!(swp->psw_flag & PSM_MOD_IDENTIFY))
			continue;
		mach_set[swp->psw_infop->p_owner] = swp->psw_infop->p_ops;
		mach_ver[swp->psw_infop->p_owner] = swp->psw_infop->p_version;
		mach_cnt[swp->psw_infop->p_owner]++;
	}
	mutex_exit(&psmsw_lock);

	mach_get_platform(PSM_OWN_SYS_DEFAULT);

	/* check to see if there are any conflicts */
	if (mach_cnt[PSM_OWN_EXCLUSIVE] > 1)
		conflict_owner = PSM_OWN_EXCLUSIVE;
	if (mach_cnt[PSM_OWN_OVERRIDE] > 1)
		conflict_owner = PSM_OWN_OVERRIDE;
	if (conflict_owner) {
		/* remove all psm modules except uppc */
		cmn_err(CE_WARN,
		    "Conflicts detected on the following PSM modules:");
		mutex_enter(&psmsw_lock);
		for (swp = psmsw->psw_forw; swp != psmsw;
		    swp = swp->psw_forw) {
			if (swp->psw_infop->p_owner == conflict_owner)
				cmn_err(CE_WARN, "%s ",
				    swp->psw_infop->p_mach_idstring);
		}
		mutex_exit(&psmsw_lock);
		cmn_err(CE_WARN,
		    "Setting the system back to SINGLE processor mode!");
		return;
	}

	if (mach_set[PSM_OWN_EXCLUSIVE])
		mach_get_platform(PSM_OWN_EXCLUSIVE);

	if (mach_set[PSM_OWN_OVERRIDE])
		mach_get_platform(PSM_OWN_OVERRIDE);
}
static void
mach_init()
{
	struct psm_ops *pops;

	mach_construct_info();

	pops = mach_set[0];

	/* register the interrupt and clock initialization routines */
	picinitf = mach_picinit;
	clkinitf = mach_clkinit;
	psm_get_clockirq = pops->psm_get_clockirq;

	/* register the interrupt setup code */
	slvltovect = mach_softlvl_to_vect;
	addspl = pops->psm_addspl;
	delspl = pops->psm_delspl;

	if (pops->psm_translate_irq)
		psm_translate_irq = pops->psm_translate_irq;
	if (pops->psm_intr_ops)
		psm_intr_ops = pops->psm_intr_ops;

#if defined(PSMI_1_1) || defined(PSMI_1_2) || defined(PSMI_1_3)
	/*
	 * Time-of-day functionality now handled in TOD modules.
	 * (Warn about PSM modules that think that we're going to use
	 * their ops vectors.)
	 */
	if (pops->psm_tod_get)
		cmn_err(CE_WARN, "obsolete psm_tod_get op %p",
		    (void *)pops->psm_tod_get);

	if (pops->psm_tod_set)
		cmn_err(CE_WARN, "obsolete psm_tod_set op %p",
		    (void *)pops->psm_tod_set);
#endif

	if (pops->psm_notify_error) {
		psm_notify_error = mach_notify_error;
		notify_error = pops->psm_notify_error;
	}

	(*pops->psm_softinit)();

	/*
	 * Initialize the dispatcher's function hooks to enable CPU halting
	 * when idle. Set both the deep-idle and non-deep-idle hooks.
	 *
	 * Assume we can use the power saving deep-idle loop
	 * cpu_idle_adaptive. The platform deep-idle driver will reset our
	 * idle loop to non_deep_idle_cpu if the power saving deep-idle
	 * feature is not available.
	 *
	 * Do not use monitor/mwait if idle_cpu_use_hlt is not set (spin loop)
	 * or idle_cpu_prefer_mwait is not set.
	 * Allocate the monitor/mwait buffer for cpu0.
	 */
#ifndef __xpv
	non_deep_idle_disp_enq_thread = disp_enq_thread;
#endif
	if (idle_cpu_use_hlt) {
		idle_cpu = cpu_idle_adaptive;
		CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
#ifndef __xpv
		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait) {
			CPU->cpu_m.mcpu_mwait = cpuid_mwait_alloc(CPU);
			/*
			 * Protect ourself from insane mwait size.
			 */
			if (CPU->cpu_m.mcpu_mwait == NULL) {
#ifdef DEBUG
				cmn_err(CE_NOTE, "Using hlt idle.  Cannot "
				    "handle cpu 0 mwait size.");
#endif
				idle_cpu_prefer_mwait = 0;
				CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
			} else {
				CPU->cpu_m.mcpu_idle_cpu = cpu_idle_mwait;
			}
		} else {
			CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
		}
		non_deep_idle_cpu = CPU->cpu_m.mcpu_idle_cpu;

		/*
		 * Disable power saving deep idle loop?
		 */
		if (idle_cpu_no_deep_c) {
			idle_cpu = non_deep_idle_cpu;
		}
#endif
	}

	mach_smpinit();
}
static void
mach_smpinit(void)
{
	struct psm_ops *pops;
	processorid_t cpu_id;
	int cnt;
	cpuset_t cpumask;

	pops = mach_set[0];
	CPUSET_ZERO(cpumask);

	cpu_id = -1;
	cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	for (cnt = 0; cpu_id != -1; cnt++) {
		CPUSET_ADD(cpumask, cpu_id);
		cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	}

	mp_cpus = cpumask;

	/* MP related routines */
	ap_mlsetup = pops->psm_post_cpu_start;
	send_dirintf = pops->psm_send_ipi;
	psm_get_ipivect = pops->psm_get_ipivect;

	/* optional MP related routines */
	if (pops->psm_shutdown)
		psm_shutdownf = pops->psm_shutdown;
	if (pops->psm_preshutdown)
		psm_preshutdownf = pops->psm_preshutdown;
	if (pops->psm_notify_func)
		psm_notifyf = pops->psm_notify_func;
	if (pops->psm_set_idlecpu)
		psm_set_idle_cpuf = pops->psm_set_idlecpu;
	if (pops->psm_unset_idlecpu)
		psm_unset_idle_cpuf = pops->psm_unset_idlecpu;

	psm_clkinit = pops->psm_clkinit;

	if (pops->psm_timer_reprogram)
		psm_timer_reprogram = pops->psm_timer_reprogram;
	if (pops->psm_timer_enable)
		psm_timer_enable = pops->psm_timer_enable;
	if (pops->psm_timer_disable)
		psm_timer_disable = pops->psm_timer_disable;
	if (pops->psm_post_cyclic_setup)
		psm_post_cyclic_setup = pops->psm_post_cyclic_setup;
	if (pops->psm_state)
		psm_state = pops->psm_state;

	/*
	 * Set these vectors here so they can be used by Suspend/Resume
	 * on UP machines.
	 */
	if (pops->psm_disable_intr)
		psm_disable_intr = pops->psm_disable_intr;
	if (pops->psm_enable_intr)
		psm_enable_intr = pops->psm_enable_intr;

	/* check for multiple CPUs */
	if (cnt < 2)
		return;

	/* check for MP platforms */
	if (pops->psm_cpu_start == NULL)
		return;

	/*
	 * Set the dispatcher hook to enable cpu "wake up"
	 * when a thread becomes runnable.
	 */
	if (idle_cpu_use_hlt) {
		disp_enq_thread = cpu_wakeup;
#ifndef __xpv
		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait)
			disp_enq_thread = cpu_wakeup_mwait;
		non_deep_idle_disp_enq_thread = disp_enq_thread;
#endif
	}
}
static void
mach_picinit()
{
	struct psm_ops *pops;

	pops = mach_set[0];

	/* register the interrupt handlers */
	setlvl = pops->psm_intr_enter;
	setlvlx = pops->psm_intr_exit;

	/* initialize the interrupt hardware */
	(*pops->psm_picinit)();

	/* set interrupt mask for current ipl */
	setspl = pops->psm_setspl;
	cli();
	setspl(CPU->cpu_pri);
}
#define MEGA_HZ 1000000
#ifdef __xpv
int xpv_cpufreq_workaround = 1;
int xpv_cpufreq_verbose = 0;
#else /* __xpv */
static uint64_t
mach_calchz(uint32_t pit_counter, uint64_t *processor_clks)
{
	uint64_t cpu_hz;

	if ((pit_counter == 0) || (*processor_clks == 0) ||
	    (*processor_clks > (((uint64_t)-1) / PIT_HZ)))
		return (0);

	cpu_hz = ((uint64_t)PIT_HZ * *processor_clks) / pit_counter;

	return (cpu_hz);
}
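/*
 * Worked example: with PIT_HZ == 1193182, sampling 150,000,000 TSC clocks
 * against a PIT count of 59659 (roughly 50ms) gives
 * 1193182 * 150000000 / 59659 == ~3.0GHz.
 */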
#endif /* __xpv */
static uint64_t
mach_getcpufreq(void)
{
#if defined(__xpv)
	vcpu_time_info_t *vti = &CPU->cpu_m.mcpu_vcpu_info->time;
	uint64_t cpu_hz;

	/*
	 * During dom0 bringup, it was noted that on at least one older
	 * Intel HT machine, the hypervisor initially gives a tsc_to_system_mul
	 * value that is quite wrong (the 3.06GHz clock was reported
	 * as 4.77GHz)
	 *
	 * The curious thing is, that if you stop the kernel at entry,
	 * breakpoint here and inspect the value with kmdb, the value
	 * is correct - but if you don't stop and simply enable the
	 * printf statement (below), you can see the bad value printed
	 * here. Almost as if something kmdb did caused the hypervisor to
	 * figure it out correctly. And, note that the hypervisor
	 * eventually -does- figure it out correctly ... if you look at
	 * the field later in the life of dom0, it is correct.
	 *
	 * For now, on dom0, we employ a slightly cheesy workaround of
	 * using the DOM0_PHYSINFO hypercall.
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info) && xpv_cpufreq_workaround) {
		xen_sysctl_t op;

		op.cmd = XEN_SYSCTL_physinfo;
		op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
		if (HYPERVISOR_sysctl(&op) != 0)
			panic("physinfo op refused");

		cpu_hz = 1000 * (uint64_t)op.u.physinfo.cpu_khz;
	} else {
		cpu_hz = (UINT64_C(1000000000) << 32) / vti->tsc_to_system_mul;

		if (vti->tsc_shift < 0)
			cpu_hz <<= -vti->tsc_shift;
		else
			cpu_hz >>= vti->tsc_shift;
	}

	if (xpv_cpufreq_verbose)
		printf("mach_getcpufreq: system_mul 0x%x, shift %d, "
		    "cpu_hz %" PRId64 "Hz\n",
		    vti->tsc_to_system_mul, vti->tsc_shift, cpu_hz);

	return (cpu_hz);
#else	/* __xpv */
	uint32_t pit_counter;
	uint64_t processor_clks;

	if (x86_feature & X86_TSC) {
		/*
		 * We have a TSC. freq_tsc() knows how to measure the number
		 * of clock cycles sampled against the PIT.
		 */
		ulong_t flags = clear_int_flag();

		processor_clks = freq_tsc(&pit_counter);
		restore_int_flag(flags);
		return (mach_calchz(pit_counter, &processor_clks));
	} else if (x86_vendor == X86_VENDOR_Cyrix || x86_type == X86_TYPE_P5) {
#if defined(__amd64)
		panic("mach_getcpufreq: no TSC!");
#elif defined(__i386)
		/*
		 * We are a Cyrix based on a 6x86 core or an Intel Pentium
		 * for which freq_notsc() knows how to measure the number of
		 * elapsed clock cycles sampled against the PIT
		 */
		ulong_t flags = clear_int_flag();

		processor_clks = freq_notsc(&pit_counter);
		restore_int_flag(flags);
		return (mach_calchz(pit_counter, &processor_clks));
#endif	/* __i386 */
	}

	/* We do not know how to calculate cpu frequency for this cpu. */
	return (0);
#endif	/* __xpv */
}
/*
* If the clock speed of a cpu is found to be reported incorrectly, do not add
* to this array, instead improve the accuracy of the algorithm that determines
* the clock speed of the processor or extend the implementation to support the
* vendor as appropriate. This is here only to support adjusting the speed on
* older slower processors that mach_fixcpufreq() would not be able to account
* for otherwise.
*/
/*
* On fast processors the clock frequency that is measured may be off by
* a few MHz from the value printed on the part. This is a combination of
* the factors that for such fast parts being off by this much is within
* the tolerances for manufacture and because of the difficulties in the
* measurement that can lead to small error. This function uses some
* heuristics in order to tweak the value that was measured to match what
* is most likely printed on the part.
*
* Some examples:
* AMD Athlon 1000 mhz measured as 998 mhz
* Intel Pentium III Xeon 733 mhz measured as 731 mhz
* Intel Pentium IV 1500 mhz measured as 1495mhz
*
* If in the future this function is no longer sufficient to correct
* for the error in the measurement, then the algorithm used to perform
* the measurement will have to be improved in order to increase accuracy
* rather than adding horrible and questionable kludges here.
*
* This is called after the cyclics subsystem because of the potential
* that the heuristics within may give a worse estimate of the clock
* frequency than the value that was measured.
*/
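/*
 * Worked example of the rounding below: a part measured at 731 MHz gives
 * mul = (3*731 + 100) / 200 = 11 and near66 = (200*11 + 1) / 3 = 733
 * (delta 2), while near50 = 750 (delta 19); 733 wins and, being within
 * the 6 MHz limit, becomes the corrected cpu_freq.
 */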
static void
mach_fixcpufreq(void)
{
	uint32_t freq, mul, near66, delta66, near50, delta50, fixed, delta, i;

	freq = (uint32_t)cpu_freq;

	/*
	 * Find the nearest integer multiple of 200/3 (about 66) MHz to the
	 * measured speed taking into account that the 667 MHz parts were
	 * the first to round-up.
	 */
	mul = (uint32_t)((3 * (uint64_t)freq + 100) / 200);
	near66 = (uint32_t)((200 * (uint64_t)mul + ((mul >= 10) ? 1 : 0)) / 3);
	delta66 = (near66 > freq) ? (near66 - freq) : (freq - near66);

	/* Find the nearest integer multiple of 50 MHz to the measured speed */
	mul = (freq + 25) / 50;
	near50 = mul * 50;
	delta50 = (near50 > freq) ? (near50 - freq) : (freq - near50);

	/* Find the closer of the two */
	if (delta66 < delta50) {
		fixed = near66;
		delta = delta66;
	} else {
		fixed = near50;
		delta = delta50;
	}

	if (fixed > INT_MAX)
		return;

	/*
	 * Some older parts have a core clock frequency that is not an
	 * integral multiple of 50 or 66 MHz. Check if one of the old
	 * clock frequencies is closer to the measured value than any
	 * of the integral multiples of 50 and 66, and if so set fixed
	 * and delta appropriately to represent the closest value.
	 */
	i = sizeof (x86_cpu_freq) / sizeof (int);
	while (i > 0) {
		i--;

		if (x86_cpu_freq[i] <= freq) {
			mul = freq - x86_cpu_freq[i];

			if (mul < delta) {
				fixed = x86_cpu_freq[i];
				delta = mul;
			}

			break;
		}

		mul = x86_cpu_freq[i] - freq;

		if (mul < delta) {
			fixed = x86_cpu_freq[i];
			delta = mul;
		}
	}

	/*
	 * Set a reasonable maximum for how much to correct the measured
	 * result by. This check is here to prevent the adjustment made
	 * by this function from being more harm than good. It is entirely
	 * possible that in the future parts will be made that are not
	 * integral multiples of 66 or 50 in clock frequency or that
	 * someone may overclock a part to some odd frequency. If the
	 * measured value is farther from the corrected value than
	 * allowed, then assume the corrected value is in error and use
	 * the measured value.
	 */
	if (6 < delta)
		return;

	cpu_freq = (int)fixed;
}
static int
machhztomhz(uint64_t cpu_freq_hz)
{
	uint64_t cpu_mhz;

	/* Round to nearest MHZ */
	cpu_mhz = (cpu_freq_hz + (MEGA_HZ / 2)) / MEGA_HZ;

	if (cpu_mhz > INT_MAX)
		return (0);

	return ((int)cpu_mhz);
}
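/*
 * For example, a measured cpu_freq_hz of 2,992,481,221 rounds to
 * (2992481221 + 500000) / 1000000 == 2992 MHz.
 */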
static int
mach_clkinit(int preferred_mode, int *set_mode)
{
	struct psm_ops *pops;
	int resolution;

	pops = mach_set[0];
	cpu_freq_hz = mach_getcpufreq();

	cpu_freq = machhztomhz(cpu_freq_hz);

	if (!(x86_feature & X86_TSC) || (cpu_freq == 0))
		tsc_gethrtime_enable = 0;

#ifndef __xpv
	if (tsc_gethrtime_enable) {
		tsc_hrtimeinit(cpu_freq_hz);
	} else
#endif
	{
		if (pops->psm_hrtimeinit)
			(*pops->psm_hrtimeinit)();
		gethrtimef = pops->psm_gethrtime;
		gethrtimeunscaledf = gethrtimef;
		/* scalehrtimef will remain dummy */
	}

	mach_fixcpufreq();

	if (mach_ver[0] >= PSM_INFO_VER01_3) {
		if (preferred_mode == TIMER_ONESHOT) {
			resolution = (*pops->psm_clkinit)(0);
			if (resolution != 0) {
				*set_mode = TIMER_ONESHOT;
				return (resolution);
			}
		}

		/*
		 * either periodic mode was requested or could not set to
		 * one-shot mode
		 */
		resolution = (*pops->psm_clkinit)(hz);
		/*
		 * psm should be able to do periodic, so we do not check
		 * for return value of psm_clkinit here.
		 */
		*set_mode = TIMER_PERIODIC;
		return (resolution);
	} else {
		/*
		 * PSMI interface prior to PSMI_3 does not define a return
		 * value for psm_clkinit, so the return value is ignored.
		 */
		(void) (*pops->psm_clkinit)(hz);
		*set_mode = TIMER_PERIODIC;
		return (nsec_per_tick);
	}
}
/*ARGSUSED*/
static int
mach_softlvl_to_vect(int ipl)
{
	setsoftint = av_set_softint_pending;
	kdisetsoftint = kdi_av_set_softint_pending;

	return (PSM_SV_SOFTWARE);
}
#ifdef DEBUG
/*
 * This is here to allow us to simulate cpus that refuse to start.
 */
cpuset_t cpufailset;
#endif
int
mach_cpu_start(struct cpu *cp, void *ctx)
{
	struct psm_ops *pops = mach_set[0];
	processorid_t id = cp->cpu_id;

#ifdef DEBUG
	if (CPU_IN_SET(cpufailset, id))
		return (0);
#endif
	return ((*pops->psm_cpu_start)(id, ctx));
}

int
mach_cpuid_start(processorid_t id, void *ctx)
{
	struct psm_ops *pops = mach_set[0];

#ifdef DEBUG
	if (CPU_IN_SET(cpufailset, id))
		return (0);
#endif
	return ((*pops->psm_cpu_start)(id, ctx));
}
/*
 * Default handler to create device node for CPU.
 * One reference count will be held on created device node.
 */
static int
mach_cpu_create_devinfo(cpu_t *cp, dev_info_t **dipp)
{
	int rv, circ;
	dev_info_t *dip;
	static kmutex_t cpu_node_lock;
	static dev_info_t *cpu_nex_devi = NULL;

	ASSERT(cp != NULL);
	ASSERT(dipp != NULL);
	*dipp = NULL;

	if (cpu_nex_devi == NULL) {
		mutex_enter(&cpu_node_lock);
		/* First check whether cpus exists. */
		cpu_nex_devi = ddi_find_devinfo("cpus", -1, 0);
		/* Create cpus if it doesn't exist. */
		if (cpu_nex_devi == NULL) {
			ndi_devi_enter(ddi_root_node(), &circ);
			rv = ndi_devi_alloc(ddi_root_node(), "cpus",
			    (pnode_t)DEVI_SID_NODEID, &dip);
			if (rv != NDI_SUCCESS) {
				ndi_devi_exit(ddi_root_node(), circ);
				mutex_exit(&cpu_node_lock);
				cmn_err(CE_CONT,
				    "?failed to create cpu nexus device.\n");
				return (PSM_FAILURE);
			}
			ASSERT(dip != NULL);
			(void) ndi_devi_online(dip, 0);
			ndi_devi_exit(ddi_root_node(), circ);
			cpu_nex_devi = dip;
		}
		mutex_exit(&cpu_node_lock);
	}

	/*
	 * create a child node for cpu identified as 'cpu_id'
	 */
	ndi_devi_enter(cpu_nex_devi, &circ);
	dip = ddi_add_child(cpu_nex_devi, "cpu", DEVI_SID_NODEID, cp->cpu_id);
	if (dip == NULL) {
		rv = PSM_FAILURE;
	} else {
		*dipp = dip;
		(void) ndi_hold_devi(dip);
		rv = PSM_SUCCESS;
	}
	ndi_devi_exit(cpu_nex_devi, circ);

	return (rv);
}

int (*psm_cpu_create_devinfo)(cpu_t *, dev_info_t **) = mach_cpu_create_devinfo;
/*
 * Create cpu device node in device tree and online it.
 * Return created dip with reference count held if requested.
 */
int
mach_cpu_create_device_node(cpu_t *cp, dev_info_t **dipp)
{
	int rv;
	dev_info_t *dip = NULL;

	ASSERT(psm_cpu_create_devinfo != NULL);
	rv = psm_cpu_create_devinfo(cp, &dip);
	if (rv == PSM_SUCCESS) {
		/* Recursively attach driver for parent nexus device. */
		if (ndi_devi_online(ddi_get_parent(dip), NDI_CONFIG) ==
		    DDI_SUCCESS) {
			/* Configure cpu itself and descendants. */
			(void) ndi_devi_online(dip,
			    NDI_ONLINE_ATTACH | NDI_CONFIG);
		}
		if (dipp != NULL) {
			*dipp = dip;
		} else {
			(void) ndi_rele_devi(dip);
		}
	}

	return (rv);
}
/*ARGSUSED*/
static int
mach_translate_irq(dev_info_t *dip, int irqno)
{
return (irqno); /* default to NO translation */
}
static void
mach_notify_error(int level, char *errmsg)
{
	/*
	 * SL_FATAL is passed in once panicstr is set, deliver it
	 * as CE_PANIC. Also, translate SL_ codes back to CE_
	 * codes for the psmi handler
	 */
	if (level & SL_FATAL)
		(*notify_error)(CE_PANIC, errmsg);
	else if (level & SL_WARN)
		(*notify_error)(CE_WARN, errmsg);
	else if (level & SL_NOTE)
		(*notify_error)(CE_NOTE, errmsg);
	else if (level & SL_CONSOLE)
		(*notify_error)(CE_CONT, errmsg);
}
/*
 * mach_intr_ops() provides the default basic intr_ops interface for the new
 * DDI
* interrupt framework if the PSM doesn't have one.
*
* Input:
* dip - pointer to the dev_info structure of the requested device
* hdlp - pointer to the internal interrupt handle structure for the
* requested interrupt
* intr_op - opcode for this call
* result - pointer to the integer that will hold the result to be
* passed back if return value is PSM_SUCCESS
*
* Output:
* return value is either PSM_SUCCESS or PSM_FAILURE
*/
static int
mach_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	struct intrspec *ispec;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
		    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = psm_translate_irq(dip, ispec->intrspec_vec);
		break;
	case PSM_INTR_OP_GET_CAP:
		*result = 0;
		break;
	case PSM_INTR_OP_GET_PENDING:
	case PSM_INTR_OP_CLEAR_MASK:
	case PSM_INTR_OP_SET_MASK:
	case PSM_INTR_OP_GET_SHARED:
	case PSM_INTR_OP_SET_PRI:
	case PSM_INTR_OP_SET_CAP:
	case PSM_INTR_OP_SET_CPU:
	case PSM_INTR_OP_GET_INTR:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}
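/*
 * With this default in place only fixed (IRQ line) interrupts work:
 * ALLOC/NAVAIL report a single available vector for DDI_INTR_TYPE_FIXED
 * and zero otherwise, and CHECK_MSI masks off the MSI/MSI-X type bits,
 * so a PSM must supply its own psm_intr_ops for MSI support.
 */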
/*
* Return 1 if CMT load balancing policies should be
* implemented across instances of the specified hardware
* sharing relationship.
*/
int
pg_cmt_load_bal_hw(pghw_type_t hw)
{
	if (hw == PGHW_IPIPE ||
	    hw == PGHW_CHIP)
		return (1);
	else
		return (0);
}
/*
 * Return 1 if thread affinity policies should be implemented
 * for instances of the specified hardware sharing relationship.
 */
int
pg_cmt_affinity_hw(pghw_type_t hw)
{
if (hw == PGHW_CACHE)
return (1);
else
return (0);
}