cpr_impl.c revision 58865bb7f764a6ca11f3057bee77153724ebb239
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/*
* Platform specific implementation code
* Currently only suspend to RAM is supported (ACPI S3)
*/
#define SUNDDI_IMPL
#include <sys/prom_isa.h>
#include <sys/prom_plat.h>
#include <vm/seg_kmem.h>
#include <sys/cpu_module.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/bootconf.h>
#include <sys/smp_impldefs.h>
#include <sys/cpr_wakecode.h>
#include <sys/x86_archext.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpr.h>
#include <sys/epm.h>
#include <sys/rm_platter.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <sys/sysmacros.h>
#define AFMT "%lx"
extern int flushes_require_xcalls;
extern cpuset_t cpu_ready_set;
#if defined(__amd64)
extern void *wc_long_mode_64(void);
#endif /* __amd64 */
extern int tsc_gethrtime_enable;
extern void i_cpr_start_cpu(void);
void (*cpr_start_cpu_func)(void) = i_cpr_start_cpu;
/*
 * File scope state and forward declarations, reconstructed here from
 * their uses in the rest of this file.
 */
static wc_cpu_t		*wc_other_cpus = NULL;
static cpuset_t		procset;
static ushort_t		*warm_reset_vector = NULL;

static int	i_cpr_platform_alloc(psm_state_request_t *req);
static void	i_cpr_platform_free(psm_state_request_t *req);
static int	i_cpr_save_apic(psm_state_request_t *req);
static int	i_cpr_restore_apic(psm_state_request_t *req);
static int	wait_for_set(cpuset_t *set, int who);
static void	i_cpr_save_stack(kthread_t *t, wc_cpu_t *wc_cpu);
#ifdef STACK_GROWTH_DOWN
#define CPR_GET_STACK_START(t) ((t)->t_stkbase)
#define CPR_GET_STACK_END(t) ((t)->t_stk)
#else
#define CPR_GET_STACK_START(t) ((t)->t_stk)
#define CPR_GET_STACK_END(t) ((t)->t_stkbase)
#endif /* STACK_GROWTH_DOWN */
/*
* restart paused slave cpus
*/
void
i_cpr_machdep_setup(void)
{
if (ncpus > 1) {
start_cpus();
}
}
/*
* Stop all interrupt activities in the system
*/
void
i_cpr_stop_intr(void)
{
(void) spl7();
}
/*
* Set machine up to take interrupts
*/
void
i_cpr_enable_intr(void)
{
(void) spl0();
}
/*
 * Save miscellaneous information which needs to be written to the
 * state file. This information is required to re-initialize
 * kernel state during resume.
 */
void
i_cpr_save_machdep_info(void)
{
int notcalled = 0;
}
void
i_cpr_set_tbr(void)
{
}
int
i_cpr_bootcpuid(void)
{
return (0);
}
/*
* cpu0 should contain bootcpu info
*/
cpu_t *
i_cpr_bootcpu(void)
{
return (cpu_get(i_cpr_bootcpuid()));
}
/*
* Save context for the specified CPU
*/
void *
i_cpr_save_context(void *arg)
{
	wc_cpu_t	*wc_cpu;
	int	resuming;
	int	ret;

	/*
	 * Reconstructed setup: "arg" is assumed to be this CPU's index
	 * into the wc_other_cpus save area from i_cpr_alloc_cpus().
	 */
	wc_cpu = wc_other_cpus + (long)arg;

	/* save this cpu's apic state (wc_apic_state member is assumed) */
	ret = i_cpr_save_apic(&wc_cpu->wc_apic_state);
	ASSERT(ret == 0);

	/* preserve the running stack so it can be put back on resume */
	i_cpr_save_stack(curthread, wc_cpu);

	/*
	 * wc_save_context() returns twice, once when suspending and
	 * once when resuming; it returns non-zero when suspending and
	 * 0 upon resume (see its use in i_cpr_power_down() below).
	 */
	resuming = (wc_save_context(wc_cpu) == 0);

	/*
	 * do NOT call any functions after this point, because doing so
	 * will modify the stack that we are running on
	 */
	if (resuming) {

		/*
		 * Enable interrupts on this cpu.
		 * Do not bind interrupts to this CPU's local APIC until
		 * the CPU is ready to receive interrupts.
		 */
		cpu_enable_intr(CPU);

		/*
		 * Setting the bit in cpu_ready_set must be the last operation
		 * in processor initialization; the boot CPU will continue to
		 * boot once it sees this bit set for all active CPUs.
		 */
		CPUSET_ATOMIC_ADD(cpu_ready_set, CPU->cpu_id);

		PMD(PMD_SX,
		    ("i_cpr_save_context() resuming cpu %d in cpu_ready_set\n",
		    CPU->cpu_id))
} else {
/*
* Disable interrupts on this CPU so that PSM knows not to bind
* interrupts here on resume until the CPU has executed
* cpu_enable_intr() (above) in the resume path.
* We explicitly do not grab cpu_lock here because at this point
* in the suspend process, the boot cpu owns cpu_lock and all
* other cpus are also executing in the pause thread (only
* modifying their respective CPU structure).
*/
(void) cpu_disable_intr(CPU);
}
	PMD(PMD_SX, ("i_cpr_save_context: wc_save_context returns %d\n",
	    resuming))
return (NULL);
}
static ushort_t *
map_warm_reset_vector(void)
{
	/*
	 * Sketch: psm_map_phys() and WARM_RESET_VECTOR are the assumed
	 * interfaces for reaching the BIOS warm reset vector.
	 */
	/*LINTED*/
	if (!(warm_reset_vector = (ushort_t *)psm_map_phys(WARM_RESET_VECTOR,
	    sizeof (ushort_t *), PROT_READ|PROT_WRITE)))
		return (NULL);

	/*
	 * setup secondary cpu bios boot up vector: store the segment:offset
	 * of the real mode code on the rm_platter page (sketch)
	 */
	*warm_reset_vector = (ushort_t)(rm_platter_pa & 0xf);
	*(warm_reset_vector + 1) = (ushort_t)(rm_platter_pa >> 4);

	/*LINTED*/
	return (warm_reset_vector);
}
void
i_cpr_pre_resume_cpus(void)
{
/*
* this is a cut down version of start_other_cpus()
* just do the initialization to wake the other cpus
*/
unsigned who;
int boot_cpuid = i_cpr_bootcpuid();
uint32_t code_length = 0;
/*LINTED*/
char *str = "i_cpr_pre_resume_cpus";
extern int get_tsc_ready();
int err;
	/*
	 * If startup wasn't able to find a page under 1M, we cannot
	 * proceed.
	 */
	if (rm_platter_va == 0) {
		cmn_err(CE_WARN, "Cannot wake the other cpus because no "
		    "memory below 1M could be found for processor startup");
		return;
	}
/*
* Copy the real mode code at "real_mode_start" to the
* page at rm_platter_va.
*/
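	/*
	 * Reconstructed: map_warm_reset_vector() (above) is assumed to do
	 * the vector setup the comment describes.
	 */
	warm_reset_vector = map_warm_reset_vector();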
if (warm_reset_vector == NULL) {
return;
}
/*
* We lock our affinity to the master CPU to ensure that all slave CPUs
* do their TSC syncs with the same CPU.
*/
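	/*
	 * Reconstructed: affinity_set(CPU_CURRENT) is the assumed way the
	 * boot cpu pins itself for the TSC syncs described above.
	 */
	affinity_set(CPU_CURRENT);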
/*
* Mark the boot cpu as being ready and in the procset, since we are
* running on that cpu.
*/
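	/*
	 * Reconstructed per the comment above; the max_ncpus loop bound is
	 * an assumption.
	 */
	CPUSET_ONLY(cpu_ready_set, boot_cpuid);
	CPUSET_ONLY(procset, boot_cpuid);

	for (who = 0; who < max_ncpus; who++) {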
if (who == boot_cpuid)
continue;
		/* reconstructed: skip cpus that are not configured */
		if (!CPU_IN_SET(mp_cpus, who))
			continue;
#if defined(__amd64)
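		/*
		 * Reconstructed: wc_rm_start/wc_long_mode_64 are assumed to
		 * delimit the wake code that must run before 64-bit mode.
		 */
		code_length = (uint32_t)((uintptr_t)wc_long_mode_64 -
		    (uintptr_t)wc_rm_start);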
#else
code_length = 0;
#endif
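		/*
		 * Reconstructed: mach_cpuid_start() is assumed to be the
		 * interface that kicks the cpu via the warm reset vector.
		 */
		err = mach_cpuid_start(who, (void *)rm_platter_va);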
if (err != 0) {
continue;
}
		/* reconstructed handshake: wait for the cpu to check in */
		if (!wait_for_set(&procset, who))
			continue;
if (tsc_gethrtime_enable) {
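			/* reconstructed: master side of the TSC resync */
			tsc_sync_master(who);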
}
		PMD(PMD_SX, ("%s() waiting for cpu %d in cpu_ready_set\n", str,
		    who))
/*
* Wait for cpu to declare that it is ready, we want the
* cpus to start serially instead of in parallel, so that
* they do not contend with each other in wc_rm_start()
*/
		if (!wait_for_set(&cpu_ready_set, who))
			continue;
/*
* do not need to re-initialize dtrace using dtrace_cpu_init
* function
*/
}
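	/* reconstructed: drop the affinity taken before the loop */
	affinity_clear();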
}
/* undo map_warm_reset_vector(); psm_unmap_phys() is the assumed interface */
static void
unmap_warm_reset_vector(ushort_t *warm_reset_vector)
{
	psm_unmap_phys((caddr_t)warm_reset_vector, sizeof (ushort_t *));
}
/*
 * We need to setup a 1:1 (virtual to physical) mapping for the
 * page containing the wakeup code.
 */
static void
unmap_wakeaddr_1to1(uint64_t wakephys)
{
	/* reconstructed (assumed name): tear down the 1:1 wakeup mapping */
	hat_unload(kas.a_hat, (caddr_t)(uintptr_t)wakephys, MMU_PAGESIZE,
	    HAT_UNLOAD);
}
void
i_cpr_post_resume_cpus(void)
{
	uint64_t	wakephys = rm_platter_pa;

	if (warm_reset_vector != NULL)
		unmap_warm_reset_vector(warm_reset_vector);
/*
* cmi_post_mpstartup() is only required upon boot not upon
* resume from RAM
*/
/* Tear down 1:1 mapping for wakeup code */
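	/* reconstructed call; see unmap_wakeaddr_1to1() above */
	unmap_wakeaddr_1to1(wakephys);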
}
/* ARGSUSED */
void
i_cpr_handle_xc(int flag)
{
}
int
i_cpr_reusable_supported(void)
{
return (0);
}
static void
map_wakeaddr_1to1(uint64_t wakephys)
{
	/* reconstructed (assumed name): identity-map the wakeup code page */
	hat_devload(kas.a_hat, (caddr_t)(uintptr_t)wakephys, MMU_PAGESIZE,
	    btop(wakephys), PROT_READ | PROT_WRITE | PROT_EXEC,
	    HAT_LOAD);
}
void
prt_other_cpus(void)
{
	int	who;

	if (ncpus == 1) {
		PMD(PMD_SX, ("prt_other_cpus() other cpu table empty for "
		    "uniprocessor machine\n"))
		return;
	}

	/* function name and wc_cpu_t member names below are assumptions */
	for (who = 0; who < max_ncpus; who++) {
		wc_cpu_t	*cpup = wc_other_cpus + who;

		if (!CPU_IN_SET(mp_cpus, who))
			continue;

		PMD(PMD_SX, ("prt_other_cpus() who = %d, gdt=%p:%x, "
		    "idt=%p:%x, ldt=%lx, tr=%lx, kgsbase=" AFMT "\n", who,
		    (void *)cpup->wc_gdt_base, cpup->wc_gdt_limit,
		    (void *)cpup->wc_idt_base, cpup->wc_idt_limit,
		    (long)cpup->wc_ldt, (long)cpup->wc_tr,
		    (long)cpup->wc_kgsbase))
	}
}
/*
* Power down the system.
*/
int
i_cpr_power_down(int sleeptype)
{
	caddr_t		wakevirt = rm_platter_va;
	uint64_t	wakephys = rm_platter_pa;
	ulong_t		saved_intr;
	uint32_t	code_length = 0;
	/* wakecode_t layout per sys/cpr_wakecode.h is assumed here */
	/*LINTED*/
	wakecode_t	*wp = (wakecode_t *)wakevirt;
	/*LINTED*/
	wc_cpu_t	*cpup = &(wp->wc_cpu);
	int		ret = 0;
	char		*str = "i_cpr_power_down";
#if defined(__amd64)
	/*LINTED*/
	rm_platter_t	*real_mode_platter = (rm_platter_t *)rm_platter_va;
#endif
	extern int	cpr_suspend_succeeded;
	extern void	kernel_wc_code();

	/*
	 * Reconstructed guard (assumption: the caller passes the CPR sleep
	 * type); only suspend to RAM is handled here.
	 */
	if (sleeptype != CPR_TORAM) {
		return (ENOTTY);
	}
saved_intr = intr_clear();
/* Setup 1:1 mapping for wakeup code */
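	/* reconstructed call; see map_wakeaddr_1to1() above */
	map_wakeaddr_1to1(wakephys);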
/* Copy code to rm_platter */
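	/*
	 * Reconstructed: wc_rm_start/wc_rm_end are assumed to delimit the
	 * real mode wake code copied onto the platter page.
	 */
	bcopy((caddr_t)wc_rm_start, wakevirt,
	    (size_t)((uintptr_t)wc_rm_end - (uintptr_t)wc_rm_start));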
#if defined(__amd64)
/*
* Since the CPU needs to jump to protected mode using an identity
* mapped address, we need to calculate it here.
*/
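	/*
	 * Reconstructed: rm_longmode64_addr is assumed to be the platter
	 * field holding that identity mapped entry address.
	 */
	real_mode_platter->rm_longmode64_addr = rm_platter_pa +
	    (uint32_t)((uintptr_t)wc_long_mode_64 - (uintptr_t)wc_rm_start);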
#endif
	if (wc_save_context(cpup)) {

		/* reconstructed; the wc_apic_state member name is assumed */
		ret = i_cpr_platform_alloc(&(wc_other_cpus->wc_apic_state));
		if (ret != 0)
			return (ret);

		ret = i_cpr_save_apic(&(wc_other_cpus->wc_apic_state));
		if (ret != 0)
			return (ret);
#if defined(__amd64)
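		/* reconstructed: length of wake code up to the 64-bit entry */
		code_length = (uint32_t)((uintptr_t)wc_long_mode_64 -
		    (uintptr_t)wc_rm_start);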
#else
code_length = 0;
#endif
#if defined(__amd64)
("real_mode_platter->rm_temp_gdt[TEMPGDT_KCODE64]=%lx\n",
#endif
		PMD(PMD_SX, ("cpup->wc_kgsbase=" AFMT "\n",
		    (long)cpup->wc_kgsbase))
/*
* If it works, we get control back to the else branch below
* If we get control back here, it didn't work.
* XXX return EINVAL here?
*/
return (ret);
} else {
		/*
		 * the restore should never fail if the save succeeded
		 */
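		/* the extern above is assumed to flag a successful resume */
		cpr_suspend_succeeded = 1;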
/*
* Enable interrupts on boot cpu.
*/
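		/*
		 * Reconstructed: re-enable interrupts on the boot cpu and
		 * restore the flags saved by intr_clear() above.
		 */
		mutex_enter(&cpu_lock);
		cpu_enable_intr(CPU);
		mutex_exit(&cpu_lock);
		intr_restore(saved_intr);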
return (ret);
}
}
/*
 * Stop all other cpus before halting or rebooting. We pause the cpus
 * instead of sending a cross call.
* Stolen from sun4/os/mp_states.c
*/
static int cpu_are_paused; /* sic */
void
i_cpr_stop_other_cpus(void)
{
if (cpu_are_paused) {
return;
}
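	/*
	 * Reconstructed: pause (rather than cross call) the other cpus, as
	 * the comment above describes; pause_cpus() expects cpu_lock held.
	 */
	mutex_enter(&cpu_lock);
	pause_cpus(NULL);
	mutex_exit(&cpu_lock);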
cpu_are_paused = 1;
}
int
i_cpr_is_supported(int sleeptype)
{
	extern int cpr_supported_override;
	extern int cpr_platform_enable;
	extern int pm_S3_enabled;

	if (sleeptype != CPR_TORAM)
		return (0);

	/*
	 * The next statement tests if a specific platform has turned off
	 * cpr support.
	 */
	if (cpr_supported_override)
		return (0);
/*
* If a platform has specifically turned on cpr support ...
*/
if (cpr_platform_enable)
return (1);
return (pm_S3_enabled);
}
void
i_cpr_bitmap_cleanup(void)
{
}
/* assumed to be i_cpr_free_memory_resources(); nothing to do for S3 */
void
i_cpr_free_memory_resources(void)
{
}
/*
* Needed only for S3 so far
*/
static int
i_cpr_platform_alloc(psm_state_request_t *req)
{
#ifdef DEBUG
	char	*str = "i_cpr_platform_alloc";
#endif

	/* the psm_state()/PSM_STATE_* request interface is assumed here */
	if (psm_state == NULL) {
		PMD(PMD_SX, ("%s() : psm_state == NULL\n", str))
		return (0);
	}

	req->psr_cmd = PSM_STATE_ALLOC;
	return ((*psm_state)(req));
}
/*
* Needed only for S3 so far
*/
static void
i_cpr_platform_free(psm_state_request_t *req)
{
#ifdef DEBUG
	char	*str = "i_cpr_platform_free";
#endif

	if (psm_state == NULL) {
		PMD(PMD_SX, ("%s() : psm_state == NULL\n", str))
		return;
	}

	req->psr_cmd = PSM_STATE_FREE;
	(void) (*psm_state)(req);
}
static int
i_cpr_save_apic(psm_state_request_t *req)
{
#ifdef DEBUG
	char	*str = "i_cpr_save_apic";
#endif

	if (psm_state == NULL) {
		PMD(PMD_SX, ("%s() : psm_state == NULL\n", str))
		return (0);
	}

	req->psr_cmd = PSM_STATE_SAVE;
	return ((*psm_state)(req));
}
static int
i_cpr_restore_apic(psm_state_request_t *req)
{
#ifdef DEBUG
	char	*str = "i_cpr_restore_apic";
#endif

	if (psm_state == NULL) {
		PMD(PMD_SX, ("%s() : psm_state == NULL\n", str))
		return (0);
	}

	req->psr_cmd = PSM_STATE_RESTORE;
	return ((*psm_state)(req));
}
/* stop lint complaining about offset not being used in 32bit mode */
#if !defined(__amd64)
/*ARGSUSED*/
#endif
/* the parameter list below is an assumption ("offset" per the lint note) */
static void
init_real_mode_platter(int cpun, uint32_t offset, uint_t cr4, wc_desctbr_t gdt)
{
	/*LINTED*/
	rm_platter_t *real_mode_platter = (rm_platter_t *)rm_platter_va;
/*
* Fill up the real mode platter to make it easy for real mode code to
* kick it off. This area should really be one passed by boot to kernel
* and guaranteed to be below 1MB and aligned to 16 bytes. Should also
* have identical physical and virtual address in paged mode.
*/
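	/*
	 * Reconstructed sketch of the platter fill described above; the
	 * rm_platter_t member names are assumptions.
	 */
	real_mode_platter->rm_cpu = cpun;
	real_mode_platter->rm_cr4 = cr4;
	real_mode_platter->rm_pdbr = getcr3();
	real_mode_platter->rm_gdt_base = gdt.base;
	real_mode_platter->rm_gdt_lim = gdt.limit;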
#if defined(__amd64)
if (getcr3() > 0xffffffffUL)
panic("Cannot initialize CPUs; kernel's 64-bit page tables\n"
"located above 4G in physical memory (@ 0x%llx).",
(unsigned long long)getcr3());
/*
* Setup pseudo-descriptors for temporary GDT and IDT for use ONLY
* by code in real_mode_start():
*
* GDT[0]: NULL selector
* GDT[1]: 64-bit CS: Long = 1, Present = 1, bits 12, 11 = 1
*
* Clear the IDT as interrupts will be off and a limit of 0 will cause
* the CPU to triple fault and reset on an NMI, seemingly as reasonable
* a course of action as any other, though it may cause the entire
* platform to reset in some cases...
*/
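	/*
	 * Reconstructed sketch of the temporary descriptor setup described
	 * above; member names and the 64-bit CS descriptor constant follow
	 * the equivalent mp_startup code and are assumptions here.
	 */
	real_mode_platter->rm_temp_gdt[0] = 0;
	real_mode_platter->rm_temp_gdt[TEMPGDT_KCODE64] = 0x20980000000000ULL;

	real_mode_platter->rm_temp_gdt_lim = (ushort_t)
	    (sizeof (real_mode_platter->rm_temp_gdt) - 1);
	real_mode_platter->rm_temp_gdt_base = rm_platter_pa +
	    (uint32_t)offsetof(rm_platter_t, rm_temp_gdt);

	real_mode_platter->rm_temp_idt_lim = 0;
	real_mode_platter->rm_temp_idt_base = 0;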
/*
* Since the CPU needs to jump to protected mode using an identity
* mapped address, we need to calculate it here.
*/
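	/*
	 * Reconstructed: the identity mapped address is the platter's
	 * physical address plus the offset of the 64-bit entry point.
	 */
	real_mode_platter->rm_longmode64_addr = rm_platter_pa + offset;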
#endif /* __amd64 */
/* return; */
}
void
i_cpr_start_cpu(void)
{
	struct cpu	*cp = CPU;
	char		*str = "i_cpr_start_cpu";
	extern void	init_cpu_syscall(struct cpu *cp);

	PMD(PMD_SX, ("%s() called, cp->cpu_base_spl %d\n", str,
	    cp->cpu_base_spl))
if (cp == i_cpr_bootcpu()) {
("%s() called on bootcpu nothing to do!\n", str))
return;
}
/*
* We need to Sync PAT with cpu0's PAT. We have to do
* this with interrupts disabled.
*/
pat_sync();
/*
* If we use XSAVE, we need to restore XFEATURE_ENABLE_MASK register.
*/
if (fp_save_mech == FP_XSAVE) {
setup_xfem();
}
/*
* Initialize this CPU's syscall handlers
*/
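	/*
	 * Reconstructed: init_cpu_syscall() (declared above) is assumed to
	 * be the routine meant by the comment.
	 */
	init_cpu_syscall(cp);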
/*
* Do not need to call cpuid_pass2(), cpuid_pass3(), cpuid_pass4() or
* init_cpu_info(), since the work that they do is only needed to
* be done once at boot time
*/
	/*
	 * reconstructed: let the boot cpu know this cpu has checked in
	 * (procset is the handshake set used by i_cpr_pre_resume_cpus())
	 */
	mutex_enter(&cpu_lock);
	CPUSET_ADD(procset, cp->cpu_id);
	mutex_exit(&cpu_lock);

	PMD(PMD_SX, ("%s() cp->cpu_base_spl %d\n", str,
	    cp->cpu_base_spl))
if (tsc_gethrtime_enable) {
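		/* reconstructed: slave side of the TSC resync with the boot cpu */
		tsc_sync_slave();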
}
	PMD(PMD_SX, ("%s() cp->cpu_base_spl %d\n", str, cp->cpu_base_spl))

	(void) spl0();		/* enable interrupts */

	PMD(PMD_SX, ("%s() after spl0(), cp->cpu_base_spl %d\n", str,
	    cp->cpu_base_spl))
/*
* Set up the CPU module for this CPU. This can't be done before
* this CPU is made CPU_READY, because we may (in heterogeneous systems)
* need to go load another CPU module. The act of attempting to load
* a module may trigger a cross-call, which will ASSERT unless this
* cpu is CPU_READY.
*/
/*
* cmi already been init'd (during boot), so do not need to do it again
*/
#ifdef PM_REINITMCAONRESUME
cmi_mca_init();
#endif
/* return; */
}
void
i_cpr_alloc_cpus(void)
{
char *str = "i_cpr_alloc_cpus";
/*
* we allocate this only when we actually need it to save on
* kernel memory
*/
if (wc_other_cpus == NULL) {
		wc_other_cpus = kmem_zalloc(max_ncpus * sizeof (wc_cpu_t),
		    KM_SLEEP);
}
}
void
i_cpr_free_cpus(void)
{
int index;
	if (wc_other_cpus != NULL) {
		wc_cpu_t *wc_cpu;

		/*
		 * reconstructed: free each cpu's saved stack (member names
		 * wc_saved_stack{,_size} are assumptions), then the array
		 */
		for (index = 0; index < max_ncpus; index++) {
			wc_cpu = wc_other_cpus + index;
			if (wc_cpu->wc_saved_stack != NULL) {
				kmem_free(wc_cpu->wc_saved_stack,
				    wc_cpu->wc_saved_stack_size);
			}
		}

		kmem_free((void *) wc_other_cpus,
		    max_ncpus * sizeof (wc_cpu_t));
		wc_other_cpus = NULL;
	}
}
/*
* wrapper for acpica_ddi_save_resources()
*/
void
i_cpr_save_configuration(dev_info_t *dip)
{
	/* wrapper name is an assumption; the wrapped call is per the comment */
	acpica_ddi_save_resources(dip);
}
/*
* wrapper for acpica_ddi_restore_resources()
*/
void
i_cpr_restore_configuration(dev_info_t *dip)
{
	acpica_ddi_restore_resources(dip);
}
static int
wait_for_set(cpuset_t *set, int who)
{
	int delays;
	char *str = "wait_for_set";

	for (delays = 0; !CPU_IN_SET(*set, who); delays++) {
if (delays == 500) {
/*
* After five seconds, things are probably
* looking a bit bleak - explain the hang.
*/
"but not running in the kernel yet", who);
"but not running in the kernel yet\n",
} else if (delays > 2000) {
/*
* We waited at least 20 seconds, bail ..
*/
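			/* reconstructed warning before giving up */
			cmn_err(CE_WARN, "cpu%d: not started after "
			    "20 seconds", who);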
return (0);
}
/*
* wait at least 10ms, then check again..
*/
drv_usecwait(10000);
}
return (1);
}
/*
 * Save the thread's stack into its wc_cpu_t area so it can be put back
 * on resume (reconstructed; the wc_saved_stack member names are
 * assumptions).
 */
static void
i_cpr_save_stack(kthread_t *t, wc_cpu_t *wc_cpu)
{
	caddr_t	start = CPR_GET_STACK_START(t);
	caddr_t	end = CPR_GET_STACK_END(t);
	size_t	stack_size = (size_t)((uintptr_t)end - (uintptr_t)start);

	if (wc_cpu->wc_saved_stack_size < stack_size) {
		if (wc_cpu->wc_saved_stack != NULL) {
			kmem_free(wc_cpu->wc_saved_stack,
			    wc_cpu->wc_saved_stack_size);
		}
		wc_cpu->wc_saved_stack = kmem_zalloc(stack_size, KM_SLEEP);
		wc_cpu->wc_saved_stack_size = stack_size;
	}

	bcopy(start, wc_cpu->wc_saved_stack, stack_size);
}
/*
 * Copy a stack saved by i_cpr_save_stack() back into place on resume
 * (reconstructed; the name and signature are assumptions).
 */
void
i_cpr_restore_stack(kthread_t *t, greg_t *save_stack)
{
	caddr_t	start = CPR_GET_STACK_START(t);
	caddr_t	end = CPR_GET_STACK_END(t);
	size_t	stack_size = (size_t)((uintptr_t)end - (uintptr_t)start);

	bcopy(save_stack, start, stack_size);
}