/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* This workaround inhibits prom_printf after the cpus are grabbed.
* This can be removed when 4154263 is corrected.
*/
#define Bug_4154263
/*
* A CPR derivative specifically for sunfire
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/ddi.h>
#define	SUNDDI_IMPL
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/callb.h>
#include <sys/clock.h>
#include <sys/x_call.h>
#include <sys/cpuvar.h>
#include <sys/cyclic.h>
#include <sys/epm.h>
#include <sys/modctl.h>
#include <sys/promif.h>
#include <sys/cpr.h>
#include <sys/vfs.h>
#include <vm/as.h>
#include <nfs/lm.h>
#include <sys/fhc.h>
#include <sys/sysctrl.h>
static enum sysctrl_suspend_state {
	SYSC_STATE_BEGIN = 0,
	SYSC_STATE_USER,
	SYSC_STATE_DAEMON,
	SYSC_STATE_DRIVER,
	SYSC_STATE_FULL
} suspend_state;

static int	pstate_save;
static uint_t	sysctrl_gate[NCPU];
/*
 * sysctrl_skip_user_threads is used to control if user threads should
 * be suspended. If sysctrl_skip_user_threads is true, the rest of the
 * flags are not used; if it is false, sysctrl_check_user_stop_result
 * will be used to control whether or not we need to check the suspend
 * result, and sysctrl_allow_blocked_threads will be used to control
 * whether or not we allow suspend to continue if there are blocked
 * threads. We allow all combinations of sysctrl_check_user_stop_result
 * and sysctrl_allow_blocked_threads, even though it might not make much
 * sense to not allow blocked threads when we don't even check the stop
 * result.
 */
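static int	sysctrl_skip_user_threads = 0;		/* default to FALSE */
static int	sysctrl_check_user_stop_result = 1;	/* default to TRUE */
static int	sysctrl_allow_blocked_threads = 1;	/* default to TRUE */
static int	sysctrl_skip_kernel_threads = 1;	/* default to TRUE */

/*
 * Debug output is assumed to go through prom_printf(); the
 * Bug_4154263 workaround above exists precisely because prom_printf
 * is unsafe once the other cpus are grabbed.
 */
int	sysctrl_quiesce_debug = 0;
#define	DEBUGP(x)	{ if (sysctrl_quiesce_debug) prom_printf x; }

/* time each cpu gets to check in to the freeze gate before we panic */
#define	SYSC_CPU_LOOP_MSEC	1000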
static int sysc_watchdog_suspended;
extern int sysctrl_enable_detach_suspend;
static int sysc_lastval;
static void
sysctrl_grab_cpus(void)
{
	int		i;
	cpuset_t	others;
	uint64_t	sysc_tick_limit;
	uint64_t	sysc_tick_deadline;
	uint64_t	sysc_current_tick;
	extern cpuset_t	cpu_ready_set;
	extern void	sysctrl_freeze(void);
	extern u_longlong_t	gettick(void);
for (i = 0; i < NCPU; i++)
sysctrl_gate[i] = 0;
/* tell other cpus to go quiet and wait for continue signal */
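	others = cpu_ready_set;
	CPUSET_DEL(others, CPU->cpu_id);

	/* cross-call the other cpus into sysctrl_freeze(); they spin there */
	xt_some(others, (xcfunc_t *)sysctrl_freeze, (uint64_t)sysctrl_gate,
	    (uint64_t)&sysctrl_gate[CPU->cpu_id]);

	/* each cpu must check in within SYSC_CPU_LOOP_MSEC */
	sysc_tick_limit =
	    ((uint64_t)sys_tick_freq * SYSC_CPU_LOOP_MSEC) / 1000;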
/* wait for each cpu to check in */
for (i = 0; i < NCPU; i++) {
if (!CPU_IN_SET(others, i))
continue;
/*
* Get current tick value and calculate the deadline tick
*/
sysc_current_tick = gettick();
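		sysc_tick_deadline = sysc_current_tick + sysc_tick_limit;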
while (sysctrl_gate[i] == 0) {
/* If in panic, we just return */
if (panicstr)
break;
			/* panic if a cpu has not responded by the deadline */
			sysc_current_tick = gettick();
			if (sysc_current_tick >= sysc_tick_deadline) {
				cmn_err(CE_PANIC, "sysctrl: cpu %d not "
				    "responding to quiesce command", i);
			}
}
}
/* now even our interrupts are disabled -- really quiet now */
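	pstate_save = disable_vec_intr();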
}
static void
sysctrl_release_cpus(void)
{
/* let the other cpus go */
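	sysctrl_gate[CPU->cpu_id] = 1;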
/* restore our interrupts too */
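	enable_vec_intr(pstate_save);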
}
static void
sysctrl_stop_intr(void)
{
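	/* cpu_lock must be held across cyclic_suspend()/cyclic_resume() */
	mutex_enter(&cpu_lock);
	kpreempt_disable();
	cyclic_suspend();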
}
static void
sysctrl_enable_intr(void)
{
	cyclic_resume();
	(void) spl0();
	kpreempt_enable();
	mutex_exit(&cpu_lock);
}
static int
sysctrl_is_real_device(dev_info_t *dip)
{
	struct regspec *regbuf = NULL;
	int length = 0;
	int rc;

	if (ddi_get_driver(dip) == NULL)
		return (FALSE);

	if (DEVI(dip)->devi_pm_flags & (PMC_NEEDS_SR | PMC_PARENTAL_SR))
		return (TRUE);
	if (DEVI(dip)->devi_pm_flags & PMC_NO_SR)
		return (FALSE);

	/*
	 * now the general case: a device is real if it has physical
	 * registers, i.e. a "reg" property
	 */
	rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&regbuf, &length);
	ASSERT(rc != DDI_PROP_NO_MEMORY);
	if (rc != DDI_PROP_SUCCESS) {
		return (FALSE);
	} else {
		if ((length > 0) && (regbuf != NULL))
			kmem_free(regbuf, length);
		return (TRUE);
	}
}
static dev_info_t *failed_driver;
static char device_path[MAXPATHLEN];

static int
sysctrl_suspend_devices(dev_info_t *dip, sysc_cfga_pkt_t *pkt)
{
	int circ;

	for (; dip != NULL; dip = ddi_get_next_sibling(dip)) {
		/*
		 * Hold parent busy while walking child list
		 */
		ndi_devi_enter(dip, &circ);
		if (sysctrl_suspend_devices(ddi_get_child(dip), pkt)) {
			ndi_devi_exit(dip, circ);
			return (ENXIO);
		}
		ndi_devi_exit(dip, circ);

		if (!sysctrl_is_real_device(dip))
			continue;
		/*
		 * Safe to call ddi_pathname() as parent is held busy
		 */
		(void) ddi_pathname(dip, device_path);
		if (devi_detach(dip, DDI_SUSPEND) != DDI_SUCCESS) {
			DEBUGP(("failed to suspend %s\n",
			    device_path));
			(void) strncpy(pkt->errbuf, device_path,
			    SYSC_OUTPUT_LEN);
			SYSC_ERR_SET(pkt, SYSC_ERR_SUSPEND);
			failed_driver = dip;
			return (ENXIO);
		}
	}
	return (DDI_SUCCESS);
}
static void
sysctrl_resume_devices(dev_info_t *start, sysc_cfga_pkt_t *pkt)
{
	int circ;
	dev_info_t *dip, *next, *last = NULL;

	/* attach in reverse device tree order */
	while (last != start) {
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != failed_driver) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == failed_driver) {
			failed_driver = NULL;
		} else if (sysctrl_is_real_device(dip) &&
		    failed_driver == NULL) {
			/*
			 * Parent dip is held busy, so ddi_pathname() can
			 * be safely called.
			 */
			(void) ddi_pathname(dip, device_path);
			if (devi_attach(dip, DDI_RESUME) != DDI_SUCCESS) {
				/*
				 * XXX - if in the future we decide not to
				 * panic the system, we need to set the error
				 * SYSC_ERR_RESUME here and also change the
				 * cfgadm platform library.
				 */
				cmn_err(CE_PANIC, "Unable to resume device %s",
				    device_path);
			}
		}
		ndi_devi_enter(dip, &circ);
		sysctrl_resume_devices(ddi_get_child(dip), pkt);
		ndi_devi_exit(dip, circ);

		last = dip;
	}
}
/*
* True if thread is virtually stopped. Similar to CPR_VSTOPPED
* but from DR point of view. These user threads are waiting in
* the kernel. Once they complete in the kernel, they will process
* the stop signal and stop.
*/
#define	SYSCTRL_VSTOPPED(t)		\
	((t)->t_state == TS_SLEEP &&	\
	(t)->t_wchan != NULL &&		\
	(t)->t_astflag &&		\
	((t)->t_proc_flag & TP_CHKPT))
static int
sysctrl_stop_user_threads(sysc_cfga_pkt_t *pkt)
{
	int count, bailout;
	pid_t pid;
	char cache_psargs[PSARGSZ];
	kthread_id_t cache_tp, tp;
	uint_t cache_t_state;

	extern void add_one_utstop(void);
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	SYSCTRL_UTSTOP_RETRY	4
#define	SYSCTRL_UTSTOP_WAIT	hz

	if (sysctrl_skip_user_threads)
		return (DDI_SUCCESS);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	for (count = 0; count < SYSCTRL_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread;
		    tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);
			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;
				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				aston(tp);
				if (ISWAKEABLE(tp) || ISWAITING(tp))
					setrun_locked(tp);
			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);

			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);

		/* let everything catch up */
		utstop_timedwait(count * count * SYSCTRL_UTSTOP_WAIT);

		/* now, walk the threadlist again to see if we are done */
		bailout = 0;
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread;
		    tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(sysctrl_allow_blocked_threads &&
			    SYSCTRL_VSTOPPED(tp))) {
				/* nope, cache the details for later */
				bcopy(p->p_user.u_psargs, cache_psargs,
				    sizeof (cache_psargs));
				cache_tp = tp;
				cache_t_state = tp->t_state;
				pid = p->p_pidp->pid_id;
				bailout = 1;
			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		(void) sprintf(pkt->errbuf, "process: %s id: %d state: %x"
		    " thread descriptor: %p", cache_psargs, (int)pid,
		    cache_t_state, (void *)cache_tp);
		SYSC_ERR_SET(pkt, SYSC_ERR_SUSPEND);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}
static int
sysctrl_stop_kernel_threads(sysc_cfga_pkt_t *pkt)
{
	caddr_t name;
	kthread_id_t tp;

	if (sysctrl_skip_kernel_threads) {
		return (DDI_SUCCESS);
	}

	/*
	 * Note: we unlock the table in resume.
	 * We only need to lock the callback table if we are actually
	 * suspending kernel threads.
	 */
	callb_lock_table();
	if ((name = callb_execute_class(CB_CL_CPR_DAEMON,
	    CB_CODE_CPR_CHKPT)) != NULL) {
		(void) strncpy(pkt->errbuf, name, SYSC_OUTPUT_LEN);
		SYSC_ERR_SET(pkt, SYSC_ERR_SUSPEND);
		return (EBUSY);
	}

	/*
	 * Verify that all threads are accounted for
	 */
	mutex_enter(&pidlock);
	for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
		proc_t *p = ttoproc(tp);

		if (p->p_as != &kas)
			continue;
		if (tp->t_flag & T_INTR_THREAD)
			continue;
		if (!callb_is_stopped(tp, &name)) {
			mutex_exit(&pidlock);
			(void) strncpy(pkt->errbuf, name, SYSC_OUTPUT_LEN);
			SYSC_ERR_SET(pkt, SYSC_ERR_SUSPEND);
			return (EBUSY);
		}
	}
	mutex_exit(&pidlock);
	return (DDI_SUCCESS);
}
static void
sysctrl_start_user_threads(void)
{
	kthread_id_t tp;

	mutex_enter(&pidlock);
	/* walk all threads and release them */
	for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
		proc_t *p = ttoproc(tp);

		/* skip kernel threads */
		if (p->p_as == &kas)
			continue;

		mutex_enter(&p->p_lock);
		tp->t_proc_flag &= ~TP_CHKPT;
		mutex_exit(&p->p_lock);

		thread_lock(tp);
		if (CPR_ISTOPPED(tp)) {
			/* back on the runq */
			tp->t_schedflag |= TS_RESUME;
			setrun_locked(tp);
		}
		thread_unlock(tp);
	}
	mutex_exit(&pidlock);
}
static void
sysctrl_signal_user(int sig)
{
	struct proc *p;

	mutex_enter(&pidlock);
	for (p = practive; p != NULL; p = p->p_next) {
		/* only user threads */
		if (p->p_exec == NULL || p->p_stat == SZOMB ||
		    p == proc_init || p == ttoproc(curthread))
			continue;

		mutex_enter(&p->p_lock);
		sigtoproc(p, NULL, sig);
		mutex_exit(&p->p_lock);
	}
	mutex_exit(&pidlock);

	/* add a bit of delay */
	delay(hz);
}
void
sysctrl_resume(sysc_cfga_pkt_t *pkt)
{
#ifndef Bug_4154263
	DEBUGP(("resume system...\n"));
#endif
switch (suspend_state) {
case SYSC_STATE_FULL:
/*
* release all the other cpus
*/
#ifndef Bug_4154263
		DEBUGP(("release cpus...\n"));
#endif
		/*
		 * Prevent false alarm in tod_validate() due to tod
		 * value change between suspend and resume
		 */
		mutex_enter(&tod_lock);
		tod_status_set(TOD_DR_RESUME_DONE);
		mutex_exit(&tod_lock);

		sysctrl_release_cpus();

		/*
		 * If we suspended hw watchdog at suspend,
		 * re-enable it now.
		 */
		if (sysc_watchdog_suspended) {
			mutex_enter(&tod_lock);
			(void) tod_ops.tod_set_watchdog_timer(
			    watchdog_timeout_seconds);
			mutex_exit(&tod_lock);
		}

		/*
		 * resume callout
		 */
		(void) callb_execute_class(CB_CL_CPR_CALLOUT,
		    CB_CODE_CPR_RESUME);
		sysctrl_enable_intr();
		/* FALLTHROUGH */
case SYSC_STATE_DRIVER:
		/*
		 * resume drivers
		 */
		sysctrl_resume_devices(ddi_root_node(), pkt);

		/*
		 * resume the lock manager
		 */
		lm_cprresume();
		/* FALLTHROUGH */
case SYSC_STATE_DAEMON:
/*
* resume kernel daemons
*/
		if (!sysctrl_skip_kernel_threads) {
			(void) callb_execute_class(CB_CL_CPR_DAEMON,
			    CB_CODE_CPR_RESUME);
			callb_unlock_table();
		}
/* FALLTHROUGH */
case SYSC_STATE_USER:
/*
* finally, resume user threads
*/
		if (!sysctrl_skip_user_threads) {
			sysctrl_start_user_threads();
		}
/* FALLTHROUGH */
case SYSC_STATE_BEGIN:
default:
/*
* let those who care know that we've just resumed
*/
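		sysctrl_signal_user(SIGTHAW);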
break;
}
}
void
sysctrl_suspend_prepare(void)
{
/*
* We use a function, lm_cprsuspend(), in the suspend flow that
* is redirected to a module through the modstubs mechanism.
* If the module is currently not loaded, modstubs attempts
* the modload. The context this happens in below causes the
* module load to block forever, so this function must be called
* in the normal system call context ahead of time.
*/
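	/*
	 * Force the load now; the lock manager stub module is assumed
	 * to be misc/klmmod, the module behind lm_cprsuspend().
	 */
	(void) modload("misc", "klmmod");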
}
int
sysctrl_suspend(sysc_cfga_pkt_t *pkt)
{
	int rc = DDI_SUCCESS;

	/*
	 * first, stop all user threads
	 */
	suspend_state = SYSC_STATE_USER;
	if (((rc = sysctrl_stop_user_threads(pkt)) != DDI_SUCCESS) &&
	    sysctrl_check_user_stop_result) {
		sysctrl_resume(pkt);
		return (rc);
	}
	/*
	 * now stop daemon activities
	 */
	suspend_state = SYSC_STATE_DAEMON;
	if ((rc = sysctrl_stop_kernel_threads(pkt)) != DDI_SUCCESS) {
		sysctrl_resume(pkt);
		return (rc);
	}
	/*
	 * This sync swaps out all user pages
	 */
	vfs_sync(SYNC_ALL);
	/*
	 * special treatment for lock manager
	 */
	lm_cprsuspend();
/*
* sync the file system in case we never make it back
*/
sync();
	/*
	 * now suspend drivers
	 */
	suspend_state = SYSC_STATE_DRIVER;
	if ((rc = sysctrl_suspend_devices(ddi_root_node(), pkt)) !=
	    DDI_SUCCESS) {
		sysctrl_resume(pkt);
		return (rc);
	}
	/*
	 * handle the callout table
	 */
	sysctrl_stop_intr();

	(void) callb_execute_class(CB_CL_CPR_CALLOUT, CB_CODE_CPR_CHKPT);
/*
* if watchdog was activated, disable it
*/
	if (watchdog_activated) {
		mutex_enter(&tod_lock);
		(void) tod_ops.tod_clear_watchdog_timer();
		mutex_exit(&tod_lock);
		sysc_watchdog_suspended = 1;
	} else {
		sysc_watchdog_suspended = 0;
	}
/*
* finally, grab all cpus
*/
	suspend_state = SYSC_STATE_FULL;
	sysctrl_grab_cpus();
#ifndef Bug_4154263
	DEBUGP(("the system is quiesced\n"));
#endif
return (rc);
}