/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#else /* lint */
#include "assym.h"
#endif /* lint */
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#include <sys/asm_linkage.h>
#include <sys/machlock.h>
#include <sys/machthread.h>
#include <sys/lockstat.h>
/* #define DEBUG */
#ifdef DEBUG
#include <sys/machparam.h>
#endif /* DEBUG */
/************************************************************************
* ATOMIC OPERATIONS
*/
/*
* uint8_t ldstub(uint8_t *cp)
*
* Store 0xFF at the specified location, and return its previous content.
*/
#if defined(lint)
uint8_t
ldstub(uint8_t *cp)
{
	uint8_t rv;

	rv = *cp;
	*cp = 0xFF;
	return (rv);
}
#else	/* lint */

	ENTRY(ldstub)
	retl
	ldstub	[%o0], %o0	! delay: old *cp into %o0, 0xFF into *cp
	SET_SIZE(ldstub)

#endif	/* lint */
/************************************************************************
* MEMORY BARRIERS -- see atomic.h for full descriptions.
*/
#if defined(lint)
void
membar_enter(void)
{}
void
membar_exit(void)
{}
void
membar_producer(void)
{}
void
membar_consumer(void)
{}
#else	/* lint */

#ifdef SF_ERRATA_51
	.align	32		! SF_ERRATA_51 workaround: route the
				! return through an aligned stub
#endif

	! membar_enter(), membar_exit(), membar_producer() and
	! membar_consumer() each issue a single membar instruction of
	! the appropriate flavor and return.

#endif	/* lint */
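/*
 * Illustrative sketch (lint-only; the example_* names are hypothetical,
 * not part of this file): a classic producer/consumer pairing of the
 * barriers above.  The producer must make the payload visible before
 * the flag, and the consumer must see the flag before it loads the
 * payload.
 */
#if defined(lint)
static volatile int example_payload;
static volatile int example_flag;

static void
example_publish(int v)
{
	example_payload = v;
	membar_producer();	/* order payload store before flag store */
	example_flag = 1;
}

static int
example_consume(void)
{
	while (example_flag == 0)
		continue;	/* wait for the producer */
	membar_consumer();	/* order flag load before payload load */
	return (example_payload);
}
#endif	/* lint */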
/************************************************************************
* MINIMUM LOCKS
*/
#if defined(lint)
/*
* lock_try(lp), ulock_try(lp)
* - returns non-zero on success.
* - doesn't block interrupts so don't use this to spin on a lock.
* - uses "0xFF is busy, anything else is free" model.
*
* ulock_try() is for a lock in the user address space.  On V7/V8
* platforms kernel and user are mapped in the same context, so the two
* routines can share an implementation; on V9 platforms lock_try()
* and ulock_try() must be implemented differently.
*/
int
lock_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

int
lock_spin_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

void
lock_set(lock_t *lp)
{
	extern void lock_set_spin(lock_t *);

	if (!lock_try(lp))
		lock_set_spin(lp);
	membar_enter();
}

void
lock_clear(lock_t *lp)
{
	membar_exit();
	*lp = 0;
}

int
ulock_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

void
ulock_clear(lock_t *lp)
{
	membar_exit();
	*lp = 0;
}
#else	/* lint */

	.align	32
	! assembly bodies of lock_try(), lock_spin_try(), lock_set(),
	! lock_clear(), ulock_try() and ulock_clear(); each entry
	! point is kept 32-byte aligned.

#endif	/* lint */
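/*
 * Illustrative sketch (lint-only; the example_* name is hypothetical):
 * the intended calling pattern for lock_try().  Since lock_try() does
 * not block interrupts, a caller that cannot get the lock should back
 * off or take a slow path rather than spin here.
 */
#if defined(lint)
static int
example_try_critical(lock_t *lp)
{
	if (!lock_try(lp))
		return (0);	/* busy: let the caller decide what next */
	/* ... critical section ... */
	lock_clear(lp);
	return (1);
}
#endif	/* lint */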
/*
* lock_set_spl(lp, new_pil, *old_pil_addr)
* Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
*/
#if defined(lint)
/* ARGSUSED */
void
lock_set_spl(lock_t *lp, int new_pil, ushort_t *old_pil_addr)
{
	extern int splr(int);
	extern void lock_set_spl_spin(lock_t *, int, ushort_t *, int);
	int old_pil;

	old_pil = splr(new_pil);
	if (!lock_try(lp)) {
		lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
	} else {
		*old_pil_addr = (ushort_t)old_pil;
		membar_enter();
	}
}
#else	/* lint */

	! assembly body of lock_set_spl(): raise the PIL with splr(),
	! try the lock with ldstub, and branch to lock_set_spl_spin()
	! if it is held.

#endif	/* lint */
/*
* lock_clear_splx(lp, s)
*/
#if defined(lint)
void
lock_clear_splx(lock_t *lp, int s)
{
extern void splx(int);
lock_clear(lp);
splx(s);
}
#else	/* lint */

	! assembly body of lock_clear_splx(): clear the lock byte and
	! lower the PIL to the saved value (no lower than the CPU's
	! base SPL).

#endif	/* lint */
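/*
 * Illustrative sketch (lint-only; the example_* name is hypothetical):
 * the usual pairing of lock_set_spl() with lock_clear_splx().  The PIL
 * saved at acquisition is handed back at release so the priority level
 * is restored exactly when the lock is dropped.
 */
#if defined(lint)
static void
example_spl_critical(lock_t *lp, int new_pil)
{
	ushort_t old_pil;

	lock_set_spl(lp, new_pil, &old_pil);
	/* ... critical section at PIL >= new_pil ... */
	lock_clear_splx(lp, old_pil);
}
#endif	/* lint */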
/*
* mutex_enter() and mutex_exit().
*
* These routines handle the simple cases of mutex_enter() (adaptive
* lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
* If anything complicated is going on we punt to mutex_vector_enter().
*
* mutex_tryenter() is similar to mutex_enter() but returns zero if
* the lock cannot be acquired, nonzero on success.
*
* If mutex_exit() gets preempted in the window between checking waiters
* and clearing the lock, we can miss wakeups. Disabling preemption
* in the mutex code is prohibitively expensive, so instead we detect
* mutex preemption by examining the trapped PC in the interrupt path.
* If we interrupt a thread in mutex_exit() that has not yet cleared
* the lock, pil_interrupt() resets its PC back to the beginning of
* mutex_exit() so it will check again for waiters when it resumes.
*
* The lockstat code below is activated when the lockstat driver
* calls lockstat_hot_patch() to hot-patch the kernel mutex code.
* Note that we don't need to test lockstat_event_mask here -- we won't
* patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
*/
#if defined (lint)
/* ARGSUSED */
void
mutex_enter(kmutex_t *lp)
{}

/* ARGSUSED */
int
mutex_tryenter(kmutex_t *lp)
{ return (0); }

/* ARGSUSED */
void
mutex_exit(kmutex_t *lp)
{}

/* ARGSUSED */
void *
mutex_owner_running(mutex_impl_t *lp)
{ return (NULL); }
#else	/* lint */

	.align	32
	! assembly fast paths of mutex_enter(), mutex_tryenter(),
	! mutex_exit() and mutex_owner_running().  The loads in
	! mutex_owner_running() that chase the owner thread's t_cpu
	! are bracketed by the critical-section labels whose extent
	! is exported here for the trap handler:

mutex_owner_running_critical_size = .mutex_owner_running_critical_end - mutex_owner_running_critical_start

#endif	/* lint */
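/*
 * Illustrative sketch (lint-only; the example_* names are hypothetical,
 * and atomic_cas_ptr() from <sys/atomic.h> stands in for the casx the
 * real code issues): the decision the mutex_enter() fast path makes.
 * The restartable mutex_exit() window described above has no C
 * equivalent, which is one reason the real fast path is assembler.
 */
#if defined(lint)
static void
example_adaptive_enter(volatile void *ownerp, void *self)
{
	extern void *atomic_cas_ptr(volatile void *, void *, void *);
	extern void example_slow_path(void);

	/* swing the owner word from 0 to curthread; anything else punts */
	if (atomic_cas_ptr(ownerp, NULL, self) != NULL)
		example_slow_path();	/* i.e., mutex_vector_enter() */
}
#endif	/* lint */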
/*
* rw_enter() and rw_exit().
*
* These routines handle the simple cases of rw_enter (write-locking an unheld
* lock or read-locking a lock that's neither write-locked nor write-wanted)
* and rw_exit (no waiters or not the last reader). If anything complicated
* is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
*/
#if defined(lint)
/* ARGSUSED */
void
rw_enter(krwlock_t *lp, krw_t rw)
{}

/* ARGSUSED */
void
rw_exit(krwlock_t *lp)
{}
#else	/* lint */

	.align	16
	! assembly fast paths of rw_enter() and rw_exit(): casx loops
	! that add or drop a reader, or claim and release the write
	! lock, branching to rw_enter_sleep() and rw_exit_wakeup() on
	! contention.  The reader-enter path has a sun4v-specific
	! variant.

#endif	/* lint */
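/*
 * Illustrative sketch (lint-only; all example_* names and bit values
 * are hypothetical stand-ins for the real layout in
 * <sys/rwlock_impl.h>): the read-lock fast path modeled in C.  One
 * atomic swap adds a reader count unless a writer holds or wants the
 * lock.
 */
#if defined(lint)
#define	EXAMPLE_WRITE_CLAIMED	0x6	/* writer held or wanted (illustrative) */
#define	EXAMPLE_READER_INCR	0x8	/* one reader count unit (illustrative) */

static void
example_rw_read_enter(volatile ulong_t *rwwd)
{
	extern ulong_t atomic_cas_ulong(volatile ulong_t *, ulong_t,
	    ulong_t);
	extern void example_sleep_path(void);
	ulong_t old = *rwwd;

	if ((old & EXAMPLE_WRITE_CLAIMED) != 0 ||
	    atomic_cas_ulong(rwwd, old, old + EXAMPLE_READER_INCR) != old)
		example_sleep_path();	/* i.e., rw_enter_sleep() */
}
#endif	/* lint */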
#if defined(lint)
void
lockstat_hot_patch(void)
{}
#else	/* lint */

	! assembly body of lockstat_hot_patch(): rewrites instructions
	! at the lockstat patch points in the fast paths above so lock
	! events branch into the lockstat wrappers while the driver is
	! collecting, and restores the plain code when it is not.

#endif	/* lint */
/*
* asm_mutex_spin_enter(mutex_t *)
*
* For use by assembly interrupt handler only.
* Does not change spl, since the interrupt handler is assumed to be
* running at high level already.
* Traps may be off, so cannot panic.
* Does not keep statistics on the lock.
*
* Entry: %l6 - points to mutex
* %l7 - address of call (returns to %l7+8)
* Uses: %l6, %l5
*/
#ifndef lint
	.align	16

	! assembly body of asm_mutex_spin_enter(): ldstub the lock
	! byte; if it was already held, spin on plain loads until it
	! reads free, then retry the ldstub.  Returns to %l7+8 with
	! the lock held.

#endif	/* lint */
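/*
 * Illustrative sketch (lint-only; the example_* name is hypothetical):
 * the spin-acquire loop asm_mutex_spin_enter() implements, modeled
 * with ldstub() from the top of this file.  While the lock is held we
 * spin on plain loads so the bus is not hammered with atomic ops.
 */
#if defined(lint)
static void
example_spin_enter(volatile uint8_t *lockbyte)
{
	while (ldstub((uint8_t *)lockbyte) != 0) {
		while (*lockbyte != 0)
			continue;	/* wait until it reads free */
	}
}
#endif	/* lint */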
/*
* asm_mutex_spin_exit(mutex_t *)
*
* For use by assembly interrupt handler only.
* Does not change spl, since the interrupt handler is assumed to be
* running at high level already.
*
* Entry: %l6 - points to mutex
* %l7 - address of call (returns to %l7+8)
* Uses: none
*/
#ifndef lint

	! assembly body of asm_mutex_spin_exit(): clear the lock byte
	! and return to %l7+8.

#endif	/* lint */
/*
* thread_onproc()
* Set thread in onproc state for the specified CPU.
* Also set the thread lock pointer to the CPU's onproc lock.
* Since the new lock isn't held, the store ordering is important.
* If not done in assembler, the compiler could reorder the stores.
*/
#if defined(lint)
void
thread_onproc(kthread_id_t t, cpu_t *cp)
{
	t->t_state = TS_ONPROC;
	t->t_lockp = &cp->cpu_thread_lock;
}
#else	/* lint */

	ENTRY(thread_onproc)
	set	TS_ONPROC, %o2		! TS_ONPROC state
	st	%o2, [%o0 + T_STATE]	! store state
	add	%o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
	retl				! return
	stn	%o3, [%o0 + T_LOCKP]	! delay - store new lock pointer
	SET_SIZE(thread_onproc)

#endif	/* lint */
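/*
 * Illustrative sketch (lint-only; the example_* name is hypothetical):
 * the ordering thread_onproc() guarantees, written with an explicit
 * barrier.  Compiled C could legally reorder the two stores, which is
 * exactly what the assembler version exists to prevent.
 */
#if defined(lint)
static void
example_onproc_order(kthread_id_t t, cpu_t *cp)
{
	t->t_state = TS_ONPROC;		/* state first ... */
	membar_producer();		/* ... made globally visible ... */
	t->t_lockp = &cp->cpu_thread_lock; /* ... then the new lock pointer */
}
#endif	/* lint */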
/* delay function used in some mutex code - just do 3 nop cas ops */
#if defined(lint)
/* ARGSUSED */
void
cas_delay(void *addr)
{}
#else	/* lint */
	ENTRY(cas_delay)
	casx	[%o0], %g0, %g0		! three no-op casx operations
	casx	[%o0], %g0, %g0
	retl
	casx	[%o0], %g0, %g0		! delay slot
	SET_SIZE(cas_delay)
#endif	/* lint */
#if defined(lint)
/*
 * Alternative delay function for some Niagara processors.  The rd
 * instruction uses fewer resources than casx on those CPUs.
 */
/* ARGSUSED */
void
rdccr_delay(void)
{}
#else	/* lint */
	ENTRY(rdccr_delay)
	rd	%ccr, %g0		! three no-op reads of %ccr
	rd	%ccr, %g0
	retl
	rd	%ccr, %g0		! delay slot
	SET_SIZE(rdccr_delay)
#endif	/* lint */
/*
* mutex_delay_default(void)
* Spins for approx a few hundred processor cycles and returns to caller.
*/
#if defined(lint)
void
mutex_delay_default(void)
{}
#else	/* lint */
	ENTRY(mutex_delay_default)
	mov	72, %o0			! iteration count
1:	brgz	%o0, 1b			! spin until it goes negative
	dec	%o0			! delay slot: decrement counter
	retl
	nop
	SET_SIZE(mutex_delay_default)
#endif	/* lint */
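/*
 * Illustrative sketch (lint-only; the example_* name is hypothetical):
 * a contended spin loop using one of the delay routines above as
 * backoff.  Per the comments above, rdccr_delay() suits some Niagara
 * CPUs while cas_delay() or mutex_delay_default() suits others.
 */
#if defined(lint)
static void
example_backoff_acquire(lock_t *lp)
{
	while (!lock_spin_try(lp))
		mutex_delay_default();	/* burn a few hundred cycles */
}
#endif	/* lint */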