/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"

#if defined(__lint)
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#else /* __lint */
#include "assym.h"
#endif /* __lint */
#include <sys/mutex_impl.h>
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/rwlock_impl.h>
#include <sys/lockstat.h>
/*
* lock_try(lp), ulock_try(lp)
* - returns non-zero on success.
* - doesn't block interrupts so don't use this to spin on a lock.
*
* ulock_try() is for a lock in the user address space.
*/
#if defined(__lint)

/* ARGSUSED */
int
lock_try(lock_t *lp)
{ return (0); }

/* ARGSUSED */
int
lock_spin_try(lock_t *lp)
{ return (0); }

/* ARGSUSED */
int
ulock_try(lock_t *lp)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
jnz 0f
0:
#ifdef DEBUG
#endif /* DEBUG */
#else
jz 0f
0:
#ifdef DEBUG
#endif /* DEBUG */
#endif /* !__amd64 */
#ifdef DEBUG
.data
.string "ulock_try: Argument is above kernelbase"
.text
#endif /* DEBUG */
#endif /* __lint */
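/*
 * Illustrative sketch (not part of the build): the lock_try() contract
 * described above, modeled as user-level C with GCC atomic builtins.
 * The kernel routine itself is the hand-written assembly (an xchg against
 * the lock byte); the demo_* names here are hypothetical.
 */
#if 0	/* illustration only */
#include <stdint.h>

typedef volatile uint8_t demo_lock_t;		/* 0 == free, 0xff == held */

static int
demo_lock_try(demo_lock_t *lp)
{
	/* Atomically set the lock byte; an old value of 0 means we won. */
	return (__atomic_exchange_n(lp, 0xff, __ATOMIC_ACQUIRE) == 0);
}
#endif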
/*
* lock_clear(lp)
* - unlock lock without changing interrupt priority level.
*/
#if defined(__lint)

/* ARGSUSED */
void
lock_clear(lock_t *lp)
{}

/* ARGSUSED */
void
ulock_clear(lock_t *lp)
{}
#else /* __lint */
#if defined(__amd64)
#ifdef DEBUG
#endif
#else
#ifdef DEBUG
#endif
#endif /* !__amd64 */
#ifdef DEBUG
.data
.string "ulock_clear: Argument is above kernelbase"
.text
#endif /* DEBUG */
#endif /* __lint */
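/*
 * Illustrative sketch (not part of the build): lock_clear() amounts to a
 * release-ordered store of zero into the lock byte; the DEBUG string above
 * reflects the assembly's sanity check that a ulock argument lies below
 * kernelbase.  Uses the hypothetical demo_lock_t from the sketch above.
 */
#if 0	/* illustration only */
static void
demo_lock_clear(demo_lock_t *lp)
{
	/* Release the lock; prior stores become visible to the next owner. */
	__atomic_store_n(lp, 0, __ATOMIC_RELEASE);
}
#endif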
/*
* lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
 * Sets pil to new_pil, grabs lp, stores old pil in *old_pil.
*/
#if defined(__lint)

/* ARGSUSED */
void
lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
{}
#else /* __lint */
#if defined(__amd64)
.lss_miss:
leave /* unwind stack */
#else
.lss_miss:
#endif /* !__amd64 */
#endif /* __lint */
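/*
 * Illustrative sketch (not part of the build): conceptually the fast path
 * raises the PIL, then tries the lock, and records the previous PIL on
 * success; a miss is handed to the C spin path (not shown).  This ordering
 * is an assumption of the sketch, demo_* names are hypothetical, and
 * splr()/splx() are declared only to stand in for the kernel spl routines.
 */
#if 0	/* illustration only */
extern int splr(int);		/* raise PIL to at least the given level */
extern void splx(int);		/* restore a saved PIL */

static int
demo_lock_set_spl_fast(demo_lock_t *lp, int new_pil, unsigned short *old_pil)
{
	int pil = splr(new_pil);	/* raise PIL before touching the lock */

	if (demo_lock_try(lp)) {
		*old_pil = (unsigned short)pil;	/* record the previous PIL */
		return (1);			/* fast path won */
	}
	splx(pil);		/* miss: caller falls back to the spin path */
	return (0);
}
#endif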
/*
* void
* lock_init(lp)
*/
#if defined(__lint)
/* ARGSUSED */
void
lock_init(lock_t *lp)
{}
#else /* __lint */
#if defined(__amd64)
#else
#endif /* !__amd64 */
#endif /* __lint */
/*
* void
* lock_set(lp)
*/
#if defined(__lint)

/* ARGSUSED */
void
lock_set(lock_t *lp)
{}
#else /* __lint */
#if defined(__amd64)
#else
#endif /* !__amd64 */
#endif /* __lint */
/*
* lock_clear_splx(lp, s)
*/
#if defined(__lint)

/* ARGSUSED */
void
lock_clear_splx(lock_t *lp, int s)
{}
#else /* __lint */
#if defined(__amd64)
jmp 0f
0:
leave /* unwind stack */
#else
jmp 0f
0:
leave /* unwind stack */
#endif /* !__amd64 */
#if defined(__GNUC_AS__)
#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL \
#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT \
#else
#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL \
#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT \
#endif
#endif /* __lint */
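/*
 * Illustrative sketch (not part of the build): lock_clear_splx() pairs with
 * lock_set_spl() -- drop the lock first, then restore the saved priority
 * level.  Reuses the hypothetical demo_lock_clear() and splx() declarations
 * from the sketches above.
 */
#if 0	/* illustration only */
static void
demo_lock_clear_splx(demo_lock_t *lp, int s)
{
	demo_lock_clear(lp);	/* release the lock */
	splx(s);		/* then restore the interrupt priority level */
}
#endif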
/*
* mutex_enter() and mutex_exit().
*
* These routines handle the simple cases of mutex_enter() (adaptive
* lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
* If anything complicated is going on we punt to mutex_vector_enter().
*
* mutex_tryenter() is similar to mutex_enter() but returns zero if
* the lock cannot be acquired, nonzero on success.
*
* If mutex_exit() gets preempted in the window between checking waiters
* and clearing the lock, we can miss wakeups. Disabling preemption
* in the mutex code is prohibitively expensive, so instead we detect
* mutex preemption by examining the trapped PC in the interrupt path.
* If we interrupt a thread in mutex_exit() that has not yet cleared
* the lock, cmnint() resets its PC back to the beginning of
* mutex_exit() so it will check again for waiters when it resumes.
*
* The lockstat code below is activated when the lockstat driver
* calls lockstat_hot_patch() to hot-patch the kernel mutex code.
* Note that we don't need to test lockstat_event_mask here -- we won't
* patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
*/
#if defined(__lint)

/* ARGSUSED */
void
mutex_enter(kmutex_t *lp)
{}

/* ARGSUSED */
int
mutex_tryenter(kmutex_t *lp)
{ return (0); }

/* ARGSUSED */
int
mutex_adaptive_tryenter(mutex_impl_t *lp)
{ return (0); }

/* ARGSUSED */
void
mutex_exit(kmutex_t *lp)
{}
#else
#if defined(__amd64)
#if defined(OPTERON_WORKAROUND_6323525)
ret /* nop space for lfence */
.mutex_enter_lockstat_6323525_patch_point: /* new patch point if lfence */
#else /* OPTERON_WORKAROUND_6323525 */
#endif /* OPTERON_WORKAROUND_6323525 */
/*
* expects %rdx=thread, %rsi=lock, %edi=lockstat event
*/
jz 1f
leave /* unwind stack */
1:
/*
* expects %rcx=thread, %rdx=arg, %rsi=lock, %edi=lockstat event
*/
jz 1f
leave /* unwind stack */
1:
#if defined(OPTERON_WORKAROUND_6323525)
ret /* nop space for lfence */
.mutex_tryenter_lockstat_6323525_patch_point: /* new patch point if lfence */
#else /* OPTERON_WORKAROUND_6323525 */
#endif /* OPTERON_WORKAROUND_6323525 */
jnz 0f
#if defined(OPTERON_WORKAROUND_6323525)
ret /* nop space for lfence */
#else /* OPTERON_WORKAROUND_6323525 */
#endif /* OPTERON_WORKAROUND_6323525 */
0:
1:
2:
#else
#if defined(OPTERON_WORKAROUND_6323525)
ret /* nop space for lfence */
.mutex_enter_lockstat_6323525_patch_point: /* new patch point if lfence */
#else /* OPTERON_WORKAROUND_6323525 */
#endif /* OPTERON_WORKAROUND_6323525 */
/* eax=lockstat event */
jz 1f
1:
/* eax=lockstat event, pushed arg */
jz 1f
1:
#if defined(OPTERON_WORKAROUND_6323525)
ret /* nop space for lfence */
.mutex_tryenter_lockstat_6323525_patch_point: /* new patch point if lfence */
#else /* OPTERON_WORKAROUND_6323525 */
#endif /* OPTERON_WORKAROUND_6323525 */
jnz 0f
#if defined(OPTERON_WORKAROUND_6323525)
ret /* nop space for lfence */
#else /* OPTERON_WORKAROUND_6323525 */
#endif /* OPTERON_WORKAROUND_6323525 */
0:
1:
2:
#endif /* !__amd64 */
#endif /* __lint */
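/*
 * Illustrative sketch (not part of the build): the adaptive fast paths
 * described above, modeled in C with GCC builtins.  The real lock word also
 * encodes a waiters bit, and the real exit path is restartable by the
 * interrupt code; neither is shown.  demo_* names are hypothetical and the
 * slow path is only declared.
 */
#if 0	/* illustration only */
#include <stdint.h>

typedef struct {
	volatile uintptr_t m_owner;	/* 0 == unheld, else owning thread */
} demo_mutex_t;

extern void demo_mutex_vector_enter(demo_mutex_t *);	/* slow path, not shown */

static int
demo_mutex_tryenter(demo_mutex_t *mp, uintptr_t curthread)
{
	uintptr_t expect = 0;

	/* cmpxchg on the owner word: succeed only if currently unheld */
	return (__atomic_compare_exchange_n(&mp->m_owner, &expect, curthread,
	    0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}

static void
demo_mutex_enter(demo_mutex_t *mp, uintptr_t curthread)
{
	if (!demo_mutex_tryenter(mp, curthread))
		demo_mutex_vector_enter(mp);	/* contended or complex case */
}

static void
demo_mutex_exit(demo_mutex_t *mp)
{
	/* no waiters recorded: clear the owner with release semantics */
	__atomic_store_n(&mp->m_owner, 0, __ATOMIC_RELEASE);
}
#endif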
/*
* rw_enter() and rw_exit().
*
* These routines handle the simple cases of rw_enter (write-locking an unheld
* lock or read-locking a lock that's neither write-locked nor write-wanted)
* and rw_exit (no waiters or not the last reader). If anything complicated
* is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
*/
#if defined(__lint)

/* ARGSUSED */
void
rw_enter(krwlock_t *lp, krw_t rw)
{}

/* ARGSUSED */
void
rw_exit(krwlock_t *lp)
{}
#else /* __lint */
#if defined(__amd64)
#if defined(OPTERON_WORKAROUND_6323525)
#else /* OPTERON_WORKAROUND_6323525 */
#endif /* OPTERON_WORKAROUND_6323525 */
#else
#if defined(OPTERON_WORKAROUND_6323525)
#else /* OPTERON_WORKAROUND_6323525 */
#endif /* OPTERON_WORKAROUND_6323525 */
#endif /* !__amd64 */
#endif /* __lint */
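/*
 * Illustrative sketch (not part of the build): a model of the rw_enter()
 * reader fast path described above.  The bit layout below is assumed for
 * the sketch and is not the kernel's rwlock_impl encoding; demo_* names
 * are hypothetical and the slow path is only declared.
 */
#if 0	/* illustration only */
#include <stdint.h>

#define	DEMO_RW_WRLOCKED	0x1UL	/* held by a writer */
#define	DEMO_RW_WRWANT		0x2UL	/* a writer is waiting */
#define	DEMO_RW_READER		0x4UL	/* one reader hold */

typedef struct { volatile uintptr_t rw_word; } demo_rwlock_t;

extern void demo_rw_enter_sleep(demo_rwlock_t *, int);	/* slow path, not shown */

static void
demo_rw_enter_read(demo_rwlock_t *rwp)
{
	uintptr_t old = __atomic_load_n(&rwp->rw_word, __ATOMIC_RELAXED);

	/* Fast path only while neither write-locked nor write-wanted. */
	while ((old & (DEMO_RW_WRLOCKED | DEMO_RW_WRWANT)) == 0) {
		if (__atomic_compare_exchange_n(&rwp->rw_word, &old,
		    old + DEMO_RW_READER, 0, __ATOMIC_ACQUIRE,
		    __ATOMIC_RELAXED))
			return;			/* got a read hold */
	}
	demo_rw_enter_sleep(rwp, 0);		/* anything else: punt */
}
#endif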
#if defined(OPTERON_WORKAROUND_6323525)
#if defined(__lint)

int	workaround_6323525_patched;

void
patch_workaround_6323525(void)
{}
#else /* lint */
/*
* If it is necessary to patch the lock enter routines with the lfence
* workaround, workaround_6323525_patched is set to a non-zero value so that
 * the lockstat_hot_patch() routine can patch to the new location of the 'ret'
* instruction.
*/
.long 0
#if defined(__amd64)
0: \
jg 0b;
/*
* patch_workaround_6323525: provide workaround for 6323525
*
* The workaround is to place a fencing instruction (lfence) between the
* mutex operation and the subsequent read-modify-write instruction.
*
* This routine hot patches the lfence instruction on top of the space
* reserved by nops in the lock enter routines.
*/
/*
 * Tell lockstat_hot_patch() to use the alternate lockstat workaround
 * 6323525 patch points (which point past the lfence instruction to the
 * new ret) when workaround_6323525_patched is set.
 */
/*
 * Patch over the nop space reserved in the lock enter
 * routines. The 4 bytes are patched in reverse order so that the
 * existing ret is overwritten last. This provides lock enter
 * sanity during the intermediate patching stages.
 */
#else /* __amd64 */
0: \
pushl $1; \
jg 0b;
/* see comments above */
#endif /* !__amd64 */
#endif /* !lint */
#endif /* OPTERON_WORKAROUND_6323525 */
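/*
 * Illustrative sketch (not part of the build): the essence of workaround
 * 6323525 is an lfence issued between the atomic lock operation and the
 * instructions that follow it.  Shown here with GCC inline assembly on top
 * of the hypothetical demo_mutex_tryenter() sketch above; the real fix is
 * hot-patched into the reserved nop space as described.
 */
#if 0	/* illustration only */
static int
demo_mutex_tryenter_6323525(demo_mutex_t *mp, uintptr_t curthread)
{
	int won = demo_mutex_tryenter(mp, curthread);

	/* fence between the cmpxchg and subsequent accesses (erratum 6323525) */
	__asm__ __volatile__("lfence" ::: "memory");
	return (won);
}
#endif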
#if defined(__lint)

void
lockstat_hot_patch(void)
{}
#else
#if defined(__amd64)
jz 9f; \
9: \
#else
jz . + 4; \
#endif /* !__amd64 */
#if defined(__amd64)
#endif /* __amd64 */
#if defined(OPTERON_WORKAROUND_6323525)
je 1f
jmp 2f
1:
2:
#else /* OPTERON_WORKAROUND_6323525 */
#endif /* !OPTERON_WORKAROUND_6323525 */
#if defined(__amd64)
leave /* unwind stack */
#endif /* __amd64 */
#endif /* __lint */
/* XX64 membar_*() should be inlines */

#if defined(__lint)

void
membar_sync(void)
{}
void
membar_enter(void)
{}
void
membar_exit(void)
{}
void
membar_producer(void)
{}
void
membar_consumer(void)
{}
#else /* __lint */
#if defined(__amd64)
mfence /* lighter weight than lock; xorq $0,(%rsp) */
#else
/*
* On machines that support sfence and lfence, these
* memory barriers can be more precisely implemented
* without causing the whole world to stop
*/
#endif /* !__amd64 */
#endif /* __lint */
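/*
 * Illustrative sketch (not part of the build): the membar_*() entry points
 * map naturally onto fence operations, roughly as below with GCC builtins.
 * demo_* names are hypothetical; the kernel versions are the assembly above.
 */
#if 0	/* illustration only */
static void demo_membar_sync(void)     { __atomic_thread_fence(__ATOMIC_SEQ_CST); } /* mfence-class */
static void demo_membar_producer(void) { __atomic_thread_fence(__ATOMIC_RELEASE); } /* sfence-class */
static void demo_membar_consumer(void) { __atomic_thread_fence(__ATOMIC_ACQUIRE); } /* lfence-class */
#endif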
/*
* thread_onproc()
* Set thread in onproc state for the specified CPU.
* Also set the thread lock pointer to the CPU's onproc lock.
* Since the new lock isn't held, the store ordering is important.
* If not done in assembler, the compiler could reorder the stores.
*/
#if defined(__lint)

void
thread_onproc(kthread_id_t t, cpu_t *cp)
{
	t->t_state = TS_ONPROC;
	t->t_lockp = &cp->cpu_thread_lock;
}
#else /* __lint */
#if defined(__amd64)
#else
#endif /* !__amd64 */
#endif /* __lint */
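/*
 * Illustrative sketch (not part of the build): the reason this lives in
 * assembly is the required store order -- the state must be published
 * before the thread lock pointer is switched to the (unheld) onproc lock.
 * In C that intent would need an explicit barrier, roughly as below; the
 * demo_* name is hypothetical.
 */
#if 0	/* illustration only */
static void
demo_thread_onproc(kthread_id_t t, cpu_t *cp)
{
	t->t_state = TS_ONPROC;			/* must become visible first */
	__asm__ __volatile__("" ::: "memory");	/* keep the compiler from reordering */
	t->t_lockp = &cp->cpu_thread_lock;	/* then publish the new lock pointer */
}
#endif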
/*
* mutex_delay_default(void)
* Spins for approx a few hundred processor cycles and returns to caller.
*/
#if defined(__lint)

void
mutex_delay_default(void)
{}
#else /* __lint */
#if defined(__amd64)
jg 0b
#else
jg 0b
#endif /* !__amd64 */
#endif /* __lint */
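/*
 * Illustrative sketch (not part of the build): the assembly above is just a
 * short countdown loop (decrement and branch).  The iteration count below is
 * chosen only for illustration; demo_* names are hypothetical.
 */
#if 0	/* illustration only */
static void
demo_mutex_delay_default(void)
{
	volatile int i;

	/* burn roughly a few hundred cycles without touching shared memory */
	for (i = 100; i > 0; i--)
		;
}
#endif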