/* atomic.s revision 9d0d62ad2e60e8f742a2e723d06e88352ee6a1f3 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/asm_linkage.h>
/*
* ATOMIC_BO_ENABLE_SHIFT can be selectively defined by processors
* to enable exponential backoff. No definition means backoff is
* not desired i.e. backoff should be disabled.
* By default, the shift value is used to generate a power of 2
* value for backoff limit. In the kernel, processors scale this
* shift value with the number of online cpus.
*/
#if defined(_KERNEL)
/*
* Legacy kernel interfaces; they will go away (eventually).
*/
#ifdef ATOMIC_BO_ENABLE_SHIFT
#if !defined(lint)
#endif /* lint */
/*
* For the kernel, invoke processor specific delay routine to perform
* low-impact spin delay. The value of ATOMIC_BO_ENABLE_SHIFT is tuned
* with respect to the specific spin delay implementation.
*/
/* ; \
* Define a pragma weak reference to a cpu specific ; \
* delay routine for atomic backoff. For CPUs that ; \
* have no such delay routine defined, the delay becomes ; \
* just a simple tight loop. ; \
* ; \
* tmp1 = holds CPU specific delay routine ; \
* tmp2 = holds atomic routine's callee return address ; \
*/ ; \
label/**/0: ; \
nop /* delay slot : do nothing */ ; \
/*
 * For the kernel, we take cas failures into consideration
 * and also scale the backoff limit w.r.t. the number of cpus.
* For cas failures, we reset the backoff value to 1 if the cas
* failures exceed or equal to the number of online cpus. This
* will enforce some degree of fairness and prevent starvation.
 * The shift value ATOMIC_BO_ENABLE_SHIFT is scaled w.r.t. the
 * number of online cpus to obtain the actual final limit to use.
*/
label/**/0: ; \
#endif /* ATOMIC_BO_ENABLE_SHIFT */
#else /* _KERNEL */
/*
* libc atomics. None for now.
*/
#ifdef ATOMIC_BO_ENABLE_SHIFT
label/**/0:
#endif /* ATOMIC_BO_ENABLE_SHIFT */
#endif /* _KERNEL */
#ifdef ATOMIC_BO_ENABLE_SHIFT
/*
* ATOMIC_BACKOFF_INIT macro for initialization.
* backoff val is initialized to 1.
 * ncpu is initialized to 0.
* The cas_cnt counts the cas instruction failure and is
* initialized to 0.
*/
/*
* Main ATOMIC_BACKOFF_BACKOFF macro for backoff.
*/
nop ; \
#else /* ATOMIC_BO_ENABLE_SHIFT */
#endif /* ATOMIC_BO_ENABLE_SHIFT */
/*
* NOTE: If atomic_inc_8 and atomic_inc_8_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_inc_8_nv.
*/
/*
* NOTE: If atomic_dec_8 and atomic_dec_8_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_dec_8_nv.
*/
/*
* NOTE: If atomic_add_8 and atomic_add_8_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_add_8_nv.
*/
1:
/*
* NOTE: If atomic_inc_16 and atomic_inc_16_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_inc_16_nv.
*/
/*
* NOTE: If atomic_dec_16 and atomic_dec_16_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_dec_16_nv.
*/
/*
* NOTE: If atomic_add_16 and atomic_add_16_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_add_16_nv.
*/
1:
/*
* NOTE: If atomic_inc_32 and atomic_inc_32_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_inc_32_nv.
*/
/*
* NOTE: If atomic_dec_32 and atomic_dec_32_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_dec_32_nv.
*/
/*
* NOTE: If atomic_add_32 and atomic_add_32_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_add_32_nv.
*/
0:
1:
2:
/*
* NOTE: If atomic_inc_64 and atomic_inc_64_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_inc_64_nv.
*/
/*
* NOTE: If atomic_dec_64 and atomic_dec_64_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_dec_64_nv.
*/
/*
* NOTE: If atomic_add_64 and atomic_add_64_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_add_64_nv.
*/
0:
1:
2:
/*
* NOTE: If atomic_or_8 and atomic_or_8_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_or_8_nv.
*/
1:
/*
* NOTE: If atomic_or_16 and atomic_or_16_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_or_16_nv.
*/
1:
/*
* NOTE: If atomic_or_32 and atomic_or_32_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_or_32_nv.
*/
0:
1:
2:
/*
* NOTE: If atomic_or_64 and atomic_or_64_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_or_64_nv.
*/
0:
1:
2:
/*
* NOTE: If atomic_and_8 and atomic_and_8_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_and_8_nv.
*/
1:
/*
* NOTE: If atomic_and_16 and atomic_and_16_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_and_16_nv.
*/
1:
/*
* NOTE: If atomic_and_32 and atomic_and_32_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_and_32_nv.
*/
0:
1:
2:
/*
* NOTE: If atomic_and_64 and atomic_and_64_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_and_64_nv.
*/
0:
1:
2:
1:
2:
1:
2:
1:
1:
0:
1:
2:
0:
1:
2:
0:
1:
2:
5:
0:
1:
2:
5:
#if !defined(_KERNEL)
/*
* Spitfires and Blackbirds have a problem with membars in the
* delay slot (SF_ERRATA_51). For safety's sake, we assume
* that the whole world needs the workaround.
*/
#endif /* !_KERNEL */