/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
* All Rights Reserved
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* General assembly language routines.
* It is the intent of this file to contain routines that are
* independent of the specific kernel architecture, and those that are
* common across kernel architectures.
* As architectures diverge, and implementations of specific
* architecture-dependent routines change, the routines should be moved
* from this file into the respective ../`arch -k`/subr.s file.
*/
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/privregs.h>
#include <sys/x86_archext.h>
#if defined(__lint)
#include <sys/archsystm.h>
#include <sys/byteorder.h>
#else /* __lint */
#include "assym.h"
#endif /* __lint */
#include <sys/dditypes.h>
/*
* on_fault()
 * Catch lofault faults.  Like setjmp, except it returns 1
 * if the code following it causes an uncorrectable fault.  Turned off
 * by calling no_fault().
*/
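/*
 * A sketch of the intended use, in C (hypothetical caller; ljb lives in
 * the caller's frame):
 *
 *	label_t ljb;
 *
 *	if (on_fault(&ljb)) {
 *		no_fault();		// a fault was taken below
 *		return (EFAULT);
 *	}
 *	... code that may fault ...
 *	no_fault();
 */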
#if defined(__lint)
/* ARGSUSED */
int
on_fault(label_t *ljb)
{ return (0); }
void
no_fault(void)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
*/
#if defined(lint)
void
on_trap_trampoline(void)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
 * more information about the on_trap() mechanism.  If the on_trap_data is the
 * same as the topmost stack element, we just modify that element.
 */
#if defined(lint)
/*ARGSUSED*/
int
on_trap(on_trap_data_t *otp, uint_t prot)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
je 0f /* don't modify t_ontrap */
je 0f /* don't modify t_ontrap */
#endif /* __i386 */
#endif /* __lint */
/*
 * Setjmp and longjmp implement non-local gotos using state vectors
 * of type label_t.
*/
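/*
 * A minimal sketch of the intended use (hypothetical caller):
 *
 *	label_t jb;
 *
 *	if (setjmp(&jb) == 0) {
 *		... normal path, which may eventually longjmp(&jb) ...
 *	} else {
 *		... control resumes here, with setjmp() returning 1 ...
 *	}
 */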
#if defined(__lint)
/* ARGSUSED */
int
setjmp(label_t *lp)
{ return (0); }

/* ARGSUSED */
void
longjmp(label_t *lp)
{}
#else /* __lint */
#if LABEL_PC != 0
#endif /* LABEL_PC != 0 */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* if a() calls b() calls caller(),
* caller() returns return address in a().
 * (Note: We assume a() and b() are C routines which do the normal entry/exit
 *  sequence.)
*/
#if defined(__lint)
caddr_t
caller(void)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* if a() calls callee(), callee() returns the
* return address in a();
*/
#if defined(__lint)
caddr_t
callee(void)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* return the current frame pointer
*/
#if defined(__lint)
greg_t
getfp(void)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Invalidate a single page table entry in the TLB
*/
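/*
 * On x86 this is the invlpg instruction; in GNU C terms the operation is
 * roughly (a sketch, not the routine below):
 *
 *	__asm__ __volatile__("invlpg (%0)" : : "r" (addr) : "memory");
 */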
#if defined(__lint)
/* ARGSUSED */
void
mmu_tlbflush_entry(caddr_t m)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
*/
#if defined(__lint)
ulong_t
getcr0(void)
{ return (0); }

/* ARGSUSED */
void
setcr0(ulong_t value)
{}

ulong_t
getcr2(void)
{ return (0); }

ulong_t
getcr3(void)
{ return (0); }

/* ARGSUSED */
void
setcr3(ulong_t val)
{}

void
reload_cr3(void)
{}

ulong_t
getcr4(void)
{ return (0); }

/* ARGSUSED */
void
setcr4(ulong_t val)
{}

#if defined(__amd64)

ulong_t
getcr8(void)
{ return (0); }

/* ARGSUSED */
void
setcr8(ulong_t val)
{}
#endif /* __amd64 */
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED*/
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Insert entryp after predp in a doubly linked list.
*/
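/*
 * In C terms the operation is (a sketch; the q_forw/q_back field names
 * are illustrative):
 *
 *	entryp->q_forw = predp->q_forw;
 *	entryp->q_back = predp;
 *	predp->q_forw->q_back = entryp;
 *	predp->q_forw = entryp;
 */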
#if defined(__lint)
/*ARGSUSED*/
void
_insque(caddr_t entryp, caddr_t predp)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Remove entryp from a doubly linked list
*/
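/*
 * In C terms (same illustrative field names as above):
 *
 *	entryp->q_back->q_forw = entryp->q_forw;
 *	entryp->q_forw->q_back = entryp->q_back;
 */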
#if defined(__lint)
/*ARGSUSED*/
void
_remque(caddr_t entryp)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
 * Returns the number of
 * non-NUL bytes in the string argument.
*/
#if defined(__lint)
/* ARGSUSED */
size_t
strlen(const char *str)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
/*
* This is close to a simple transliteration of a C version of this
* routine. We should either just -make- this be a C version, or
* justify having it in assembler by making it significantly faster.
*
* size_t
* strlen(const char *s)
* {
* const char *s0;
* #if defined(DEBUG)
* if ((uintptr_t)s < KERNELBASE)
* panic(.str_panic_msg);
* #endif
* for (s0 = s; *s; s++)
* ;
* return (s - s0);
* }
*/
#ifdef DEBUG
#endif /* DEBUG */
.align 4
#ifdef DEBUG
#endif /* DEBUG */
.align 4
.align 4
#endif /* __i386 */
#ifdef DEBUG
.text
.string "strlen: argument below kernelbase"
#endif /* DEBUG */
#endif /* __lint */
/*
 * Berkeley 4.3 introduced symbolically named interrupt levels
 * as a way to deal with priority in a machine independent fashion.
 * Numbered priorities are machine specific, and should be
 * discouraged where possible.
*
* Note, for the machine specific priorities there are
* examples listed for devices that use a particular priority.
* It should not be construed that all devices of that
 * type should be at that priority.  It is simply where
 * the current devices fit into the priority scheme based
 * upon time criticality.
*
* The underlying assumption of these assignments is that
* IPL 10 is the highest level from which a device
* routine can call wakeup. Devices that interrupt from higher
* levels are restricted in what they can do. If they need
 * kernel services they should schedule a routine at a lower
* level (via software interrupt) to do the required
* processing.
*
* Examples of this higher usage:
* Level Usage
* 14 Profiling clock (and PROM uart polling clock)
* 12 Serial ports
*
* The serial ports request lower level processing on level 6.
*
* Also, almost all splN routines (where N is a number or a
* mnemonic) will do a RAISE(), on the assumption that they are
* never used to lower our priority.
* The exceptions are:
* spl8() Because you can't be above 15 to begin with!
* splzs() Because this is used at boot time to lower our
* priority, to allow the PROM to poll the uart.
* spl0() Used to lower priority to 0.
*/
#if defined(__lint)
int spl0(void) { return (0); }
int spl6(void) { return (0); }
int spl7(void) { return (0); }
int spl8(void) { return (0); }
int splhigh(void) { return (0); }
int splhi(void) { return (0); }
int splzs(void) { return (0); }
#else /* __lint */
/* reg = cpu->cpu_m.cpu_pri; */
/* cpu->cpu_m.cpu_pri; */
/* reg = cpu->cpu_m.cpu_pri; */
/* cpu->cpu_m.cpu_pri; */
/*
* Macro to raise processor priority level.
* Avoid dropping processor priority if already at high level.
* Also avoid going below CPU->cpu_base_spl, which could've just been set by
* a higher-level interrupt thread that just blocked.
*/
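/*
 * In C terms, RAISE(level) behaves roughly as follows (a sketch using
 * the names from the comments above, not the macro itself):
 *
 *	cli();
 *	newipl = MAX(level, CPU->cpu_base_spl);
 *	oldipl = cpu->cpu_m.cpu_pri;
 *	if (newipl > oldipl)
 *		cpu->cpu_m.cpu_pri = newipl;
 *	... invoke the setspl routine (or just sti, once patched) ...
 *	return (oldipl);
 */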
#if defined(__amd64)
cli; \
cli; \
#endif /* __i386 */
/*
* Macro to set the priority to a specified level.
* Avoid dropping the priority below CPU->cpu_base_spl.
*/
#if defined(__amd64)
cli; \
cli; \
#endif /* __i386 */
/* locks out all interrupts, including memory errors */
SETPRI(15)
/* just below the level that profiling runs */
RAISE(13)
/* sun specific - highest priority onboard serial i/o asy ports */
/*
* should lock out clocks and all interrupts,
* as you can see, there are exceptions
*/
#if defined(__amd64)
.align 16
/*
* If we aren't using cr8 to control ipl then we patch this
* with a jump to slow_setsplhi
*/
/*
* enable interrupts
*/
nop /* patch this to a sti when a proper setspl routine appears */
.align 16
/*
* enable interrupts
*
* (we patch this to an sti once a proper setspl routine
* is installed)
*/
nop /* patch this to a sti when a proper setspl routine appears */
#endif /* __i386 */
/* allow all interrupts */
SETPRI(0)
#endif /* __lint */
/*
* splr is like splx but will only raise the priority and never drop it
*/
#if defined(__lint)
/* ARGSUSED */
int
splr(int level)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
nop /* patch this to a sti when a proper setspl routine appears */
ret /* else return the current level */
nop /* patch this to a sti when a proper setspl routine appears */
ret /* else return the current level */
#endif /* __i386 */
#endif /* __lint */
/*
* splx - set PIL back to that indicated by the level passed as an argument,
* or to the CPU's base priority, whichever is higher.
 * Needs to fall through to spl to save cycles.
* Algorithm for spl:
*
* turn off interrupts
*
* if (CPU->cpu_base_spl > newipl)
* newipl = CPU->cpu_base_spl;
* oldipl = CPU->cpu_pridata->c_ipl;
* CPU->cpu_pridata->c_ipl = newipl;
*
 *	// indirectly call function to set spl values (usually setpicmasks)
* setspl(); // load new masks into pics
*
 * Be careful not to set the priority lower than CPU->cpu_base_pri;
 * even though it may seem we're raising the priority, it could be set
 * higher at any time by an interrupt routine, so we must block interrupts
 * and look at CPU->cpu_base_pri.
*/
#if defined(__lint)
/* ARGSUSED */
void
splx(int level)
{}
#else /* __lint */
#if defined(__amd64)
cli /* disable interrupts */
/*FALLTHRU*/
.align 4
spl:
/*
* New priority level is in %edi, cpu struct pointer is in %rcx
*/
/*
* If we aren't using cr8 to control ipl then we patch this
* with a jump to slow_spl
*/
/* stack now 16-byte aligned */
nop /* patch this to a sti when a proper setspl routine appears */
/* stack now 16-byte aligned */
/*
* enable interrupts
*/
nop /* patch this to a sti when a proper setspl routine appears */
cli /* disable interrupts */
/*FALLTHRU*/
.align 4
/*
* New priority level is in %edx
* (doing this early to avoid an AGI in the next instruction)
*/
/*
* Before dashing off, check that setsplsti has been patched.
*/
/*
* enable interrupts
*/
nop /* patch this to a sti when a proper setspl routine appears */
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
void
install_spl(void)
{}
#else /* __lint */
#if defined(__amd64)
jmp 1f
2:
/*
* Patch spl functions to use slow spl method
*/
/*
* Ensure %cr8 is zero since we aren't using it
*/
jmp 1f
#endif /* __i386 */
#endif /* __lint */
/*
* Get current processor interrupt level
*/
#if defined(__lint)
int
getpil(void)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__i386)
/*
* Read and write the %gs register
*/
#if defined(__lint)
/*ARGSUSED*/
uint16_t
getgs(void)
{ return (0); }
/*ARGSUSED*/
void
setgs(uint16_t sel)
{}
#else /* __lint */
#endif /* __lint */
#endif /* __i386 */
#if defined(__lint)
void
pc_reset(void)
{}
#else /* __lint */
1:
jnz 1b
#define RESET_METHOD_KBC 1
#define RESET_METHOD_PORT92 2
#define RESET_METHOD_PCI 4
#if defined(__i386)
#endif
jz 1f
/
/
1:
#if defined(__i386)
#endif
jz 3f
/
/
je 1f
testb $1, %al / If bit 0
jz 2f / is clear, jump to perform the reset
andb $0xfe, %al / otherwise,
outb (%dx) / clear bit 0 first, then
2:
orb $1, %al / Set bit 0
outb (%dx) / and reset the system
1:
call wait_500ms
3:
#if defined(__i386)
testl $RESET_METHOD_PCI, pc_reset_methods
#elif defined(__amd64)
testl $RESET_METHOD_PCI, pc_reset_methods(%rip)
#endif
jz 4f
/ Try the PCI (soft) reset vector (should work on all modern systems,
/ but has been shown to cause problems on 450NX systems, and some newer
/ systems (e.g. ATI IXP400-equipped systems))
/ When resetting via this method, 2 writes are required. The first
/ targets bit 1 (0=hard reset without power cycle, 1=hard reset with
/ power cycle).
/ The reset occurs on the second write, during bit 2's transition from
/ 0->1.
4:
/
/
#if defined(__amd64)
pushq $0x0
pushl $0x0
#endif
/*NOTREACHED*/
#endif /* __lint */
/*
* C callable in and out routines
*/
#if defined(__lint)
/* ARGSUSED */
void
outl(int port_address, uint32_t val)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
outw(int port_address, uint16_t val)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
outb(int port_address, uint8_t val)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
uint32_t
inl(int port_address)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
uint16_t
inw(int port_address)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
uint8_t
inb(int port_address)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
/*
* The arguments and saved registers are on the stack in the
* following order:
* | cnt | +16
* | *addr | +12
* | port | +8
* | eip | +4
* | esi | <-- %esp
* If additional values are pushed onto the stack, make sure
* to adjust the following constants accordingly.
*/
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
/*
* The arguments and saved registers are on the stack in the
* following order:
* | cnt | +16
* | *addr | +12
* | port | +8
* | eip | +4
* | esi | <-- %esp
* If additional values are pushed onto the stack, make sure
* to adjust the following constants accordingly.
*/
#endif /* __i386 */
#endif /* __lint */
/*
* Input a stream of 32-bit words.
* NOTE: count is a DWORD count.
*/
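/*
 * Equivalent C (a sketch; the assembly below uses a rep insl loop):
 *
 *	while (count-- > 0)
 *		*addr++ = inl(port);
 */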
#if defined(__lint)
/* ARGSUSED */
void
repinsd(int port, uint32_t *addr, int count)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Output a stream of bytes
* NOTE: count is a byte count
*/
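/*
 * Equivalent C (a sketch; the assembly below uses rep outsb):
 *
 *	while (count-- > 0)
 *		outb(port, *addr++);
 */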
#if defined(__lint)
/* ARGSUSED */
void
outsb(int port, uint8_t *addr, int count)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Output a stream of 32-bit words
* NOTE: count is a DWORD count
*/
#if defined(__lint)
/* ARGSUSED */
void
outsl(int port, uint32_t *addr, int count)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* void int3(void)
* void int18(void)
* void int20(void)
*/
#if defined(__lint)
void
int3(void)
{}
void
int18(void)
{}
void
int20(void)
{}
#else /* __lint */
int $T_BPTFLT
int $T_MCE
jz 1f
int $T_DBGENTR
1:
/* AMD Software Optimization Guide - Section 6.2 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
int
scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
/* rdi == size */
/* rsi == cp */
/* rdx == table */
/* rcx == mask */
.scanloop:
.scandone:
.scanloop:
.scandone:
#endif /* __i386 */
#endif /* __lint */
/*
* Replacement functions for ones that are normally inlined.
* In addition to the copy in i86.il, they are defined here just in case.
*/
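/*
 * For example, intr_clear() is roughly (a sketch in pseudo-C; the real
 * routine is the assembly below):
 *
 *	int
 *	intr_clear(void)
 *	{
 *		flags = <pushf; pop>;	// save the current flags
 *		cli();			// disable interrupts
 *		return (flags);		// caller hands this to intr_restore()
 *	}
 */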
#if defined(__lint)
int
intr_clear(void)
{ return 0; }
int
clear_int_flag(void)
{ return 0; }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
struct cpu *
curcpup(void)
{ return 0; }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
uint32_t
htonl(uint32_t i)
{ return (0); }

/* ARGSUSED */
uint32_t
ntohl(uint32_t i)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
/* XX64 there must be shorter sequences for this */
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
uint16_t
htons(uint16_t i)
{ return (0); }

/* ARGSUSED */
uint16_t
ntohs(uint16_t i)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
/* XX64 there must be better sequences for this */
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
intr_restore(int i)
{ return; }
/* ARGSUSED */
void
restore_int_flag(int i)
{ return; }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
void
sti(void)
{}
#else /* __lint */
#endif /* __lint */
#if defined(__lint)
dtrace_icookie_t
dtrace_interrupt_disable(void)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED*/
void
dtrace_interrupt_enable(dtrace_icookie_t cookie)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(lint)
void
dtrace_membar_producer(void)
{}
void
dtrace_membar_consumer(void)
{}
#else /* __lint */
/* AMD Software Optimization Guide - Section 6.2 */
/* AMD Software Optimization Guide - Section 6.2 */
#endif /* __lint */
#if defined(__lint)
kthread_id_t
threadp(void)
{ return ((kthread_id_t)0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Checksum routine for Internet Protocol Headers
*/
#if defined(__lint)
/* ARGSUSED */
unsigned int
ip_ocsum(
	ushort_t *address,	/* ptr to 1st message buffer */
	int halfword_count,	/* length of data */
	unsigned int sum)	/* partial checksum */
{
	int		i;
	unsigned int	psum = 0;	/* partial sum */

	for (i = 0; i < halfword_count; i++, address++) {
		psum += *address;
	}

	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	psum += sum;

	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	return (psum);
}
#else /* __lint */
#if defined(__amd64)
#ifdef DEBUG
jnb 1f
/*NOTREACHED*/
.string "ip_ocsum: address 0x%p below kernelbase\n"
1:
#endif
/* partial sum in %edx */
.ip_csum_aligned: /* XX64 opportunities for 8-byte operations? */
/* XX64 opportunities for prefetch? */
/* XX64 compute csum with 64 bit quantities? */
.only60:
.only56:
.only52:
.only48:
.only44:
.only40:
.only36:
.only32:
.only28:
.only24:
.only20:
.only16:
.only12:
.only8:
.only4:
.only0:
.align 8
.only60:
.only56:
.only52:
.only48:
.only44:
.only40:
.only36:
.only32:
.only28:
.only24:
.only20:
.only16:
.only12:
.only8:
.only4:
.only0:
.data
.align 4
#endif /* __i386 */
#endif /* __lint */
/*
* multiply two long numbers and yield a u_longlong_t result, callable from C.
* Provided to manipulate hrtime_t values.
*/
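/*
 * C equivalent (a sketch):
 *
 *	unsigned long long
 *	mul32(uint_t a, uint_t b)
 *	{
 *		return ((unsigned long long)a * b);
 *	}
 */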
#if defined(__lint)
/* result = a * b; */
/* ARGSUSED */
unsigned long long
mul32(uint_t a, uint_t b)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(notused)
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#endif /* __lint */
#endif /* notused */
#if defined(__lint)
/*ARGSUSED*/
void
scan_memory(caddr_t addr, size_t size)
{}
#else /* __lint */
#if defined(__amd64)
/* AMD Software Optimization Guide - Section 6.2 */
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED */
int
lowbit(ulong_t i)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED*/
int
highbit(ulong_t i)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED*/
uint64_t
rdmsr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
wrmsr(uint_t r, const uint64_t val)
{}

/*ARGSUSED*/
uint64_t
xrdmsr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
xwrmsr(uint_t r, const uint64_t val)
{}
void
invalidate_cache(void)
{}
#else /* __lint */
#define XMSR_ACCESS_VAL $0x9c5a203a
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED*/
void
getcregs(struct cregs *crp)
{}
#else /* __lint */
#if defined(__amd64)
rdmsr; \
.nocr4:
.skip:
/* AMD Software Optimization Guide - Section 6.2 */
#endif /* __i386 */
#endif /* __lint */
/*
* A panic trigger is a word which is updated atomically and can only be set
* once. We atomically store 0xDEFACEDD and load the old value. If the
* previous value was 0, we succeed and return 1; otherwise return 0.
* This allows a partially corrupt trigger to still trigger correctly. DTrace
* has its own version of this function to allow it to panic correctly from
* probe context.
*/
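/*
 * In C terms (a sketch; atomic_swap_32() stands in for the atomic
 * exchange the assembly performs):
 *
 *	int
 *	panic_trigger(int *tp)
 *	{
 *		if (atomic_swap_32((uint32_t *)tp, 0xdefacedd) == 0)
 *			return (1);	// we set the trigger first
 *		return (0);		// somebody beat us to it
 *	}
 */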
#if defined(__lint)
/*ARGSUSED*/
int
panic_trigger(int *tp)
{ return (0); }
/*ARGSUSED*/
int
dtrace_panic_trigger(int *tp)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
je 0f
je 0f
je 0f / return (1);
ret / return (0);
je 0f / return (1);
ret / return (0);
#endif /* __i386 */
#endif /* __lint */
/*
* The panic() and cmn_err() functions invoke vpanic() as a common entry point
* into the panic code implemented in panicsys(). vpanic() is responsible
* for passing through the format string and arguments, and constructing a
* regs structure on the stack into which it saves the current register
* values. If we are not dying due to a fatal trap, these registers will
* then be preserved in panicbuf as the current processor state. Before
 * invoking panicsys(), vpanic() activates the first panic trigger (see
 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
 * DTrace takes a slightly different panic path if it must panic from probe
* context. Instead of calling panic, it calls into dtrace_vpanic(), which
* sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
* branches back into vpanic().
*/
#if defined(__lint)
/*ARGSUSED*/
void
vpanic(const char *format, va_list alist)
{}

/*ARGSUSED*/
void
dtrace_vpanic(const char *format, va_list alist)
{}
#else /* __lint */
#if defined(__amd64)
pushfq /* | rfl | 0x50 */
je 0f
/*
* If panic_trigger() was successful, we are the first to initiate a
* panic: we now switch to the reserved panic_stack before continuing.
*/
/*
* Now that we've got everything set up, store the register values as
* they were when we entered vpanic() to the designated location in
* the regs structure we allocated on the stack.
*/
/*
* panicsys(format, alist, rp, on_panic_stack)
*/
pushfq /* | rfl | 0x50 */
je 0f / goto 0f;
/*
* If panic_trigger() was successful, we are the first to initiate a
* panic: we now switch to the reserved panic_stack before continuing.
*/
/*
* Now that we've got everything set up, store the register values as
* they were when we entered vpanic() to the designated location in
* the regs structure we allocated on the stack.
*/
#if !defined(__GNUC_AS__)
#else /* __GNUC_AS__ */
#endif /* __GNUC_AS__ */
#if !defined(__GNUC_AS__)
#else /* __GNUC_AS__ */
#endif /* __GNUC_AS__ */
#if !defined(__GNUC_AS__)
#else /* __GNUC_AS__ */
#endif /* __GNUC_AS__ */
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
void
hres_tick(void)
{}
volatile int hres_lock;
#else /* __lint */
.NWORD 0, 0
.long 0, 0
.long 0, 0
.long 0, 0
.long 0
/*
 * initialized to a non-zero value to make pc_gethrtime()
 * work correctly even before the clock is initialized
*/
.long ADJ_SHIFT
#if defined(__amd64)
/*
* We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
* hres_last_tick can only be modified while holding CLOCK_LOCK).
* At worst, performing this now instead of under CLOCK_LOCK may
* introduce some jitter in pc_gethrestime().
*/
.CL1:
.CL2:
.CL3:
/*
* compute the interval since last time hres_tick was called
* and adjust hrtime_base and hrestime accordingly
* hrtime_base is an 8 byte value (in nsec), hrestime is
* a timestruc_t (sec, nsec)
*/
/*
* Now that we have CLOCK_LOCK, we can update hres_last_tick
*/
/*
* release the hres_lock
*/
/*
* We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
* hres_last_tick can only be modified while holding CLOCK_LOCK).
* At worst, performing this now instead of under CLOCK_LOCK may
* introduce some jitter in pc_gethrestime().
*/
.CL1:
.CL2:
.CL3:
/*
* compute the interval since last time hres_tick was called
* and adjust hrtime_base and hrestime accordingly
* hrtime_base is an 8 byte value (in nsec), hrestime is
* timestruc_t (sec, nsec)
*/
/
/ Apply the adjustment, if any
/
/ #define	HRES_ADJ	(NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
/ (max_hres_adj)
/
/ void
/ adj_hrestime()
/ {
/	long long adj;
/
/	if (hrestime_adj == 0)
/		adj = 0;
/	else if (hrestime_adj > 0) {
/		if (hrestime_adj < HRES_ADJ)
/			adj = hrestime_adj;
/		else
/			adj = HRES_ADJ;
/	}
/	else {
/		if (hrestime_adj < -(HRES_ADJ))
/			adj = -(HRES_ADJ);
/		else
/			adj = hrestime_adj;
/	}
/
/	timedelta -= adj;
/	hrestime_adj = timedelta;
/	hrestime.tv_nsec += adj;
/
/	while (hrestime.tv_nsec >= NANOSEC) {
/		one_sec++;
/		hrestime.tv_sec++;
/		hrestime.tv_nsec -= NANOSEC;
/	}
/ }
.CL4:
/
/
/ !(hrestime_adj < HRES_ADJ)
/
/
/
/
/
/
/
/ (hrestime_adj > -HRES_ADJ)
/
/
/
/
/
.CL7:
.CL5:
1:
.CL8:
#endif /* __i386 */
#endif /* __lint */
/*
* void prefetch_smap_w(void *)
*
* Prefetch ahead within a linear list of smap structures.
* Not implemented for ia32. Stub for compatibility.
*/
#if defined(__lint)
/*ARGSUSED*/
void prefetch_smap_w(void *smp)
{}
#else /* __lint */
/* AMD Software Optimization Guide - Section 6.2 */
#endif /* __lint */
/*
* prefetch_page_r(page_t *)
* issue prefetch instructions for a page_t
*/
#if defined(__lint)
/*ARGSUSED*/
void
prefetch_page_r(void *pp)
{}
#else /* __lint */
/* AMD Software Optimization Guide - Section 6.2 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED*/
int
bcmp(const void *s1, const void *s2, size_t count)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#ifdef DEBUG
jb 0f
jnb 1f
1:
#endif /* DEBUG */
#define ARG_S1 8
#define ARG_S2 12
#define ARG_LENGTH 16
#ifdef DEBUG
jb 0f
jnb 1f
0: pushl $.bcmp_panic_msg
#endif /* DEBUG */
.align 4
.align 4
.equal:
.align 4
#endif /* __i386 */
#ifdef DEBUG
.text
.string "bcmp: arguments below kernelbase"
#endif /* DEBUG */
#endif /* __lint */