i86_subr.s revision 7c478bd95313f5f23a4c958a745db2134aa03244
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
* All Rights Reserved
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* General assembly language routines.
* It is the intent of this file to contain routines that are
* independent of the specific kernel architecture, and those that are
* common across kernel architectures.
* As architectures diverge, and implementations of specific
* architecture-dependent routines change, the routines should be moved
* from this file into the respective ../`arch -k`/subr.s file.
*/
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/privregs.h>
#include <sys/x86_archext.h>
#if defined(__lint)
#include <sys/archsystm.h>
#include <sys/byteorder.h>
#else /* __lint */
#include "assym.h"
#endif /* __lint */
#include <sys/dditypes.h>
/*
* on_fault()
* Catch lofault faults. Like setjmp except it returns one
* if code following causes uncorrectable fault. Turned off
* by calling no_fault().
*/
#if defined(__lint)
/* ARGSUSED */
int
{ return (0); }
/*
 * Turn off the lofault fault catching established by on_fault()
 * (see the block comment above).  Lint-only stub; the real
 * implementation is in the non-__lint assembly section.
 */
void
no_fault(void)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
*/
#if defined(lint)
/*
 * Lint-only stub; per the comment above, the real routine performs
 * longjmp(&curthread->t_ontrap->ot_jmpbuf) if it is ever called.
 */
void
on_trap_trampoline(void)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* more information about the on_trap() mechanism. If the on_trap_data is the
* same as the topmost stack element, we just modify that element.
*/
#if defined(lint)
/*ARGSUSED*/
int
{ return (0); }
#else /* __lint */
#if defined(__amd64)
je 0f /* don't modify t_ontrap */
je 0f /* don't modify t_ontrap */
#endif /* __i386 */
#endif /* __lint */
/*
* Setjmp and longjmp implement non-local gotos using state vectors
* type label_t.
*/
#if defined(__lint)
/* ARGSUSED */
int
{ return (0); }
/* ARGSUSED */
void
{}
#else /* __lint */
#if LABEL_PC != 0
#endif /* LABEL_PC != 0 */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* if a() calls b() calls caller(),
* caller() returns return address in a().
 * (Assumes the normal C entry/exit call sequence.)
*/
#if defined(__lint)
caller(void)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* if a() calls callee(), callee() returns the
* return address in a();
*/
#if defined(__lint)
callee(void)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* return the current frame pointer
*/
#if defined(__lint)
getfp(void)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Invalidate a single page table entry in the TLB
*/
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
*/
#if defined(__lint)
getcr0(void)
{ return (0); }
/* ARGSUSED */
void
{}
getcr2(void)
{ return (0); }
getcr3(void)
{ return (0); }
/* ARGSUSED */
void
{}
/*
 * Lint-only stub; presumably the assembly version rewrites %cr3 with
 * its current value — assembly body not visible here, confirm against
 * the full source.
 */
void
reload_cr3(void)
{}
getcr4(void)
{ return (0); }
/* ARGSUSED */
void
{}
#if defined(__amd64)
getcr8(void)
{ return (0); }
/* ARGSUSED */
void
{}
#endif /* __amd64 */
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED*/
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Insert entryp after predp in a doubly linked list.
*/
#if defined(__lint)
/*ARGSUSED*/
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Remove entryp from a doubly linked list
*/
#if defined(__lint)
/*ARGSUSED*/
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Returns the number of
* non-NULL bytes in string argument.
*/
#if defined(__lint)
/* ARGSUSED */
{ return (0); }
#else /* __lint */
#if defined(__amd64)
/*
* This is close to a simple transliteration of a C version of this
* routine. We should either just -make- this be a C version, or
* justify having it in assembler by making it significantly faster.
*
* size_t
* strlen(const char *s)
* {
* const char *s0;
* #if defined(DEBUG)
* if ((uintptr_t)s < KERNELBASE)
* panic(.str_panic_msg);
* #endif
* for (s0 = s; *s; s++)
* ;
* return (s - s0);
* }
*/
#ifdef DEBUG
#endif /* DEBUG */
.align 4
#ifdef DEBUG
#endif /* DEBUG */
.align 4
.align 4
#endif /* __i386 */
#ifdef DEBUG
.text
.string "strlen: argument below kernelbase"
#endif /* DEBUG */
#endif /* __lint */
/*
 * Berkeley 4.3 introduced symbolically named interrupt levels
 * as a way to deal with priority in a machine independent fashion.
* Numbered priorities are machine specific, and should be
* discouraged where possible.
*
* Note, for the machine specific priorities there are
* examples listed for devices that use a particular priority.
* It should not be construed that all devices of that
 * type should be at that priority. It is currently where
 * the current devices fit into the priority scheme based
* upon time criticalness.
*
* The underlying assumption of these assignments is that
* IPL 10 is the highest level from which a device
* routine can call wakeup. Devices that interrupt from higher
* levels are restricted in what they can do. If they need
 * kernel services they should schedule a routine at a lower
* level (via software interrupt) to do the required
* processing.
*
* Examples of this higher usage:
* Level Usage
* 14 Profiling clock (and PROM uart polling clock)
* 12 Serial ports
*
* The serial ports request lower level processing on level 6.
*
* Also, almost all splN routines (where N is a number or a
* mnemonic) will do a RAISE(), on the assumption that they are
* never used to lower our priority.
* The exceptions are:
* spl8() Because you can't be above 15 to begin with!
* splzs() Because this is used at boot time to lower our
* priority, to allow the PROM to poll the uart.
* spl0() Used to lower priority to 0.
*/
#if defined(__lint)
/*
 * Lint-only stubs for the spl (set priority level) entry points.
 * The real implementations are the RAISE()/SETPRI() assembly
 * expansions in the non-__lint section below; see the block comment
 * above for which routines raise versus lower priority.
 */
int spl0(void) { return (0); }
int spl6(void) { return (0); }
int spl7(void) { return (0); }
int spl8(void) { return (0); }
int splhigh(void) { return (0); }
int splhi(void) { return (0); }
int splzs(void) { return (0); }
#else /* __lint */
/* reg = cpu->cpu_m.cpu_pri; */
/* cpu->cpu_m.cpu_pri; */
/* reg = cpu->cpu_m.cpu_pri; */
/* cpu->cpu_m.cpu_pri; */
/*
* Macro to raise processor priority level.
* Avoid dropping processor priority if already at high level.
* Also avoid going below CPU->cpu_base_spl, which could've just been set by
* a higher-level interrupt thread that just blocked.
*/
#if defined(__amd64)
cli; \
cli; \
#endif /* __i386 */
/*
* Macro to set the priority to a specified level.
* Avoid dropping the priority below CPU->cpu_base_spl.
*/
#if defined(__amd64)
cli; \
cli; \
#endif /* __i386 */
/* locks out all interrupts, including memory errors */
SETPRI(15)
/* just below the level that profiling runs */
RAISE(13)
/* sun specific - highest priority onboard serial i/o asy ports */
/*
* should lock out clocks and all interrupts,
* as you can see, there are exceptions
*/
#if defined(__amd64)
.align 16
/*
* If we aren't using cr8 to control ipl then we patch this
* with a jump to slow_setsplhi
*/
/*
* enable interrupts
*/
nop /* patch this to a sti when a proper setspl routine appears */
.align 16
/*
* enable interrupts
*
* (we patch this to an sti once a proper setspl routine
* is installed)
*/
nop /* patch this to a sti when a proper setspl routine appears */
#endif /* __i386 */
/* allow all interrupts */
SETPRI(0)
#endif /* __lint */
/*
* splr is like splx but will only raise the priority and never drop it
*/
#if defined(__lint)
/* ARGSUSED */
int
{ return (0); }
#else /* __lint */
#if defined(__amd64)
nop /* patch this to a sti when a proper setspl routine appears */
ret /* else return the current level */
nop /* patch this to a sti when a proper setspl routine appears */
ret /* else return the current level */
#endif /* __i386 */
#endif /* __lint */
/*
* splx - set PIL back to that indicated by the level passed as an argument,
* or to the CPU's base priority, whichever is higher.
* Needs to be fall through to spl to save cycles.
* Algorithm for spl:
*
* turn off interrupts
*
* if (CPU->cpu_base_spl > newipl)
* newipl = CPU->cpu_base_spl;
* oldipl = CPU->cpu_pridata->c_ipl;
* CPU->cpu_pridata->c_ipl = newipl;
*
* /indirectly call function to set spl values (usually setpicmasks)
* setspl(); // load new masks into pics
*
* Be careful not to set priority lower than CPU->cpu_base_pri,
* even though it seems we're raising the priority, it could be set
* higher at any time by an interrupt routine, so we must block interrupts
* and look at CPU->cpu_base_pri
*/
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
cli /* disable interrupts */
/*FALLTHRU*/
.align 4
spl:
/*
* New priority level is in %edi, cpu struct pointer is in %rcx
*/
/*
* If we aren't using cr8 to control ipl then we patch this
* with a jump to slow_spl
*/
/* stack now 16-byte aligned */
nop /* patch this to a sti when a proper setspl routine appears */
/* stack now 16-byte aligned */
/*
* enable interrupts
*/
nop /* patch this to a sti when a proper setspl routine appears */
cli /* disable interrupts */
/*FALLTHRU*/
.align 4
/*
* New priority level is in %edx
* (doing this early to avoid an AGI in the next instruction)
*/
/*
* Before dashing off, check that setsplsti has been patched.
*/
/*
* enable interrupts
*/
nop /* patch this to a sti when a proper setspl routine appears */
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*
 * Lint-only stub.  The assembly version appears to patch the spl
 * fast paths at startup (see the "patch this to a sti" notes in the
 * spl/splr/splx assembly above) — confirm against the full source.
 */
void
install_spl(void)
{}
#else /* __lint */
#if defined(__amd64)
jmp 1f
2:
/*
* Patch spl functions to use slow spl method
*/
/*
* Ensure %cr8 is zero since we aren't using it
*/
jmp 1f
#endif /* __i386 */
#endif /* __lint */
/*
* Get current processor interrupt level
*/
#if defined(__lint)
/*
 * Return the current processor interrupt level (lint-only stub;
 * real implementation in the assembly section below).
 */
int
getpil(void)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__i386)
/*
* Read and write the %gs register
*/
#if defined(__lint)
/*ARGSUSED*/
getgs(void)
{ return (0); }
/*ARGSUSED*/
void
{}
#else /* __lint */
#endif /* __lint */
#endif /* __i386 */
#if defined(__lint)
/*
 * Lint-only stub for pc_reset(); the real routine resets the machine
 * and does not return (note the NOTREACHED annotation in the assembly
 * section below).
 */
void
pc_reset(void)
{}
#else /* __lint */
/*NOTREACHED*/
#endif /* __lint */
/*
* C callable in and out routines
*/
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
inl(int port_address)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
inw(int port_address)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
inb(int port_address)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
/*
* The arguments and saved registers are on the stack in the
* following order:
* | cnt | +16
* | *addr | +12
* | port | +8
* | eip | +4
* | esi | <-- %esp
* If additional values are pushed onto the stack, make sure
* to adjust the following constants accordingly.
*/
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
/*
* The arguments and saved registers are on the stack in the
* following order:
* | cnt | +16
* | *addr | +12
* | port | +8
* | eip | +4
* | esi | <-- %esp
* If additional values are pushed onto the stack, make sure
* to adjust the following constants accordingly.
*/
#endif /* __i386 */
#endif /* __lint */
/*
* Input a stream of 32-bit words.
* NOTE: count is a DWORD count.
*/
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Output a stream of bytes
* NOTE: count is a byte count
*/
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Output a stream of 32-bit words
* NOTE: count is a DWORD count
*/
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* void int20(void)
*/
#if defined(__lint)
/*
 * Lint-only stub for int20().  The assembly version conditionally
 * executes an "int $20" software interrupt (see the jz/int sequence
 * below).
 */
void
int20(void)
{}
#else /* __lint */
jz 1f
int $20
1:
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
int
{ return (0); }
#else /* __lint */
#if defined(__amd64)
/* rdi == size */
/* rsi == cp */
/* rdx == table */
/* rcx == mask */
.scanloop:
.scandone:
.scanloop:
.scandone:
#endif /* __i386 */
#endif /* __lint */
/*
* Replacement functions for ones that are normally inlined.
* In addition to the copy in i86.il, they are defined here just in case.
*/
#if defined(__lint)
/*
 * Lint-only stubs.  Per the comment above, these replace routines that
 * are normally inlined (copies also live in i86.il).  Presumably they
 * disable interrupts and return the prior flags state — confirm
 * against the assembly implementations.
 */
int
intr_clear(void)
{ return 0; }
int
clear_int_flag(void)
{ return 0; }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*
 * Lint-only stub; presumably returns a pointer to the current CPU's
 * cpu structure — assembly body not visible here.
 */
struct cpu *
curcpup(void)
{ return 0; }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
{ return (0); }
/* ARGSUSED */
{ return (0); }
#else /* __lint */
#if defined(__amd64)
/* XX64 there must be shorter sequences for this */
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
{ return (0); }
/* ARGSUSED */
{ return (0); }
#else /* __lint */
#if defined(__amd64)
/* XX64 there must be better sequences for this */
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/* ARGSUSED */
void
{ return; }
/*
 * Lint-only stub; presumably restores the interrupt-flag state (i)
 * previously captured by clear_int_flag()/intr_clear() — TODO confirm
 * against the assembly implementation.
 */
/* ARGSUSED */
void
restore_int_flag(int i)
{ return; }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*
 * Lint-only stub for sti() (presumably executes the x86 "sti"
 * instruction to enable interrupts — assembly body not visible here).
 */
void
sti(void)
{}
#else /* __lint */
#endif /* __lint */
#if defined(__lint)
dtrace_interrupt_disable(void)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED*/
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(lint)
/*
 * Lint-only stubs for the DTrace memory-barrier primitives
 * (real implementations in the non-lint section).
 */
void
dtrace_membar_producer(void)
{}
void
dtrace_membar_consumer(void)
{}
#else /* __lint */
#endif /* __lint */
#if defined(__lint)
threadp(void)
{ return ((kthread_id_t)0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
/*
* Checksum routine for Internet Protocol Headers
*/
#if defined(__lint)
/* ARGSUSED */
unsigned int
int halfword_count, /* length of data */
unsigned int sum) /* partial checksum */
{
int i;
unsigned int psum = 0; /* partial sum */
for (i = 0; i < halfword_count; i++, address++) {
}
while ((psum >> 16) != 0) {
}
while ((psum >> 16) != 0) {
}
return (psum);
}
#else /* __lint */
#if defined(__amd64)
#ifdef DEBUG
jnb 1f
/*NOTREACHED*/
.string "ip_ocsum: address 0x%p below kernelbase\n"
1:
#endif
/* partial sum in %edx */
.ip_csum_aligned: /* XX64 opportunities for 8-byte operations? */
/* XX64 opportunities for prefetch? */
/* XX64 compute csum with 64 bit quantities? */
.only60:
.only56:
.only52:
.only48:
.only44:
.only40:
.only36:
.only32:
.only28:
.only24:
.only20:
.only16:
.only12:
.only8:
.only4:
.only0:
.align 8
.only60:
.only56:
.only52:
.only48:
.only44:
.only40:
.only36:
.only32:
.only28:
.only24:
.only20:
.only16:
.only12:
.only8:
.only4:
.only0:
.data
.align 4
#endif /* __i386 */
#endif /* __lint */
/*
* multiply two long numbers and yield a u_longlong_t result, callable from C.
* Provided to manipulate hrtime_t values.
*/
#if defined(__lint)
/* result = a * b; */
/* ARGSUSED */
unsigned long long
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*
 * 64-bit multiply support routine (lint-only stub).  See the
 * longhand base-2**32 multiplication comment with the assembly
 * implementation below; overflow is not checked.
 */
/* ARGSUSED */
long long
__mul64(long long a, long long b)
{ return (0); }
#else /* __lint */
/*
* function __mul64(A, B:Longint):Longint;
* {Overflow is not checked}
*
* We essentially do multiply by longhand, using base 2**32 digits.
* a b parameter A
* x c d parameter B
* ---------
* ad bd
* ac bc
* -----------------
* ac ad+bc bd
*
* We can ignore ac and top 32 bits of ad+bc: if <> 0, overflow happened.
*/
ret $16
#endif /* __lint */
#if defined(__lint)
/*
* C support for 64-bit modulo and division.
* GNU routines callable from C (though generated by the compiler).
* Hand-customized compiler output - see comments for details.
*/
/*
 * Lint-only stubs for the GCC 64-bit division/modulo support
 * routines; quotient/remainder semantics are documented with the
 * assembly implementations below.
 */
/* unsigned 64-bit divide: quotient of a / b */
/*ARGSUSED*/
unsigned long long
__udivdi3(unsigned long long a, unsigned long long b)
{ return (0); }
/* unsigned 64-bit modulo: remainder of a / b */
/*ARGSUSED*/
unsigned long long
__umoddi3(unsigned long long a, unsigned long long b)
{ return (0); }
/* signed 64-bit divide: quotient of a / b */
/*ARGSUSED*/
long long
__divdi3(long long a, long long b)
{ return (0); }
/* signed 64-bit modulo: remainder of a / b */
/*ARGSUSED*/
long long
__moddi3(long long a, long long b)
{ return (0); }
/* ARGSUSED */
{ return (0); }
/* ARGSUSED */
{ return (0); }
/* ARGSUSED */
{ return (0); }
/* ARGSUSED */
{ return (0); }
/* ARGSUSED */
{ return (0); }
/* ARGSUSED */
{ return (0); }
#else /* __lint */
/*
*
* Hand-customized compiler output: the non-GCC entry points depart from
* the SYS V ABI by requiring their arguments to be popped, and in the
* [u]divrem64 cases returning the remainder in %ecx:%esi. Note the
* compiler-generated use of %edx:%eax for the first argument of
* internal entry points.
*
* Inlines for speed:
* - counting the number of leading zeros in a word
* - multiplying two 32-bit numbers giving a 64-bit result
* - dividing a 64-bit number by a 32-bit number, giving both quotient
* and remainder
* - subtracting two 64-bit results
*/
/
/ /* give index of highest bit */
/ #define HIBIT(a, r) \
/
/ /* multiply two uint32_ts resulting in a uint64_t */
/ asm("mull %2" \
/
/ /* divide a uint64_t by a uint32_t */
/ asm("divl %2" \
/
/ /* subtract two uint64_ts (with borrow) */
/ asm("subl %4,%0\n\tsbbl %5,%1" \
/
/ /*
/ * Unsigned division with remainder.
/ * Divide two uint64_ts, and calculate remainder.
/ */
/ uint64_t
/ {
/ /* simple cases: y is a single uint32_t */
/ if (HI(y) == 0) {
/
/ /* calculate q1 */
/ /* result is a single uint32_t, use one division */
/ q1 = 0;
/ } else {
/ /* result is a double uint32_t, use two divisions */
/ }
/
/ /* calculate q0 and remainder */
/
/ /* return remainder */
/
/ /* return result */
/
/ /* HI(x) < HI(y) => x < y => result is 0 */
/
/ /* return remainder */
/ *pmod = x;
/
/ /* return result */
/ return (0);
/
/ } else {
/ /*
/ * uint64_t by uint64_t division, resulting in a one-uint32_t
/ * result
/ */
/
/ /* normalize by shifting x and y so MSB(y) == 1 */
/
/ if (normshift == 0) {
/ /* no shifting needed, and x < 2*y so q <= 1 */
/
/ /* if x >= y then q = 1 (note x1 >= y1) */
/ q0 = 1;
/ /* subtract y from x to get remainder */
/ } else {
/ q0 = 0;
/ }
/
/ /* return remainder */
/
/ /* return result */
/ return (q0);
/
/ } else {
/ /*
/ * the last case: result is one uint32_t, but we need to
/ * normalize
/ */
/
/ /* normalize y */
/
/ /* normalize x (we need 3 uint32_ts!!!) */
/
/ /* estimate q0, and reduce x to a two uint32_t value */
/
/ /* adjust q0 down if too high */
/ /*
/ * because of the limited range of x2 we can only be
/ * one off
/ */
/ q0--;
/ }
/ /* return remainder */
/ /* subtract product from x to get remainder */
/
/ /* return result */
/ return (q0);
/ }
/ }
/ }
.LL4:
.align 16
.LL2:
.LL22:
.align 16
.LL21:
.align 16
.LL6:
.LL10:
.LL11:
.align 16
.LL8:
.LL17:
.LL18:
.LL15:
.LL16:
.align 16
.LL24:
.LL14:
.LL23:
/*
* Unsigned division without remainder.
*/
/ uint64_t
/ {
/ if (HI(y) == 0) {
/ /* simple cases: y is a single uint32_t */
/
/ /* calculate q1 */
/ /* result is a single uint32_t, use one division */
/ q1 = 0;
/ } else {
/ /* result is a double uint32_t, use two divisions */
/ }
/
/ /* calculate q0 and remainder */
/
/ /* return result */
/
/ /* HI(x) < HI(y) => x < y => result is 0 */
/
/ /* return result */
/ return (0);
/
/ } else {
/ /*
/ * uint64_t by uint64_t division, resulting in a one-uint32_t
/ * result
/ */
/ unsigned normshift;
/
/ /* normalize by shifting x and y so MSB(y) == 1 */
/
/ if (normshift == 0) {
/ /* no shifting needed, and x < 2*y so q <= 1 */
/
/ /* if x >= y then q = 1 (note x1 >= y1) */
/ q0 = 1;
/ /* subtract y from x to get remainder */
/ /* A_SUB2(y0, y1, x0, x1); */
/ } else {
/ q0 = 0;
/ }
/
/ /* return result */
/ return (q0);
/
/ } else {
/ /*
/ * the last case: result is one uint32_t, but we need to
/ * normalize
/ */
/
/ /* normalize y */
/
/ /* normalize x (we need 3 uint32_ts!!!) */
/
/ /* estimate q0, and reduce x to a two uint32_t value */
/
/ /* adjust q0 down if too high */
/ /*
/ * because of the limited range of x2 we can only be
/ * one off
/ */
/ q0--;
/ }
/ /* return result */
/ return (q0);
/ }
/ }
/ }
.LL28:
.LL25:
.align 16
.LL26:
.LL34:
.LL35:
.LL45:
.align 16
.LL32:
.LL40:
.LL41:
.LL39:
.LL46:
.LL44:
.LL38:
.LL43:
/*
* __udivdi3
*
* Perform division of two unsigned 64-bit quantities, returning the
* quotient in %edx:%eax.
*/
/*
* __umoddi3
*
* Perform division of two unsigned 64-bit quantities, returning the
* remainder in %edx:%eax.
*/
/*
* __divdi3
*
* Perform division of two signed 64-bit quantities, returning the
* quotient in %edx:%eax.
*/
/ int64_t
/ {
/ int negative;
/
/ if (x < 0) {
/ negative = 1;
/ } else {
/ xt = x;
/ negative = 0;
/ }
/ if (y < 0) {
/ negative ^= 1;
/ } else {
/ yt = y;
/ }
/ }
.LL53:
.LL54:
.align 16
.LL55:
.align 16
.LL56:
/*
* __moddi3
*
* Perform division of two signed 64-bit quantities, returning the
 * remainder in %edx:%eax.
*/
/ int64_t
/ {
/
/ if (x < 0) {
/ } else {
/ xt = x;
/ }
/ if (y < 0) {
/ } else {
/ yt = y;
/ }
/ }
.LL61:
.align 16
.LL63:
.align 16
.LL64:
.align 16
.LL65:
/*
* __udiv64
*
* Perform division of two unsigned 64-bit quantities, returning the
* quotient in %edx:%eax. __udiv64 pops the arguments on return,
*/
ret $16
/*
* __urem64
*
* Perform division of two unsigned 64-bit quantities, returning the
* remainder in %edx:%eax. __urem64 pops the arguments on return
*/
ret $16
/*
* __div64
*
* Perform division of two signed 64-bit quantities, returning the
* quotient in %edx:%eax. __div64 pops the arguments on return.
*/
/ int64_t
/ {
/ int negative;
/
/ if (x < 0) {
/ negative = 1;
/ } else {
/ xt = x;
/ negative = 0;
/ }
/ if (y < 0) {
/ negative ^= 1;
/ } else {
/ yt = y;
/ }
/ }
.LL82:
.LL83:
ret $16
.align 16
.LL84:
.align 16
.LL85:
/*
* __rem64
*
* Perform division of two signed 64-bit quantities, returning the
* remainder in %edx:%eax. __rem64 pops the arguments on return.
*/
/ int64_t
/ {
/
/ if (x < 0) {
/ } else {
/ xt = x;
/ }
/ if (y < 0) {
/ } else {
/ yt = y;
/ }
/ }
.LL90:
ret $16
.align 16
.LL92:
.align 16
.LL93:
.align 16
.LL94:
ret $16
/*
* __udivrem64
*
* Perform division of two unsigned 64-bit quantities, returning the
* quotient in %edx:%eax, and the remainder in %ecx:%esi. __udivrem64
* pops the arguments on return.
*/
ret $16
/*
* Signed division with remainder.
*/
/ int64_t
/ {
/ int negative;
/
/ if (x < 0) {
/ negative = 1;
/ } else {
/ xt = x;
/ negative = 0;
/ }
/ if (y < 0) {
/ negative ^= 1;
/ } else {
/ yt = y;
/ }
/ }
.LL70:
.LL71:
.LL72:
.align 16
.LL73:
.align 16
.LL74:
.align 16
.LL75:
/*
* __divrem64
*
* Perform division of two signed 64-bit quantities, returning the
* quotient in %edx:%eax, and the remainder in %ecx:%esi. __divrem64
* pops the arguments on return.
*/
ret $16
#endif /* __lint */
#endif /* __i386 */
#if defined(notused)
#if defined(__lint)
/* ARGSUSED */
void
{}
#else /* __lint */
#endif /* __lint */
#endif /* notused */
#if defined(__lint)
/*ARGSUSED*/
void
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED */
int
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED*/
int
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED*/
{ return (0); }
/*ARGSUSED*/
void
{}
/*
 * Lint-only stub; presumably the assembly version invalidates the
 * processor caches (e.g. via wbinvd) — body not visible here, confirm
 * against the full source.
 */
void
invalidate_cache(void)
{}
#else /* __lint */
#if defined(__amd64)
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED*/
{}
#else /* __lint */
#if defined(__amd64)
rdmsr; \
.nocr4:
.skip:
#endif /* __i386 */
#endif /* __lint */
/*
* A panic trigger is a word which is updated atomically and can only be set
* once. We atomically store 0xDEFACEDD and load the old value. If the
* previous value was 0, we succeed and return 1; otherwise return 0.
* This allows a partially corrupt trigger to still trigger correctly. DTrace
* has its own version of this function to allow it to panic correctly from
* probe context.
*/
#if defined(__lint)
/*
 * Lint-only stub.  Per the block comment above, the real routine
 * atomically stores 0xDEFACEDD into *tp and returns 1 iff the
 * previous value was 0; otherwise it returns 0.
 */
/*ARGSUSED*/
int
panic_trigger(int *tp)
{ return (0); }
/*
 * DTrace's private copy of panic_trigger(), usable from probe
 * context (see the block comment above).  Lint-only stub.
 */
/*ARGSUSED*/
int
dtrace_panic_trigger(int *tp)
{ return (0); }
#else /* __lint */
#if defined(__amd64)
je 0f
je 0f
je 0f / return (1);
ret / return (0);
je 0f / return (1);
ret / return (0);
#endif /* __i386 */
#endif /* __lint */
/*
* The panic() and cmn_err() functions invoke vpanic() as a common entry point
* into the panic code implemented in panicsys(). vpanic() is responsible
* for passing through the format string and arguments, and constructing a
* regs structure on the stack into which it saves the current register
* values. If we are not dying due to a fatal trap, these registers will
* then be preserved in panicbuf as the current processor state. Before
* invoking panicsys(), vpanic() activates the first panic trigger (see
* DTrace takes a slightly different panic path if it must panic from probe
* context. Instead of calling panic, it calls into dtrace_vpanic(), which
* sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
* branches back into vpanic().
*/
#if defined(__lint)
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
#else /* __lint */
#if defined(__amd64)
pushfq /* | rfl | 0x50 */
je 0f
/*
* If panic_trigger() was successful, we are the first to initiate a
* panic: we now switch to the reserved panic_stack before continuing.
*/
/*
* Now that we've got everything set up, store the register values as
* they were when we entered vpanic() to the designated location in
* the regs structure we allocated on the stack.
*/
/*
* panicsys(format, alist, rp, on_panic_stack)
*/
pushfq /* | rfl | 0x50 */
je 0f / goto 0f;
/*
* If panic_trigger() was successful, we are the first to initiate a
* panic: we now switch to the reserved panic_stack before continuing.
*/
/*
* Now that we've got everything set up, store the register values as
* they were when we entered vpanic() to the designated location in
* the regs structure we allocated on the stack.
*/
#if !defined(__GNUC_AS__)
#else /* __GNUC_AS__ */
#endif /* __GNUC_AS__ */
#if !defined(__GNUC_AS__)
#else /* __GNUC_AS__ */
#endif /* __GNUC_AS__ */
#if !defined(__GNUC_AS__)
#else /* __GNUC_AS__ */
#endif /* __GNUC_AS__ */
#endif /* __i386 */
#endif /* __lint */
#if defined(__lint)
void
hres_tick(void)
{}
volatile int hres_lock;
#else /* __lint */
.NWORD 0, 0
.long 0, 0
.long 0, 0
.long 0, 0
.long 0
/*
* initialized to a non zero value to make pc_gethrtime()
* work correctly even before clock is initialized
*/
.long ADJ_SHIFT
#if defined(__amd64)
/*
* We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
* hres_last_tick can only be modified while holding CLOCK_LOCK).
* At worst, performing this now instead of under CLOCK_LOCK may
* introduce some jitter in pc_gethrestime().
*/
.CL1:
.CL2:
.CL3:
/*
* compute the interval since last time hres_tick was called
* and adjust hrtime_base and hrestime accordingly
* hrtime_base is an 8 byte value (in nsec), hrestime is
* a timestruc_t (sec, nsec)
*/
/*
* Now that we have CLOCK_LOCK, we can update hres_last_tick
*/
/*
* release the hres_lock
*/
/*
* We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
* hres_last_tick can only be modified while holding CLOCK_LOCK).
* At worst, performing this now instead of under CLOCK_LOCK may
* introduce some jitter in pc_gethrestime().
*/
.CL1:
.CL2:
.CL3:
/*
* compute the interval since last time hres_tick was called
* and adjust hrtime_base and hrestime accordingly
* hrtime_base is an 8 byte value (in nsec), hrestime is
* timestruc_t (sec, nsec)
*/
/
/
/
/
/ (max_hres_adj)
/
/ void
/ adj_hrestime()
/ {
/ long long adj;
/
/ if (hrestime_adj == 0)
/ adj = 0;
/ else if (hrestime_adj > 0) {
/ if (hrestime_adj < HRES_ADJ)
/ adj = hrestime_adj;
/ else
/ }
/ else {
/ if (hrestime_adj < -(HRES_ADJ))
/ else
/ adj = hrestime_adj;
/ }
/
/ hrestime_adj = timedelta;
/
/ one_sec++;
/ }
/ }
.CL4:
/
/
/ !(hrestime_adj < HRES_ADJ)
/
/
/
/
/
/
/
/ (hrestime_adj > -HRES_ADJ)
/
/
/
/
/
.CL7:
.CL5:
1:
.CL8:
#endif /* __i386 */
#endif /* __lint */
/*
* void prefetch_smap_w(void *)
*
* Prefetch ahead within a linear list of smap structures.
* Not implemented for ia32. Stub for compatibility.
*/
#if defined(__lint)
/*
 * No-op compatibility stub: per the comment above, smap prefetch is
 * not implemented for ia32.
 */
/*ARGSUSED*/
void prefetch_smap_w(void *smp)
{}
#else /* __lint */
#endif /* __lint */
/*
* prefetch_page_r(page_t *)
* issue prefetch instructions for a page_t
*/
#if defined(__lint)
/*
 * Lint-only stub; per the comment above, issues prefetch
 * instructions for a page_t.
 */
/*ARGSUSED*/
void
prefetch_page_r(void *pp)
{}
#else /* __lint */
#endif /* __lint */
#if defined(__lint)
/*ARGSUSED*/
int
{ return (0); }
#else /* __lint */
#if defined(__amd64)
#ifdef DEBUG
jb 0f
jnb 1f
1:
#endif /* DEBUG */
#define ARG_S1 8
#define ARG_S2 12
#define ARG_LENGTH 16
#ifdef DEBUG
jb 0f
jnb 1f
0: pushl $.bcmp_panic_msg
#endif /* DEBUG */
.align 4
.align 4
.equal:
.align 4
#endif /* __i386 */
#ifdef DEBUG
.text
.string "bcmp: arguments below kernelbase"
#endif /* DEBUG */
#endif /* __lint */