/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/*
* Configuration guide
* -------------------
*
* There are 4 preprocessor symbols used to configure the bignum
* implementation. This file contains no logic to configure based on
* processor; we leave that to the Makefiles to specify.
*
* USE_FLOATING_POINT
* Meaning: There is support for a fast floating-point implementation of
* Montgomery multiply.
*
* PSR_MUL
* Meaning: There are processor-specific versions of the low level
* functions to implement big_mul. Those functions are: big_mul_set_vec,
* big_mul_add_vec, big_mul_vec, and big_sqr_vec. PSR_MUL implies support
* for all 4 functions. You cannot pick and choose which subset of these
* functions to support; that would lead to a rat's nest of #ifdefs.
*
* HWCAP
* Meaning: Call multiply support functions through a function pointer.
* On x86, there are multiple implementations for different hardware
* capabilities, such as MMX, SSE2, etc. Tests are made at run-time, when
* a function is first used. So, the support functions are called through
* a function pointer. There is no need for that on Sparc, because there
* is only one implementation; support functions are called directly.
* Later, if there were some new VIS instruction, or something, and a
* run-time test were needed, rather than variant kernel modules and
* libraries, then HWCAP would be defined for Sparc, as well.
*
* UMUL64
* Meaning: It is safe to use generic C code that assumes the existence
* of a 32 x 32 --> 64 bit unsigned multiply. If this is not defined,
* then the generic code for big_mul_add_vec() must necessarily be very slow,
* because it must fall back to using 16 x 16 --> 32 bit multiplication.
*
*/
#include "bignum.h"
#ifdef _KERNEL
#else
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#endif /* _KERNEL */
#ifdef __amd64
#ifdef _KERNEL
#else
#endif /* _KERNEL */
#endif /* __amd64 */
#ifdef _LP64 /* truncate 64-bit size_t to 32-bits */
#else /* size_t already 32-bits */
#endif
#ifdef _KERNEL
/*
* big_realloc()
* Allocate memory of newsize bytes and copy oldsize bytes
* to the newly-allocated memory, then free the
* previously-allocated memory.
* Note: newsize must be > oldsize
*/
void *
{
void *rv;
return (rv);
}
#else /* _KERNEL */
#ifndef MALLOC_DEBUG
#else
void
{
}
void *
{
void *rv;
return (rv);
}
#endif /* MALLOC_DEBUG */
/*
* printbignum()
* Print a BIGNUM type to stdout.
*/
void
{
int i;
for (i = a->len - 1; i >= 0; i--) {
#ifdef BIGNUM_CHUNK_32
if (((i & (BITSINBYTE - 1)) == 0) && (i != 0)) {
(void) printf("\n");
}
#else
if (((i & 3) == 0) && (i != 0)) { /* end of this chunk */
(void) printf("\n");
}
#endif
}
(void) printf("\n");
}
#endif /* _KERNEL */
#ifdef __amd64
/*
* Return 1 if executing on Intel, otherwise 0 (e.g., AMD64).
* Cache the result, as the CPU can't change.
*
* Note: the userland version uses getisax() and checks for an AMD-64-only
* feature. The kernel version uses cpuid_getvendor().
*/
static int
bignum_on_intel(void)
{
#ifdef _KERNEL
#else
#endif /* _KERNEL */
}
return (cached_result);
}
#endif /* __amd64 */
/*
* big_init()
* Initialize and allocate memory for a BIGNUM type.
*
* big_init(number, size) is equivalent to big_init1(number, size, NULL, 0)
*
* Note: call big_finish() to free memory allocated by big_init().
*
* Input:
* number Uninitialized memory for BIGNUM
* size Minimum size, in BIG_CHUNK_SIZE-bit words, required for BIGNUM
*
* Output:
* number Initialized BIGNUM
*
* Return BIG_OK on success or BIG_NO_MEM for an allocation error.
*/
{
return (BIG_NO_MEM);
}
return (BIG_OK);
}
/*
* big_init1()
* Initialize and, if needed, allocate memory for a BIGNUM type.
 * Use the buffer passed, buf, if any, instead of allocating memory
* if it's at least "size" bytes.
*
* Note: call big_finish() to free memory allocated by big_init().
*
* Input:
* number Uninitialized memory for BIGNUM
* size Minimum size, in BIG_CHUNK_SIZE-bit words, required for BIGNUM
* buf Buffer for storing a BIGNUM.
* If NULL, big_init1() will allocate a buffer
 * bufsize Size, in BIG_CHUNK_SIZE-bit words, of buf
*
* Output:
* number Initialized BIGNUM
*
* Return BIG_OK on success or BIG_NO_MEM for an allocation error.
*/
{
return (BIG_NO_MEM);
}
} else {
}
return (BIG_OK);
}
/*
* big_finish()
* Free memory, if any, allocated by big_init() or big_init1().
*/
void
{
}
}
/*
* bn->size should be at least
* (len + BIGNUM_WORDSIZE - 1) / BIGNUM_WORDSIZE bytes
* converts from byte-big-endian format to bignum format (words in
* little endian order, but bytes within the words big endian)
*/
void
{
int i, j;
if (slen == 0) {
return;
}
for (i = 0; i < slen / BIGNUM_WORDSIZE; i++) {
for (j = 1; j < BIGNUM_WORDSIZE; j++) {
}
}
if (offs > 0) {
}
}
}
/*
* copies the least significant len bytes if
* len < bn->len * BIGNUM_WORDSIZE
* converts from bignum format to byte-big-endian format.
* bignum format is words of type BIG_CHUNK_TYPE in little endian order.
*/
void
{
int i, j;
for (i = 0; i < slen / BIGNUM_WORDSIZE; i++) {
for (j = 0; j < BIGNUM_WORDSIZE; j++) {
word & 0xff;
}
}
if (offs > 0) {
for (i = slen % BIGNUM_WORDSIZE; i > 0; i --) {
}
}
} else {
for (j = 0; j < BIGNUM_WORDSIZE; j++) {
word & 0xff;
}
}
kn[i] = 0;
}
}
}
int
{
int l = 0, b = 0;
l = a->len - 1;
while ((l > 0) && (a->value[l] == 0)) {
l--;
}
b = BIG_CHUNK_SIZE;
c = a->value[l];
while ((b > 1) && ((c & BIG_CHUNK_HIGHBIT) == 0)) {
c = c << 1;
b--;
}
return (l * BIG_CHUNK_SIZE + b);
}
/*
* big_copy()
* Copy BIGNUM src to dest, allocating memory if needed.
*/
{
int i, len;
len--;
}
BIGNUM_WORDSIZE * len);
} else {
newptr = (BIG_CHUNK_TYPE *)
}
}
return (BIG_NO_MEM);
}
}
for (i = 0; i < len; i++) {
}
return (BIG_OK);
}
/*
* big_extend()
* Allocate memory to extend BIGNUM number to size bignum chunks,
* if not at least that size already.
*/
{
int i;
return (BIG_OK);
BIGNUM_WORDSIZE * size);
} else {
}
}
}
return (BIG_NO_MEM);
}
return (BIG_OK);
}
/* returns 1 if n == 0 */
int
{
int i, result;
result = 1;
for (i = 0; i < n->len; i++) {
if (n->value[i] != 0) {
result = 0;
}
}
return (result);
}
{
BIG_CHUNK_TYPE *r, *a, *b, *c;
} else {
}
return (err);
}
}
cy = 0;
for (i = 0; i < shorter; i++) {
ai = a[i];
if (r[i] > ai) {
cy = 0;
} else if (r[i] < ai) {
cy = 1;
}
}
for (; i < longer; i++) {
ai = c[i];
if (r[i] >= ai) {
cy = 0;
}
}
if (cy == 1) {
r[i] = cy;
} else {
}
return (BIG_OK);
}
/* caller must make sure that result has at least len words allocated */
void
{
int i;
cy = 1;
for (i = 0; i < len; i++) {
ai = a[i];
if (r[i] > ai) {
cy = 0;
} else if (r[i] < ai) {
cy = 1;
}
}
}
/* result=aa-bb it is assumed that aa>=bb */
{
int i, shorter;
BIG_CHUNK_TYPE *r, *a, *b;
} else {
}
return (err);
}
}
cy = 1;
for (i = 0; i < shorter; i++) {
ai = a[i];
if (r[i] > ai) {
cy = 0;
} else if (r[i] < ai) {
cy = 1;
}
}
ai = a[i];
if (r[i] < ai) {
cy = 1;
}
}
if (cy == 0) {
return (BIG_INVALID_ARGS);
} else {
return (BIG_OK);
}
}
/* returns -1 if |aa|<|bb|, 0 if |aa|==|bb| 1 if |aa|>|bb| */
int
{
int i;
return (1);
}
}
return (-1);
}
}
} else {
}
for (; i >= 0; i--) {
return (1);
return (-1);
}
}
return (0);
}
{
return (err);
}
return (err);
}
return (err);
}
} else {
return (err);
}
}
} else {
return (err);
}
} else {
return (err);
}
}
}
return (BIG_OK);
}
{
return (err);
}
return (err);
}
return (err);
}
} else {
return (err);
}
}
} else {
return (err);
}
} else {
return (err);
}
}
}
return (BIG_OK);
}
/* result = aa/2 */
{
int i;
BIG_CHUNK_TYPE *a, *r;
return (err);
}
}
cy = 0;
r[i] = (cy | (a[i] >> 1));
}
}
return (BIG_OK);
}
/* result = aa*2 */
{
int i, rsize;
BIG_CHUNK_TYPE *a, *r;
} else {
}
return (err);
}
}
cy = 0;
r[i] = (cy | (a[i] << 1));
}
return (BIG_OK);
}
/*
* returns aa mod b, aa must be nonneg, b must be a max
* (BIG_CHUNK_SIZE / 2)-bit integer
*/
static uint32_t
{
int i;
return (0);
}
}
}
/*
* result = aa - (2^BIG_CHUNK_SIZE)^lendiff * bb
* result->size should be at least aa->len at entry
* aa, bb, and result should be positive
*/
void
{
int i, lendiff;
for (i = 0; i < lendiff; i++) {
}
}
}
/*
* returns 1, 0, or -1 depending on whether |aa| > , ==, or <
* (2^BIG_CHUNK_SIZE)^lendiff * |bb|
* aa->len should be >= bb->len
*/
int
{
int lendiff;
}
/*
* result = aa * b where b is a max. (BIG_CHUNK_SIZE / 2)-bit positive integer.
* result should have enough space allocated.
*/
static void
{
int i;
BIG_CHUNK_TYPE *a, *r;
cy = 0;
ai = a[i];
r[i] = (t1 & BIG_CHUNK_LOWHALFBITS) |
}
r[i] = cy;
}
/*
* result = aa * b * 2^(BIG_CHUNK_SIZE / 2) where b is a max.
* (BIG_CHUNK_SIZE / 2)-bit positive integer.
* result should have enough space allocated.
*/
static void
{
int i;
BIG_CHUNK_TYPE *a, *r;
cy = 0;
ri = 0;
ai = a[i];
}
}
/* it is assumed that result->size is big enough */
void
{
int i;
if (offs == 0) {
}
return;
}
cy = 0;
}
if (cy != 0) {
} else {
}
}
/* it is assumed that result->size is big enough */
void
{
int i;
if (offs == 0) {
}
return;
}
}
}
/*
* it is assumed that aa and bb are positive
*/
{
BIG_CHUNK_TYPE *a, *b;
}
}
if ((blen == 1) && (b[0] == 0)) {
return (BIG_DIV_BY_0);
}
return (err);
}
}
return (BIG_OK);
}
return (err);
goto ret1;
goto ret2;
goto ret3;
goto ret4;
offs = 0;
}
while ((highb & BIG_CHUNK_HALF_HIGHBIT) == 0) {
offs++;
}
} else {
}
} else {
}
for (i = 0; i < rlen; i++) {
coeff++;
}
tlen--;
coeff++;
}
coeff++;
}
}
goto ret;
ret:
ret4:
big_finish(&tmp1);
ret3:
big_finish(&tmp2);
ret2:
big_finish(&bbhigh);
ret1:
big_finish(&bblow);
return (err);
}
/*
* If there is no processor-specific integer implementation of
* the lower level multiply functions, then this code is provided
* for big_mul_set_vec(), big_mul_add_vec(), big_mul_vec() and
* big_sqr_vec().
*
* There are two generic implementations. One that assumes that
* there is hardware and C compiler support for a 32 x 32 --> 64
* bit unsigned multiply, but otherwise is not specific to any
* processor, platform, or ISA.
*
* The other makes very few assumptions about hardware capabilities.
* It does not even assume that there is any implementation of a
* 32 x 32 --> 64 bit multiply that is accessible to C code and
 * appropriate to use. It falls back to constructing 32 x 32 --> 64 bit
* multiplies from 16 x 16 --> 32 bit multiplies.
*
*/
#if !defined(PSR_MUL)
#ifdef UMUL64
#if (BIG_CHUNK_SIZE == 32)
/*
 * UNROLL8 enables the 8-way unrolled multiply loops below.
 * NOTE(review): the UNROLL constant referenced by those loops is not
 * visible in this chunk -- presumably defined nearby; confirm.
 */
#define UNROLL8
/*
 * One round of vector-set multiply:
 *   r[R] = low 32 bits of (pf * d + cy);  cy = high 32 bits.
 * The a[] operand must already have been loaded into pf by the
 * caller or by the previous round.
 */
#define MUL_SET_VEC_ROUND_PREFETCH(R) \
p = pf * d; \
t = p + cy; \
r[R] = (uint32_t)t; \
cy = t >> 32
/*
 * Same round as above, for the tail where no further operand follows.
 * NOTE(review): the PREFETCH and NOPREFETCH bodies are identical here;
 * the PREFETCH variant would normally load the next a[] word into pf
 * first -- that load appears to be missing from this copy.  TODO
 * confirm against the upstream implementation.
 */
#define MUL_SET_VEC_ROUND_NOPREFETCH(R) \
p = pf * d; \
t = p + cy; \
r[R] = (uint32_t)t; \
cy = t >> 32
/*
 * One round of vector multiply-accumulate:
 *   r[R] = low 32 bits of (r[R] + pf * d + cy);  cy = high 32 bits.
 * As above, pf is expected to hold the current a[] operand.
 */
#define MUL_ADD_VEC_ROUND_PREFETCH(R) \
t = (uint64_t)r[R]; \
p = pf * d; \
t = p + t + cy; \
r[R] = (uint32_t)t; \
cy = t >> 32
/*
 * Tail version of the multiply-accumulate round.
 * NOTE(review): identical to the PREFETCH variant in this copy; see
 * the note on MUL_SET_VEC_ROUND_NOPREFETCH above.
 */
#define MUL_ADD_VEC_ROUND_NOPREFETCH(R) \
t = (uint64_t)r[R]; \
p = pf * d; \
t = p + t + cy; \
r[R] = (uint32_t)t; \
cy = t >> 32
#ifdef UNROLL8
/*
* r = a * b
* where r and a are vectors; b is a single 32-bit digit
*/
{
if (len == 0)
return (0);
cy = 0;
d = (uint64_t)b;
r += UNROLL;
a += UNROLL;
}
}
while (len > 1) {
++r;
++a;
--len;
}
if (len > 0) {
}
}
/*
* r += a * b
* where r and a are vectors; b is a single 32-bit digit
*/
{
if (len == 0)
return (0);
cy = 0;
d = (uint64_t)b;
while (len > 8) {
r += 8;
a += 8;
len -= 8;
}
if (len == 8) {
}
while (len > 1) {
++r;
++a;
--len;
}
if (len > 0) {
}
}
#endif /* UNROLL8 */
void
{
uint32_t d;
tr = r + 1;
ta = a;
while (--tlen > 0) {
tr += 2;
++ta;
}
s = (uint64_t)a[0];
s = s * s;
r[0] = (uint32_t)s;
cy = s >> 32;
r[1] = (uint32_t)p;
cy = p >> 32;
row = 1;
col = 2;
s = s * s;
t = p + s;
d = (uint32_t)t;
break;
cy = p >> 32;
++row;
col += 2;
}
}
#else /* BIG_CHUNK_SIZE == 64 */
/*
* r = r + a * digit, r and a are vectors of length len
* returns the carry digit
*/
{
int i;
cy1 = 0;
for (i = 0; i < len; i++) {
dlow * (a[i] & BIG_CHUNK_LOWHALFBITS) +
(r[i] & BIG_CHUNK_LOWHALFBITS);
(r[i] >> (BIG_CHUNK_SIZE / 2));
r[i] = (cy & BIG_CHUNK_LOWHALFBITS) |
}
cy1 = r[0] & BIG_CHUNK_LOWHALFBITS;
for (i = 0; i < len - 1; i++) {
dhigh * (a[i] & BIG_CHUNK_LOWHALFBITS) +
(r[i] >> (BIG_CHUNK_SIZE / 2));
r[i] = (cy1 & BIG_CHUNK_LOWHALFBITS) |
(r[i + 1] & BIG_CHUNK_LOWHALFBITS);
}
return (retcy);
}
/*
* r = a * digit, r and a are vectors of length len
* returns the carry digit
*/
{
int i;
ASSERT(r != a);
for (i = 0; i < len; i++) {
r[i] = 0;
}
}
void
{
int i;
ASSERT(r != a);
for (i = 1; i < len; ++i)
}
#endif /* BIG_CHUNK_SIZE == 32/64 */
#else /* ! UMUL64 */
#if (BIG_CHUNK_SIZE != 32)
#error "Don't use 64-bit chunks without defining UMUL64"
#endif
/*
* r = r + a * digit, r and a are vectors of length len
* returns the carry digit
*/
{
int i;
cy1 = 0;
for (i = 0; i < len; i++) {
}
cy1 = r[0] & 0xffff;
for (i = 0; i < len - 1; i++) {
}
return (retcy);
}
/*
* r = a * digit, r and a are vectors of length len
* returns the carry digit
*/
{
int i;
ASSERT(r != a);
for (i = 0; i < len; i++) {
r[i] = 0;
}
}
void
{
int i;
ASSERT(r != a);
for (i = 1; i < len; ++i)
}
#endif /* UMUL64 */
void
BIG_CHUNK_TYPE *b, int blen)
{
int i;
for (i = 1; i < blen; ++i)
}
#endif /* ! PSR_MUL */
/*
* result = aa * bb result->value should be big enough to hold the result
*
* Implementation: Standard grammar school algorithm
*
*/
{
BIG_CHUNK_TYPE *r, *t, *a, *b;
diff = 0;
} else {
if (diff < 0) {
}
}
alen--;
}
blen--;
}
return (err);
}
/* aa or bb might be an alias to result */
}
r[0] = 0;
return (BIG_OK);
}
for (i = 0; i < blen; i++) {
r[i] = b[i];
}
return (BIG_OK);
}
for (i = 0; i < alen; i++) {
r[i] = a[i];
}
return (BIG_OK);
}
return (err);
}
for (i = 0; i < rsize; i++) {
t[i] = 0;
}
BIG_SQR_VEC(t, a, alen);
} else if (blen > 0) {
}
if (t[rsize - 1] == 0) {
} else {
}
big_finish(&tmp1);
return (err);
}
/*
* big_mont_mul()
* Montgomery multiplication.
*
* Caller must ensure that a < n, b < n, ret->size >= 2 * n->len + 1,
* and that ret is not n.
*/
{
#ifdef __amd64
#define BIG_CPU_UNKNOWN 0
if (big_cpu == BIG_CPU_UNKNOWN) {
}
#endif /* __amd64 */
return (err);
}
rr[i] = 0;
}
#ifdef __amd64 /* pipelining optimization for Intel 64, but not AMD64 */
/*
* Perform the following in two for loops to reduce the
* dependency between computing the carryover bits with
* BIG_MUL_ADD_VEC() and adding them, thus improving pipelining.
*/
for (i = 0; i < nlen; i++) {
}
for (i = 0; i < nlen; i++) {
j = i + nlen;
rr[++j] += 1;
carry[i] = 1;
}
}
} else
#endif /* __amd64 */
{ /* no pipelining optimization */
for (i = 0; i < nlen; i++) {
j = i + nlen;
rr[j] += c;
while (rr[j] < c) {
rr[++j] += 1;
c = 1;
}
}
}
needsubtract = 0;
needsubtract = 1;
else {
needsubtract = 1;
break;
break;
}
}
}
if (needsubtract)
else {
for (i = 0; i < nlen; i++) {
}
}
/* Remove leading zeros, but keep at least 1 digit: */
;
return (BIG_OK);
}
{
int i;
result = 0;
for (i = 0; i < BIG_CHUNK_SIZE; i++) {
} else {
}
}
return (result);
}
int
{
int i, j;
for (i = n->len - 1; i > 0; i--) {
if (n->value[i] != 0) {
break;
}
}
t = n->value[i];
for (j = BIG_CHUNK_SIZE; j > 0; j--) {
if ((t & BIG_CHUNK_HIGHBIT) == 0) {
t = t << 1;
} else {
return (BIG_CHUNK_SIZE * i + j);
}
}
return (0);
}
/* caller must make sure that a < n */
{
int len, i;
return (err);
}
for (i = 0; i < 2 * len; i++) {
}
goto ret;
}
ret:
big_finish(&rr);
return (err);
}
/* caller must make sure that a < n */
{
int len, i;
!= BIG_OK) {
return (err);
}
for (i = 0; i < 2 * len; i++) {
}
goto ret;
}
}
goto ret;
}
ret:
big_finish(&rr);
return (err);
}
#ifdef USE_FLOATING_POINT
#else
#endif
/* ARGSUSED */
static BIG_ERR_CODE
{
int i, j, k, l, m, p;
nbits = big_numbits(e);
if (nbits < 50) {
groupbits = 1;
apowerssize = 1;
} else {
}
return (err);
}
/* clear the malloced bit to help cleanup */
for (i = 0; i < apowerssize; i++) {
}
for (i = 0; i < apowerssize; i++) {
BIG_OK) {
goto ret;
}
}
goto ret;
}
for (i = 1; i < apowerssize; i++) {
goto ret;
}
}
k = 0;
l = 0;
p = 0;
bitcount = 0;
for (i = nbits / BIG_CHUNK_SIZE; i >= 0; i--) {
for (j = bitind - 1; j >= 0; j--) {
goto ret;
}
} else {
bitcount++;
p = p * 2 + bit;
if (bit == 1) {
k = k + l + 1;
l = 0;
} else {
l++;
}
for (m = 0; m < k; m++) {
BIG_OK) {
goto ret;
}
}
&(apowers[p >> (l + 1)]),
goto ret;
}
for (m = 0; m < l; m++) {
BIG_OK) {
goto ret;
}
}
k = 0;
l = 0;
p = 0;
bitcount = 0;
}
}
}
}
for (m = 0; m < k; m++) {
goto ret;
}
}
if (p != 0) {
goto ret;
}
}
for (m = 0; m < l; m++) {
goto ret;
}
}
ret:
for (i = apowerssize - 1; i >= 0; i--) {
big_finish(&(apowers[i]));
}
big_finish(&tmp1);
return (err);
}
#ifdef USE_FLOATING_POINT
#ifdef _KERNEL
#include <sys/sysmacros.h>
/* the alignment for block stores to save fp registers */
extern void big_savefp(kfpu_t *);
extern void big_restorefp(kfpu_t *);
#endif /* _KERNEL */
/*
* This version makes use of floating point for performance
*/
static BIG_ERR_CODE
{
int i, j, k, l, m, p;
double dn0;
#ifdef _KERNEL
#ifdef DEBUG
if (!fpu_exists)
return (BIG_GENERAL_ERR);
#endif
#endif /* _KERNEL */
nbits = big_numbits(e);
if (nbits < 50) {
groupbits = 1;
apowerssize = 1;
} else {
}
for (i = 0; i < apowerssize; i++) {
}
err = BIG_NO_MEM;
goto ret;
}
err = BIG_NO_MEM;
goto ret;
}
err = BIG_NO_MEM;
goto ret;
}
err = BIG_NO_MEM;
goto ret;
}
err = BIG_NO_MEM;
goto ret;
}
err = BIG_NO_MEM;
goto ret;
}
for (i = 0; i < apowerssize; i++) {
sizeof (double))) == NULL) {
err = BIG_NO_MEM;
goto ret;
}
}
#if (BIG_CHUNK_SIZE == 32)
}
for (; i < nlen; i++) {
nint[i] = 0;
}
#else
}
nint[i] = 0;
}
#endif
#if (BIG_CHUNK_SIZE == 32)
for (i = 0; i < n->len; i++) {
}
for (; i < nlen; i++) {
nint[i] = 0;
}
#else
for (i = 0; i < n->len; i++) {
}
nint[i] = 0;
}
#endif
for (i = 1; i < apowerssize; i++) {
}
#if (BIG_CHUNK_SIZE == 32)
}
for (; i < nlen + 1; i++) {
prod[i] = 0;
}
#else
}
prod[i] = 0;
}
#endif
k = 0;
l = 0;
p = 0;
bitcount = 0;
for (i = nbits / BIG_CHUNK_SIZE; i >= 0; i--) {
for (j = bitind - 1; j >= 0; j--) {
} else {
bitcount++;
p = p * 2 + bit;
if (bit == 1) {
k = k + l + 1;
l = 0;
} else {
l++;
}
for (m = 0; m < k; m++) {
}
apowers[p >> (l + 1)],
for (m = 0; m < l; m++) {
}
k = 0;
l = 0;
p = 0;
bitcount = 0;
}
}
}
}
for (m = 0; m < k; m++) {
}
if (p != 0) {
}
for (m = 0; m < l; m++) {
}
#if (BIG_CHUNK_SIZE == 32)
for (i = 0; i < nlen; i++) {
}
;
#else
for (i = 0; i < nlen / 2; i++) {
}
;
#endif
ret:
for (i = apowerssize - 1; i >= 0; i--) {
}
}
}
}
}
}
}
#ifdef _KERNEL
#endif
return (err);
}
#endif /* USE_FLOATING_POINT */
{
BIG_OK) {
return (err);
}
goto ret1;
}
/* clear the malloced bit to help cleanup */
goto ret2;
}
goto ret;
}
}
if (big_cmp_abs(a, n) > 0) {
goto ret;
}
} else {
}
goto ret;
}
goto ret;
}
} else {
}
goto ret;
}
goto ret;
}
ret:
big_finish(&rr);
}
ret2:
big_finish(&tmp);
ret1:
big_finish(&ma);
return (err);
}
{
}
{
} else {
}
return (err);
}
goto ret1;
}
goto ret2;
}
/*
* check whether a is too short - to avoid timing attacks
*/
alen--;
}
/*
* a is too short, add p*q to it before
* taking it modulo p and q
* this will also affect timing, but this difference
* does not depend on p or q, only on a
* (in "normal" operation, this path will never be
* taken, so it is not a performance penalty
*/
goto ret;
}
goto ret;
}
goto ret;
}
goto ret;
}
} else {
goto ret;
}
goto ret;
}
}
BIG_OK) {
goto ret;
}
BIG_OK) {
goto ret;
}
goto ret;
}
goto ret;
}
goto ret;
}
}
goto ret;
}
ret:
big_finish(&tmp);
ret2:
big_finish(&aq);
ret1:
big_finish(&ap);
return (err);
}
{
}
#if !defined(NO_BIG_ONE)
#endif
#if !defined(NO_BIG_TWO)
#endif
{
int i, diff;
nbits = big_numbits(n);
return (err);
goto ret1;
goto ret2;
goto ret3;
}
if (highbits == BIG_CHUNK_SIZE) {
} else {
}
goto ret;
}
if (diff <= 0) {
goto ret;
}
goto ret;
if (diff > 0) {
t = high;
mid = t;
} else if (diff < 0) {
t = low;
mid = t;
} else {
goto ret;
}
}
ret:
ret3:
ret2:
ret1:
return (err);
}
{
if (big_is_zero(nn) ||
*jac = 0;
return (BIG_OK);
}
} else {
}
return (err);
}
goto ret1;
}
goto ret2;
}
n = &t1;
m = &t2;
*jac = 1;
while (big_cmp_abs(&big_One, m) != 0) {
if (big_is_zero(n)) {
*jac = 0;
goto ret;
}
if ((m->value[0] & 1) == 0) {
(void) big_half_pos(m, m);
} else if ((n->value[0] & 1) == 0) {
(void) big_half_pos(n, n);
} else {
}
goto ret;
}
t = tmp2;
tmp2 = m;
m = n;
n = t;
}
}
ret:
ret2:
ret1:
return (err);
}
{
int i;
uint32_t m, w;
if (big_cmp_abs(k, &big_One) == 0) {
return (BIG_OK);
}
return (err);
goto ret1;
goto ret2;
m = big_numbits(k);
w = (m - 1) / BIG_CHUNK_SIZE;
}
if (big_cmp_abs(k, &ki) != 0) {
}
for (i = 0; i < m; i++) {
goto ret;
}
goto ret;
}
BIG_OK) {
goto ret;
}
BIG_OK) {
goto ret;
}
} else {
goto ret;
}
goto ret;
}
}
if (bit == 0) {
w--;
}
}
ret:
ret2:
ret1:
return (err);
}
{
int e, i, jac;
if (big_cmp_abs(n, &big_One) == 0) {
return (BIG_FALSE);
}
if (big_cmp_abs(n, &big_Two) == 0) {
return (BIG_TRUE);
}
if ((n->value[0] & 1) == 0) {
return (BIG_FALSE);
}
BIG_OK) {
return (err);
}
goto ret1;
}
goto ret2;
}
goto ret3;
}
goto ret4;
}
e = 0;
while ((o.value[0] & 1) == 0) {
e++;
(void) big_half_pos(&o, &o); /* cannot fail */
}
BIG_OK) {
goto ret;
}
i = 0;
while ((i < e) &&
if ((err =
goto ret;
i++;
}
goto ret;
}
goto ret;
}
goto ret;
}
if (big_cmp_abs(&tmp, n) == 0) {
goto ret;
}
do {
(void) big_add_abs(&o, &o, &big_One);
goto ret;
}
goto ret;
}
} while (jac != -1);
goto ret;
}
if ((big_cmp_abs(&Lkminus1, &o) == 0) &&
} else {
}
ret:
ret4:
ret3:
ret2:
ret1:
if (o.malloced) big_finish(&o);
return (err);
}
{
return (big_isprime_pos_ext(n, NULL));
}
{
3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
51, 53, 59, 61, 67, 71, 73, 79, 83, 89, 91, 97 };
int i;
return (err);
}
/* CONSTCOND */
while (1) {
for (i = 0;
i < sizeof (smallprimes) / sizeof (smallprimes[0]); i++) {
p = smallprimes[i];
}
}
}
for (i = 0; i < SIEVESIZE; i++) {
if (sieve[i] == 0) {
return (err);
} else {
goto out;
}
}
}
BIG_OK) {
return (err);
}
}
}
out:
return (BIG_OK);
}
{
}
{
return (err);
return (err);
return (err);
}
return (BIG_OK);
}
/*
* given m and e, computes the rest in the equation
* gcd(m, e) = cm * m + ce * e
*/
{
int len;
if (big_cmp_abs(m, e) >= 0) {
} else {
}
return (err);
}
goto ret1;
}
goto ret2;
}
goto ret3;
}
goto ret4;
}
goto ret5;
}
goto ret6;
}
goto ret7;
}
goto ret8;
}
while (!big_is_zero(ri)) {
t = riminus2;
ri = t;
goto ret;
}
goto ret;
}
t = vmiminus1;
vmi = t;
goto ret;
}
goto ret;
}
t = veiminus1;
vei = t;
BIG_OK) {
goto ret;
}
}
goto ret;
}
goto ret;
}
}
ret:
ret8:
ret7:
ret6:
ret5:
ret4:
ret3:
ret2:
ret1:
return (err);
}
/*
* Get a rlen-bit random number in BIGNUM format. Caller-supplied
* (*rfunc)(void *dbuf, size_t dlen) must return 0 for success and
* -1 for failure. Note: (*rfunc)() takes length in bytes, not bits.
*/
{
int shift;
return (BIG_INVALID_ARGS);
/*
* Convert rlen bits to r->len words (32- or 64-bit), rbytes bytes
* and extend r if it's not big enough to hold the random number.
*/
return (BIG_NO_MEM);
#ifdef BIGNUM_CHUNK_32
#else
#endif
return (BIG_NO_RANDOM);
/*
* If the bit length is not a word boundary, shift the most
* significant word so that we have an exactly rlen-long number.
*/
return (BIG_OK);
}