/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2012 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2015, Joyent, Inc.
*/
/*
 * This file implements the kernel randomness entry points:
 * random_add_entropy(), random_add_pseudo_entropy(),
 * random_get_pseudo_bytes() and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache and
 * serve requests for high-quality randomness. If the cache does not have
 * enough bytes to satisfy a request, we pick a provider and call its
 * SPI routine.
 *
 * For random_get_pseudo_bytes(), a FIPS 186-2 style generator is keyed with
 * random bits from the cache as a seed. We create one pseudo-random generator
 * per CPU, kmem-magazine-style, to avoid cache line contention.
*
* LOCKING HIERARCHY:
* 1) rmp->rm_mag.rm_lock protects the per-cpu pseudo-random generators.
* 2) rndpool_lock protects the high-quality randomness pool.
* It may be locked while a rmp->rm_mag.rm_lock is held.
*
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
*/
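/*
 * A hedged sketch of the lock ordering above, as it occurs in the
 * generator rekey path below: the per-CPU magazine lock is taken first,
 * and rndpool_lock is only tried while holding it, never the reverse:
 *
 *	mutex_enter(&rmp->rm_mag.rm_lock);
 *	if (mutex_tryenter(&rndpool_lock)) {
 *		(extract seed bytes from the high-quality pool)
 *		mutex_exit(&rndpool_lock);
 *	}
 *	...
 *	mutex_exit(&rmp->rm_mag.rm_lock);
 */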
#include <sys/sysmacros.h>
#include <rng/fips_random.h>
typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;

/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use SHA1
 * routines directly instead of using k-API because we can't return any
 * error code in the /dev/urandom case, and we can get an error using k-API
 * if a mechanism is disabled.
 */
#define	HASHSIZE		20	/* SHA1 digest length in bytes */
#define	BYTES_IN_WORD		4

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE		20
/*
* Cache of random bytes implemented as a circular buffer. findex and rindex
* track the front and back of the circular buffer.
*/
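/*
 * A minimal sketch of that state; the sizes below are illustrative and
 * RNDPOOLSIZE is assumed to be a power of two, since indices are masked
 * with (RNDPOOLSIZE - 1) elsewhere in this file.
 */
#define	RNDPOOLSIZE	1024			/* assumed pool size */
#define	MINEXTRACTBYTES	20			/* assumed */
#define	MAXEXTRACTBYTES	1024			/* assumed */
static uint8_t rndpool[RNDPOOLSIZE];		/* the circular buffer */
static int findex, rindex;			/* front and back indices */
static int rnbyte_cnt;				/* bytes in the cache */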
static kmutex_t rndpool_lock;		/* protects the cache */
					/* and the global variables */
static kcondvar_t rndpool_read_cv;	/* serializes poll/read syscalls */
static int num_waiters;			/* # threads waiting to read */
/* LINTED E_STATIC_UNUSED */
static timeout_id_t kcf_rndtimeout_id;
static void rnd_handler(void *);
static void rnd_alloc_magazines(void);
static void rnd_fips_discard_initial(void);
static void rnd_init2(void *);
static void rnd_schedule_timeout(void);
/*
* Called from kcf:_init()
*/
void
kcf_rnd_init(void)
{
	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);
/*
* Add bytes to the cache using
 * . 2 unpredictable times: the high-resolution time since boot,
 *   and the current time of day.
* This is used only to make the timeout value in the timer
* unpredictable.
*/
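	/*
	 * A hedged sketch of the two time samples described above;
	 * rndc_addbytes() mixes them into the pool. The counters are
	 * reset below, so these bytes only perturb the pool contents.
	 */
	{
		hrtime_t ts = gethrtime();
		time_t now;

		rndc_addbytes((uint8_t *)&ts, sizeof (ts));
		(void) drv_getparm(TIME, &now);
		rndc_addbytes((uint8_t *)&now, sizeof (now));
	}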
rnbyte_cnt = 0;
num_waiters = 0;
}
/*
* This is called via the system taskq, so that we can do further
* initializations that have to wait until the kcf module itself is
* done loading. (After kcf:_init returns.)
*/
static void
rnd_init2(void *arg)
{
/*
* This will load a randomness provider; typically "swrand",
* but could be another provider if so configured.
 */
	rngmech_type = crypto_mech2id(SUN_RANDOM);
/* Update rng_prov_found etc. */
(void) kcf_rngprov_check();
	/* FIPS 140-2 init. */
	rnd_fips_discard_initial();

	/* Start rnd_handler calls. */
	rnd_schedule_timeout();
}
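/*
 * A hedged sketch of how kcf:_init() would defer to this routine via the
 * system taskq (the actual call site lives in kcf itself, not this file):
 *
 *	(void) taskq_dispatch(system_taskq, rnd_init2, NULL, TQ_SLEEP);
 */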
/*
* Return TRUE if at least one provider exists that can
* supply random numbers.
*/
static boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
/*
* We logged a warning once about no provider being available
* and now a provider became available. So, set the flag so
* that we can log again if the problem recurs.
*/
		rng_prov_found = B_TRUE;
		rng_ok_to_log = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
}
}
/*
* Pick a software-based provider and submit a request to seed
* its random number generator.
*/
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
	    CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}
/*
* This routine is called for blocking reads.
*
* The argument is_taskq_thr indicates whether the caller is
* the taskq thread dispatched by the timeout handler routine.
* In this case, we cycle through all the providers
* submitting a request to each provider to generate random numbers.
*
* For other cases, we pick a provider and submit a request to generate
* random numbers. We retry using another provider if we get an error.
*
* Returns the number of bytes that are written to 'ptr'. Returns -1
* if no provider is found. ptr and need are unchanged.
*/
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_prov_tried_t *list = NULL;
prov_cnt++;
if (rv == CRYPTO_SUCCESS) {
total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
break;
}
}
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL)
			break;
}
}
}
	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}
return (total_bytes);
}
static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);
	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}
/*
* Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT modes.
*
* Returns the number of bytes that are written to 'ptr'. Returns -1
* if no provider is found. ptr and len are unchanged.
*/
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len)
{
	int rv, blen, total_bytes;
	uchar_t *rndbuf = NULL;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;

		prov_cnt++;
switch (pd->pd_prov_type) {
case CRYPTO_HW_PROVIDER:
/*
* We have to allocate a buffer here as we can not
* assume that the input buffer will remain valid
* when the callback comes. We use a fixed size buffer
 * to simplify the bookkeeping.
*/
			if ((rndbuf = kmem_alloc(MINEXTRACTBYTES,
			    KM_NOSLEEP)) == NULL) {
				return (total_bytes);
			}
break;
case CRYPTO_SW_PROVIDER:
/*
* We do not need to allocate a buffer in the software
* provider case as there is no callback involved. We
* avoid any extra data copy by directly passing 'ptr'.
*/
break;
}
if (rv == CRYPTO_SUCCESS) {
switch (pd->pd_prov_type) {
case CRYPTO_HW_PROVIDER:
/*
* Since we have the input buffer handy,
* we directly copy to it rather than
* adding to the pool.
*/
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
total_bytes += blen;
break;
case CRYPTO_SW_PROVIDER:
total_bytes += len;
len = 0;
break;
}
}
/*
* We free the buffer in the callback routine
* for the CRYPTO_QUEUED case.
*/
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}
if (len == 0) {
break;
}
if (rv != CRYPTO_SUCCESS) {
/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) ==
			    NULL) {
break;
}
}
}
}
	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}
return (total_bytes);
}
static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	(void) rngprov_getbytes(tbuf, len, B_TRUE);
	rngprov_task_idle = B_TRUE;
}
/*
* Returns "len" random or pseudo-random bytes in *ptr.
* Will block if not enough random bytes are available and the
* call is blocking.
*
* Called with rndpool_lock held (allowing caller to do optimistic locking;
* releases the lock before return).
*/
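/*
 * A sketch of the calling convention described above; whatever the
 * outcome, the caller must not touch rndpool_lock afterwards:
 *
 *	mutex_enter(&rndpool_lock);
 *	error = rnd_get_bytes(buf, buflen, BLOCKING_EXTRACT);
 *	(rndpool_lock has already been dropped here)
 */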
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how)
{
	int got;
	size_t bytes;
/*
* Check if the request can be satisfied from the cache
* of random bytes.
*/
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);
	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, B_FALSE)) == -1)
			break;	/* No provider found */
		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;
	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
while (len > 0) {
if (how == BLOCKING_EXTRACT) {
/* Check if there is enough */
while (rnbyte_cnt < MINEXTRACTBYTES) {
num_waiters++;
if (cv_wait_sig(&rndpool_read_cv,
&rndpool_lock) == 0) {
num_waiters--;
return (EINTR);
}
num_waiters--;
}
}
		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		if (bytes > 0) {
			rndc_getbytes(ptr, bytes);
			len -= bytes;
			ptr += bytes;
		} else if (how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we can not block.
			 * The ALWAYS_EXTRACT caller runs an additional
			 * generation algorithm. So, there is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
}
}
	mutex_exit(&rndpool_lock);
	return (0);
}
int
{
int error;
return (error);
return (0);
}
/*
* Revisit this if the structs grow or we come up with a better way
* of cache-line-padding structures.
*/
#define	RND_CPU_CACHE_SIZE	64	/* assumed cache-line size */
#define	RND_CPU_PAD_SIZE	(RND_CPU_CACHE_SIZE * 6)
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
	sizeof (rndmag_t))
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
* a per-CPU instance of the pseudo-random generator. We have it much easier
* than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
*
* Note that this usage is preemption-safe; a thread
* entering a critical section remembers which generator it locked
* and unlocks the same one; should it be preempted and wind up running on
* a different CPU, there will be a brief period of increased contention
* before it exits the critical section but nothing will melt.
*/
typedef struct rndmag_s
{
	kmutex_t	rm_lock;	/* protects this magazine (see above) */
	uint8_t		*rm_rptr;	/* current read pointer */
	uint8_t		*rm_eptr;	/* end of buffer; rptr == eptr: empty */
	uint32_t	rm_oblocks;	/* output blocks since last rekey */
	uint32_t	rm_key[HASHSIZE/BYTES_IN_WORD];		/* FIPS key */
	uint32_t	rm_seed[HASHSIZE/BYTES_IN_WORD];	/* FIPS seed */
	uint32_t	rm_previous[HASHSIZE/BYTES_IN_WORD];	/* last block */
} rndmag_t;

typedef struct rndmag_pad_s
{
	rndmag_t	rm_mag;
	uint8_t		rm_pad[RND_CPU_PAD];	/* cache-line padding */
} rndmag_pad_t;
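/*
 * The base array of per-CPU magazines; the name and the indexing scheme
 * below are assumptions for illustration. A thread picks its magazine by
 * CPU sequence id, e.g.:
 *
 *	rndmag_pad_t *rmp = (rndmag_pad_t *)((uint8_t *)rndmag +
 *	    (CPU->cpu_seqid % random_max_ncpus) * rndmag_size);
 */
static rndmag_pad_t *rndmag;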
/*
* FIPS 186-2 algorithm with a key created from bytes extracted
* from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
* is generated before a new key is obtained.
*
* Note that callers to this routine are likely to assume it can't fail.
*
* Called with rmp locked; releases lock.
*/
static int
rnd_generate_pseudo_bytes(rndmag_pad_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len, size;
	int nblock = howmany(len, HASHSIZE);	/* output blocks needed */
	int i;
	uint32_t oblocks;
	uint32_t tempout[HASHSIZE/BYTES_IN_WORD];
	uint32_t seed[HASHSIZE/BYTES_IN_WORD];
	hrtime_t timestamp;
	uint8_t *src, *dst;
	/* Nothing is being asked */
	if (len == 0) {
		mutex_exit(&rmp->rm_mag.rm_lock);
		return (0);
	}
do {
/*
* Contention-avoiding rekey: see if
* the pool is locked, and if so, wait a bit.
* Do an 'exponential back-in' to ensure we don't
* run too long without rekey.
*/
/*
* Decaying exponential back-in for rekey.
*/
if ((rnbyte_cnt < MINEXTRACTBYTES) ||
(!mutex_tryenter(&rndpool_lock))) {
goto punt;
}
} else {
}
		/* Get a new chunk of entropy */
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
		    HASHSIZE, ALWAYS_EXTRACT);
		oblocks = 0;
}
punt:
	/* Mix the current time into the seed material (assumed step) */
	timestamp = gethrtime();
	src = (uint8_t *)&timestamp;
	dst = (uint8_t *)rmp->rm_mag.rm_seed;
	for (i = 0; i < HASHSIZE; i++) {
		dst[i] ^= src[i % sizeof (timestamp)];
	}
	bcopy(rmp->rm_mag.rm_seed, seed, HASHSIZE);
	fips_random_inner(rmp->rm_mag.rm_key, tempout,
	    seed);
	if (bytes >= HASHSIZE) {
		size = HASHSIZE;
	} else {
		size = min(bytes, HASHSIZE);
	}
/*
* FIPS 140-2: Continuous RNG test - each generation
* of an n-bit block shall be compared with the previously
* generated block. Test shall fail if any two compared
* n-bit blocks are equal.
*/
	for (i = 0; i < HASHSIZE/BYTES_IN_WORD; i++) {
		if (tempout[i] != rmp->rm_mag.rm_previous[i])
			break;
	}
	if (i == HASHSIZE/BYTES_IN_WORD) {
		cmn_err(CE_WARN, "kcf_random: The value of the "
		    "block random bytes are same as the previous "
		    "one.\n");
		/* discard random bytes and return error */
		mutex_exit(&rmp->rm_mag.rm_lock);
		return (EIO);
	}
	bcopy(tempout, rmp->rm_mag.rm_previous,
	    HASHSIZE);
	bcopy(tempout, ptr, size);
	ptr += size;
	bytes -= size;
oblocks++;
nblock--;
} while (bytes > 0);
	/* Zero out sensitive information */
	bzero(seed, HASHSIZE);
	bzero(tempout, HASHSIZE);
	mutex_exit(&rmp->rm_mag.rm_lock);
	return (0);
}
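/*
 * For reference, the FIPS 186-2 Appendix 3.1 step implemented by
 * fips_random_inner() (declared in <rng/fips_random.h>) computes, for
 * each output block j, with b-bit state:
 *
 *	XVAL = (XKEY + XSEED_j) mod 2^b
 *	x_j = G(t, XVAL)
 *	XKEY = (1 + XKEY + x_j) mod 2^b
 */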
/*
* Per-CPU Random magazines.
*/
/*
 * The magazine layout is sized from
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;
/*
* Boot-time tunables, for experimentation.
*/
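/* The names are referenced below; the default values here are assumed. */
size_t rndmag_threshold = 2560;	/* requests above this bypass the buffer */
size_t rndbuf_len = 5120;	/* per-CPU buffer length */
size_t rndmag_size = 1280;	/* per-CPU magazine size */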
int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_pad_t *rmp;
	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/* Pick this CPU's magazine and lock it (see the sketch above) */
	rmp = (rndmag_pad_t *)((uint8_t *)rndmag +
	    (CPU->cpu_seqid % random_max_ncpus) * rndmag_size);
	mutex_enter(&rmp->rm_mag.rm_lock);
/*
* Fast path.
*/
for (;;) {
/*
* Big requests bypass buffer and tail-call the
* generate routine directly.
*/
		if (len > rndmag_threshold) {
			/* rnd_generate_pseudo_bytes() releases rm_lock */
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}
return (0);
}
/*
* End fast path.
*/
/*
* Note: We assume the generate routine always succeeds
* in this case (because it does at present..)
* It also always releases rm_lock.
*/
}
}
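/*
 * A hedged sketch of the buffered fast path referenced above: when this
 * CPU's magazine still holds enough bytes, copy them out and advance the
 * read pointer; otherwise fall through to the refill/generate path.
 *
 *	if (rmp->rm_mag.rm_rptr + len <= rmp->rm_mag.rm_eptr) {
 *		bcopy(rmp->rm_mag.rm_rptr, ptr, len);
 *		rmp->rm_mag.rm_rptr += len;
 *		mutex_exit(&rmp->rm_mag.rm_lock);
 *		return (0);
 *	}
 */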
/*
* We set up (empty) magazines for all of max_ncpus, possibly wasting a
* little memory on big systems that don't have the full set installed.
* See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in kcf_rnd_get_pseudo_bytes above on the first call for each
 * CPU.
*
* TODO: make rndmag_size tunable at run time!
*/
static void
rnd_alloc_magazines(void)
{
	rndmag_pad_t *rmp;
	int i;

	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;

	/* Read max_ncpus exactly once (see the comment above) */
	random_max_ncpus = max_ncpus;

	/* One padded magazine per CPU; rndmag is the assumed base array */
	rndmag = kmem_zalloc(rndmag_size * random_max_ncpus,
	    KM_SLEEP);
	for (i = 0; i < random_max_ncpus; i++) {
		rmp = (rndmag_pad_t *)((uint8_t *)rndmag + i * rndmag_size);
		mutex_init(&rmp->rm_mag.rm_lock, NULL, MUTEX_DEFAULT, NULL);
		/* "empty": rptr == eptr triggers a refill on first use */
		rmp->rm_mag.rm_rptr = rmp->rm_mag.rm_eptr = NULL;
	}
}
}
/*
* FIPS 140-2: the first n-bit (n > 15) block generated
* after power-up, initialization, or reset shall not
* be used, but shall be saved for comparison.
*/
static void
rnd_fips_discard_initial(void)
{
	uint8_t discard_buf[HASHSIZE];
	rndmag_pad_t *rmp;
	int i;

	for (i = 0; i < random_max_ncpus; i++) {
		rmp = (rndmag_pad_t *)((uint8_t *)rndmag + i * rndmag_size);

		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes(discard_buf,
		    HASHSIZE, ALWAYS_EXTRACT);
		/* save the discarded block for the continuous test */
		bcopy(discard_buf, rmp->rm_mag.rm_previous, HASHSIZE);
		/* the per-magazine key and seed are primed the same way */
}
}
static void
rnd_schedule_timeout(void)
{
/*
* The new timeout value is taken from the buffer of random bytes.
* We're merely reading the first 32 bits from the buffer here, not
* consuming any random bytes.
* The timeout multiplier value is a random value between 0.5 sec and
* 1.544480 sec (0.5 sec + 0xFF000 microseconds).
* The new timeout is TIMEOUT_INTERVAL times that multiplier.
*/
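	/*
	 * A hedged sketch of the computation described above;
	 * TIMEOUT_INTERVAL (the base interval in seconds) is an assumed
	 * name. 500000us + 0xFF000us = 1544480us, matching the comment.
	 */
	{
		clock_t ut = 500000 + (clock_t)
		    ((((uint32_t)rndpool[findex]) << 12) & 0xFF000);

		kcf_rndtimeout_id = timeout(rnd_handler, NULL,
		    TIMEOUT_INTERVAL * drv_usectohz(ut));
	}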
}
/*
* . POLLOUT always succeeds.
* . POLLIN and POLLRDNORM will block until a
* minimum amount of entropy is available.
*
* &rnd_pollhead is passed in *phpp in order to indicate the calling thread
* will block. When enough random bytes are available, later, the timeout
* handler routine will issue the pollwakeup() calls.
*/
void
kcf_rnd_chpoll(short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	*reventsp = events & POLLOUT;

	if (events & (POLLIN | POLLRDNORM)) {
/*
* Sampling of rnbyte_cnt is an atomic
* operation. Hence we do not need any locking.
*/
		if (rnbyte_cnt >= MINEXTRACTBYTES)
			*reventsp |= (events & (POLLIN | POLLRDNORM));
	}
*phpp = &rnd_pollhead;
}
/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (!rng_prov_found && rng_ok_to_log) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
		rng_ok_to_log = B_FALSE;
	}
	if (num_waiters > 0)
		/*
		 * Note: len has no relationship with how many bytes
		 * a poll thread needs.
		 */
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;
/*
* Only one thread gets to set rngprov_task_idle at a given point
* of time and the order of the writes is defined. Also, it is OK
* if we read an older value of it and skip the dispatch once
* since we will get the correct value during the next time here.
* So, no locking is needed here.
*/
if (len > 0 && rngprov_task_idle) {
/*
* It is OK if taskq_dispatch fails here. We will retry
* the next time around. Meanwhile, a thread doing a
* read() will go to the provider directly, if the
* cache becomes empty.
*/
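		/*
		 * A hedged sketch of the dispatch described above;
		 * rngprov_task() is the refill worker defined earlier.
		 */
		rngprov_task_idle = B_FALSE;
		membar_producer();
		if (taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP) == 0)
			rngprov_task_idle = B_TRUE;	/* retry next timeout */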
}
}
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
* indicating that enough randomness is now available for reading,
* and another process *steals* the bits from the pool, causing the
* subsequent read() from the first process to block. It is acceptable
* since the blocking will eventually end, after the timeout
* has expired enough times to honor the read.
*
* Note - Since we hold the rndpool_lock across the pollwakeup() call
* we MUST NOT grab the rndpool_lock in kcf_rndchpoll().
*/
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
}
static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	mutex_enter(&rndpool_lock);
	while (len > 0 && rnbyte_cnt < RNDPOOLSIZE) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case: keep mixing, without growing the count */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}
/*
* Caller should check len <= rnbyte_cnt under the
* rndpool_lock before calling.
*/
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}
/* Random number exported entry points */
/*
* Mix the supplied bytes into the entropy pool of a kCF
* RNG provider.
*/
int
random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, 0);
	return (0);
}
/*
* Mix the supplied bytes into the entropy pool of a kCF
* RNG provider. Mix immediately.
*/
int
random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (len < 1)
		return (-1);

	rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW);
	return (0);
}
/*
 * Get bytes from the pseudo-random generator above. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}
/*
 * Get bytes from the high-quality randomness cache. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE));
}
/*
 * Like random_get_bytes(), but blocks until enough entropy
 * is available. Returns 0 on success.
 */
int
random_get_blocking_bytes(uint8_t *ptr, size_t len)
{
	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_FALSE));
}
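/*
 * Example (hypothetical in-kernel caller): try the high-quality source
 * first and fall back to the PRNG when entropy is short:
 *
 *	uint8_t key[32];
 *
 *	if (random_get_bytes(key, sizeof (key)) != 0)
 *		(void) random_get_pseudo_bytes(key, sizeof (key));
 */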