kcf_random.c revision fa626f0c30d0a6dcbc5bb89c70534722f6380329
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* random_get_pseudo_bytes() and random_get_bytes().
*
* We periodically collect random bits from providers which are registered
* with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache and
 * used to satisfy requests for high-quality random numbers.
 * We pick a provider and call its SPI routine, if the cache does not have
 * enough bytes to satisfy a request.
 *
 * For pseudo-random numbers, a per-CPU generator is periodically reseeded
 * using random bits in the cache as a seed. We create one pseudo-random
 * generator per CPU, kmem-magazine-style, to avoid cache line contention.
*
* LOCKING HIERARCHY:
* 1) rmp->rm_lock protects the per-cpu pseudo-random generators.
* 2) rndpool_lock protects the high-quality randomness pool.
* It may be locked while a rmp->rm_lock is held.
*
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
*/
#include <sys/sysmacros.h>
/* Per-request bounds on the number of bytes extracted from a provider. */
#define MINEXTRACTBYTES 20
#define MAXEXTRACTBYTES 1024
/* NOTE(review): enum body is missing from this chunk of the file. */
typedef enum extract_type {
/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use SHA1
 * routines directly instead of using k-API because we can't return any
 * error from these code paths, even if a mechanism is disabled.
 * NOTE(review): sentence partially reconstructed — confirm against full file.
 */
#define HASHSIZE 20
/* HMAC-SHA1 */
#define HMAC_KEYSIZE 20
#define HMAC_BLOCK_SIZE 64
#define HMAC_KEYSCHED sha1keysched_t
/* HMAC-SHA1 "keyschedule" */
/* NOTE(review): struct body is missing from this chunk of the file. */
typedef struct sha1keysched_s {
/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
static int rnbyte_cnt; /* Number of bytes in the cache */
/* and the global variables */
static int num_waiters; /* #threads waiting to read from /dev/random */
static struct pollhead rnd_pollhead; /* poll(2) support for /dev/random */
static timeout_id_t kcf_rndtimeout_id; /* id of the scheduled timeout */
/* Forward declarations. */
static void rnd_handler(void *);
static void rnd_alloc_magazines();
/*
 * Module initialization: resets the cached-byte count and the waiter count.
 * NOTE(review): the function name line is missing from this chunk
 * (presumably kcf_rnd_init) and the entropy-seeding statements referenced
 * by the comment below are not visible — confirm against the full source.
 */
void
{
/*
 * Add bytes to the cache using two unpredictable times: the high
 * resolution time since boot, and the current time-of-day.
 * This is used only to make the timeout value in the timer
 * unpredictable.
 */
rnbyte_cnt = 0;
num_waiters = 0;
}
/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 * NOTE(review): the return-type line (presumably boolean_t) and the
 * provider-lookup statements are missing from this chunk.
 */
kcf_rngprov_check(void)
{
int rv;
return (B_TRUE);
} else
return (B_FALSE);
}
/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 * NOTE(review): the function name line and the entire body are missing
 * from this chunk — only the closing braces remain.
 */
static void
{
}
}
/* Boot-time tunable for experimentation. */
int kcf_limit_hwrng = 1;
/*
 * This routine is called for blocking reads.
 *
 * The argument from_user_api indicates whether the caller is
 * the /dev/random device or the kernel API.
 * NOTE(review): that sentence was truncated in the source; the wording
 * above is a reconstruction — confirm against the full file.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
/*
 * NOTE(review): the function name and parameter list are missing from this
 * chunk (presumably rngprov_getbytes(ptr, need, from_user_api,
 * is_taskq_thr)), as are the provider-iteration loop header, the SPI call
 * that sets rv, and the try_next label.
 */
static int
{
int rv;
int prov_cnt = 0;
int total_bytes = 0;
prov_cnt++;
/*
 * Typically a hardware RNG is a multi-purpose
 * crypto card and hence we do not want to overload the card
 * just for random numbers. The following check is to prevent
 * a user process from hogging the hardware RNG. Note that we
 * still use the hardware RNG from the periodically run
 * taskq thread.
 */
kcf_limit_hwrng == 1) {
goto try_next;
}
if (rv == CRYPTO_SUCCESS) {
total_bytes += need;
if (is_taskq_thr)
else {
break;
}
}
/* Add pd to the linked list of providers tried. */
break;
}
}
}
if (prov_cnt == 0) { /* no provider could be found. */
return (-1);
}
return (total_bytes);
}
/*
 * NOTE(review): function name, parameters, and most of the body are
 * missing from this chunk. Shape suggests an async completion callback
 * that acts only on CRYPTO_SUCCESS — confirm against the full source.
 */
static void
{
if (rv == CRYPTO_SUCCESS)
}
/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
/*
 * NOTE(review): the function name and parameter list are missing from this
 * chunk (presumably rngprov_getbytes_nblk(ptr, len, from_user_api)), as are
 * the provider loop header, the buffer allocation for the HW case, the SPI
 * call that sets rv, and the try_next label.
 */
static int
{
int prov_cnt = 0;
blen = 0;
total_bytes = 0;
prov_cnt ++;
switch (pd->pd_prov_type) {
case CRYPTO_HW_PROVIDER:
/* See comments in rngprov_getbytes() */
goto try_next;
/*
 * We have to allocate a buffer here as we can not
 * assume that the input buffer will remain valid
 * when the callback comes. We use a fixed size buffer
 * to simplify the book keeping.
 */
return (total_bytes);
}
break;
case CRYPTO_SW_PROVIDER:
/*
 * We do not need to allocate a buffer in the software
 * provider case as there is no callback involved. We
 * avoid any extra data copy by directly passing 'ptr'.
 */
break;
}
if (rv == CRYPTO_SUCCESS) {
switch (pd->pd_prov_type) {
case CRYPTO_HW_PROVIDER:
/*
 * Since we have the input buffer handy,
 * we directly copy to it rather than
 * adding to the pool.
 */
if (len < MINEXTRACTBYTES)
MINEXTRACTBYTES - len);
total_bytes += blen;
break;
case CRYPTO_SW_PROVIDER:
total_bytes += len;
len = 0;
break;
}
}
/*
 * We free the buffer in the callback routine
 * for the CRYPTO_QUEUED case.
 */
rv != CRYPTO_QUEUED) {
}
if (len == 0) {
break;
}
if (rv != CRYPTO_SUCCESS) {
/* Add pd to the linked list of providers tried. */
NULL) {
break;
}
}
}
}
if (prov_cnt == 0) { /* no provider could be found. */
return (-1);
}
return (total_bytes);
}
/*
 * Taskq worker dispatched by the timeout handler.
 * NOTE(review): the body is missing from this chunk — presumably it calls
 * rngprov_getbytes() in taskq mode and reschedules the timeout. Confirm
 * against the full source.
 */
static void
rngprov_task(void *arg)
{
}
}
/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 */
/*
 * NOTE(review): the function name and parameter list are missing from this
 * chunk (presumably rnd_get_bytes(ptr, len, how, from_user_api)), along
 * with the rngprov_getbytes*() call expressions, mutex_exit() calls, and
 * the byte-extraction statements in the loops below.
 */
static int
{
int bytes;
/*
 * Check if the request can be satisfied from the cache
 * of random bytes.
 */
if (len <= rnbyte_cnt) {
return (0);
}
switch (how) {
case BLOCKING_EXTRACT:
B_FALSE)) == -1)
break; /* No provider found */
return (0);
break;
case NONBLOCK_EXTRACT:
case ALWAYS_EXTRACT:
from_user_api)) == -1) {
/* No provider found */
if (how == NONBLOCK_EXTRACT) {
return (EAGAIN);
}
} else {
return (0);
}
return (EAGAIN);
break;
}
while (len > 0) {
if (how == BLOCKING_EXTRACT) {
/* Check if there is enough */
while (rnbyte_cnt < MINEXTRACTBYTES) {
num_waiters++;
/* cv_wait_sig() returns 0 when interrupted by a signal. */
if (cv_wait_sig(&rndpool_read_cv,
&rndpool_lock) == 0) {
num_waiters--;
return (EINTR);
}
num_waiters--;
}
}
/* Figure out how many bytes to extract */
/*
 * There are not enough bytes, but we can not block
 * (ALWAYS_EXTRACT case). The caller subsequently
 * runs an additional generation algorithm. So, there
 * is no problem.
 * NOTE(review): comment partially reconstructed — confirm
 * wording against the full source.
 */
while (len > 0) {
(RNDPOOLSIZE - 1);
}
break;
}
}
return (0);
}
/*
 * NOTE(review): the function name and parameter list are missing from this
 * chunk (presumably kcf_rnd_get_bytes), along with the call that sets
 * 'error' and the condition guarding the first return — the two returns
 * only make sense with those lines restored.
 */
int
{
int error;
return (error);
return (0);
}
/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define RND_CPU_CACHE_SIZE 64
/* NOTE(review): the continuation line(s) of this macro are missing. */
#define RND_CPU_PAD (RND_CPU_PAD_SIZE - \
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
/* NOTE(review): struct members are missing from this chunk of the file. */
typedef struct rndmag_s
{
} rndmag_t;
/*
 * Generate pseudo-random bytes by hashing a
 * rotating counter with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 * NOTE(review): the opening sentence was truncated in the source; the
 * first line above is a reconstruction — confirm against the full file.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases lock.
 */
/*
 * NOTE(review): the function name and parameter list are missing from this
 * chunk (presumably rnd_generate_pseudo_bytes(rmp, ptr, len)), as are the
 * rekey-condition lines, entropy extraction, hashing, and output-copy
 * statements inside the loop.
 */
static int
{
/* Nothing is being asked */
if (len == 0) {
return (0);
}
do {
/*
 * Contention-avoiding rekey: see if
 * the pool is locked, and if so, wait a bit.
 * Do an 'exponential back-in' to ensure we don't
 * run too long without rekey.
 */
/*
 * Decaying exponential back-in for rekey.
 */
if ((rnbyte_cnt < MINEXTRACTBYTES) ||
(!mutex_tryenter(&rndpool_lock))) {
goto punt;
}
} else {
}
/* Get a new chunk of entropy */
/* Set up key */
/* Get new counter value by encrypting timestamp */
oblocks = 0;
}
punt:
/* Hash counter to produce prn stream */
} else {
}
rmp->rm_counter++;
oblocks++;
nblock--;
} while (bytes > 0);
return (0);
}
/*
 * Per-CPU Random magazines.
 */
static size_t rndmag_total;
/*
 * The number of per-CPU magazines is based on
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 * NOTE(review): the opening of this comment was truncated in the source;
 * the first line above is a reconstruction — confirm against the full file.
 */
static uint32_t random_max_ncpus = 0;
/*
 * Boot-time tunables, for experimentation.
 */
/*
 * NOTE(review): the function name and parameter list are missing from this
 * chunk (presumably kcf_rnd_get_pseudo_bytes(ptr, len)), together with the
 * magazine selection, buffer copy, and refill statements — only the loop
 * skeleton and comments remain.
 */
int
{
/*
 * Anyone who asks for zero bytes of randomness should get slapped.
 */
/*
 * Fast path.
 */
for (;;) {
/*
 * Big requests bypass buffer and tail-call the
 * generate routine directly.
 */
if (len > rndmag_threshold) {
}
return (0);
}
/*
 * End fast path.
 */
/*
 * Note: We assume the generate routine always succeeds
 * in this case (because it does at present..)
 * It also always releases rm_lock.
 */
}
}
/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in rnd_get_pseudo_bytes above on the first call for each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
/*
 * NOTE(review): the function name line (presumably rnd_alloc_magazines)
 * and the allocation/initialization statements in the loop are missing
 * from this chunk.
 */
static void
{
int i;
if (rndmag_size < rndbuf_len)
for (i = 0; i < random_max_ncpus; i++) {
}
}
/*
 * Schedule the next periodic entropy-collection timeout.
 * NOTE(review): the function name line and the timeout computation /
 * timeout(9F) call are missing from this chunk — confirm against the
 * full source.
 */
void
{
if (do_mech2id)
/*
 * The new timeout value is taken from the buffer of random bytes.
 * We're merely reading the first 32 bits from the buffer here, not
 * consuming any random bytes.
 * The timeout multiplier value is a random value between 0.5 sec and
 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
 */
}
/*
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
/*
 * NOTE(review): the function name and parameter list are missing from this
 * chunk (presumably kcf_rndchpoll(events, anyyet, reventsp, phpp)), as is
 * the body of the data-available branch.
 */
void
{
/*
 * Sampling of rnbyte_cnt is an atomic
 * operation. Hence we do not need any locking.
 */
if (rnbyte_cnt >= MINEXTRACTBYTES) {
} else {
*reventsp = 0;
if (!anyyet)
*phpp = &rnd_pollhead;
}
}
/*ARGSUSED*/
/*
 * Periodic timeout handler: tops up the random-byte cache and wakes
 * readers/pollers. NOTE(review): the statements that compute 'len',
 * dispatch the taskq, and perform pollwakeup()/cv_broadcast() are
 * missing from this chunk.
 */
static void
rnd_handler(void *arg)
{
int len = 0;
if (num_waiters > 0)
else if (rnbyte_cnt < RNDPOOLSIZE)
if (len > 0) {
} else if (!kcf_rngprov_check()) {
}
/*
 * Wake up threads waiting in poll() or for enough accumulated
 * random bytes to read from /dev/random. In case a poll() is
 * concurrent with a read(), the polling process may be woken up
 * indicating that enough randomness is now available for reading,
 * and another process *steals* the bits from the pool, causing the
 * subsequent read() from the first process to block. It is acceptable
 * since the blocking will eventually end, after the timeout
 * has expired enough times to honor the read.
 *
 * Note - Since we hold the rndpool_lock across the pollwakeup() call
 * we MUST NOT grab the rndpool_lock in kcf_rndchpoll().
 */
if (rnbyte_cnt >= MINEXTRACTBYTES)
if (num_waiters > 0)
}
/* Hashing functions */
/*
 * Prepare the HMAC-SHA1 inner/outer pad contexts for a key, per RFC 2104.
 * NOTE(review): the function name and parameter list are missing from this
 * chunk, as are the key-copy into ip/op, the computation of 'nints', and
 * the SHA1 init/update calls for the two pads.
 */
static void
{
int i;
int nints;
/*
 * XOR key with ipad (0x36) and opad (0x5c) as defined
 * in RFC 2104.
 */
for (i = 0; i < nints; i++) {
/* Word-wide XOR applies the byte pad to 4 bytes at a time. */
ip[i] ^= 0x36363636;
op[i] ^= 0x5c5c5c5c;
}
/* Perform hash with ipad */
/* Perform hash with opad */
}
/*
 * Finish an HMAC computation from saved pad contexts.
 * NOTE(review): the function name, parameters, the declaration of
 * 'icontext', and the SHA1 update/final calls are missing from this chunk.
 */
static void
{
icontext = saved_contexts[0];
/*
 * Perform Hash(K XOR OPAD, DIGEST), where DIGEST is the
 * Hash(K XOR IPAD, DATA).
 */
}
/*
 * Add bytes to the circular cache of random bytes.
 * NOTE(review): the function name, parameters, the buffer-store statements,
 * and the index arithmetic are missing from this chunk — only the counter
 * increment and loop skeleton remain.
 */
static void
{
rnbyte_cnt++;
}
/* Handle buffer full case */
while (len > 0) {
}
}
/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
/*
 * NOTE(review): the function name, parameters, and the byte-copy /
 * index-advance statements are missing from this chunk.
 */
static void
{
while (len > 0) {
rnbyte_cnt--;
}
}
/* Random number exported entry points */
/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 * NOTE(review): the function name line (presumably random_add_entropy)
 * and the provider seed call are missing from this chunk. Returns -1 on
 * a zero-length request per the visible guard, 0 otherwise.
 */
/* ARGSUSED */
int
{
if (len < 1)
return (-1);
return (0);
}
/*
 * Get pseudo-random bytes;
 * always succeeds. Returns 0.
 * NOTE(review): the first line of this comment and the function name line
 * (presumably random_get_pseudo_bytes) are missing from this chunk, as is
 * the call into the per-CPU generator.
 */
int
{
if (len < 1)
return (0);
}
/*
 * Get high-quality random bytes; returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 * NOTE(review): the first line of this comment and the function name line
 * (presumably random_get_bytes) are missing from this chunk, as is the
 * call into the extraction path.
 */
int
{
if (len < 1)
return (0);
}