/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* This file implements the interfaces that the /dev/random
* driver uses for read(2), write(2) and poll(2) on /dev/random or
* /dev/urandom. It also implements the kernel API - random_add_entropy(),
* random_get_pseudo_bytes() and random_get_bytes().
*
* We periodically collect random bits from providers which are registered
* with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that
 * is used to satisfy high-quality random number (/dev/random) requests.
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
*
* /dev/urandom requests use a software-based generator algorithm that uses the
* random bits in the cache as a seed. We create one pseudo-random generator
* (for /dev/urandom) per possible CPU on the system, and use it,
* kmem-magazine-style, to avoid cache line contention.
*
* LOCKING HIERARCHY:
* 1) rmp->rm_lock protects the per-cpu pseudo-random generators.
* 2) rndpool_lock protects the high-quality randomness pool.
* It may be locked while a rmp->rm_lock is held.
*
* A history note: The kernel API and the software-based algorithms in this
* file used to be part of the /dev/random driver.
*/
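/*
 * An illustrative sketch of the lock order described above (not code
 * from this file): the rekey path in rnd_generate_pseudo_bytes() holds
 * a magazine lock and then takes the pool lock, never the reverse.
 *
 *	mutex_enter(&rmp->rm_lock);
 *	...
 *	mutex_enter(&rndpool_lock);	(mutex_tryenter() on the
 *	...				contention-avoiding path)
 *	mutex_exit(&rndpool_lock);
 *	mutex_exit(&rmp->rm_lock);
 */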
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>
#define RNDPOOLSIZE 1024 /* Pool size in bytes */
#define MINEXTRACTBYTES 20
#define MAXEXTRACTBYTES 1024
#define	PRNG_MAXOBLOCKS	1310720	/* Max output blocks per PRNG key */
#define TIMEOUT_INTERVAL 5 /* Periodic mixing interval in secs */
typedef enum extract_type {
NONBLOCK_EXTRACT,
BLOCKING_EXTRACT,
ALWAYS_EXTRACT
} extract_type_t;
/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use the
 * SHA1 routines directly instead of the k-API because we cannot return
 * an error code in the /dev/urandom case, and the k-API can return an
 * error if a mechanism is disabled.
*/
#define HASHSIZE 20
#define HASH_CTX SHA1_CTX
#define HashInit(ctx) SHA1Init((ctx))
#define HashUpdate(ctx, p, s) SHA1Update((ctx), (p), (s))
#define HashFinal(d, ctx) SHA1Final((d), (ctx))
/* HMAC-SHA1 */
#define HMAC_KEYSIZE 20
#define HMAC_BLOCK_SIZE 64
#define HMAC_KEYSCHED sha1keysched_t
#define SET_ENCRYPT_KEY(k, s, ks) hmac_key((k), (s), (ks))
#define	HMAC_ENCRYPT(ks, p, s, d)	hmac_encr((ks), (uint8_t *)(p), (s), (d))
/* HMAC-SHA1 "keyschedule" */
typedef struct sha1keysched_s {
SHA1_CTX ictx;
SHA1_CTX octx;
} sha1keysched_t;
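/*
 * A sketch (illustrative only) of how the macros above fit together.
 * hmac_key() precomputes the inner and outer contexts once per key, so
 * each subsequent HMAC hashes only the data and the inner digest
 * instead of re-hashing the padded key. 'key', 'data', 'datalen' and
 * 'mac' are placeholders.
 *
 *	sha1keysched_t ks;
 *	uint8_t mac[HASHSIZE];
 *
 *	SET_ENCRYPT_KEY(key, HMAC_KEYSIZE, &ks);
 *	HMAC_ENCRYPT(&ks, data, datalen, mac);
 */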
/*
* Cache of random bytes implemented as a circular buffer. findex and rindex
* track the front and back of the circular buffer.
*/
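/*
 * Note that the wrap-around arithmetic below relies on RNDPOOLSIZE
 * being a power of two, so an index can be advanced with a mask
 * instead of a modulo operation, e.g.:
 *
 *	rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
 */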
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt; /* Number of bytes in the cache */
static kmutex_t rndpool_lock; /* protects r/w accesses to the cache, */
/* and the global variables */
static kcondvar_t rndpool_read_cv; /* serializes poll/read syscalls */
static int num_waiters; /* #threads waiting to read from /dev/random */
static struct pollhead rnd_pollhead;
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;
static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines(void);
static void hmac_key(uint8_t *, size_t, void *);
static void hmac_encr(void *, uint8_t *, size_t, uint8_t *);
void
kcf_rnd_init(void)
{
hrtime_t ts;
time_t now;
mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);
/*
 * Seed the cache with two unpredictable times: the high-resolution
 * time since boot, and the current time of day. This is used only to
 * make the timeout value of the periodic timer unpredictable.
*/
ts = gethrtime();
rndc_addbytes((uint8_t *)&ts, sizeof (ts));
(void) drv_getparm(TIME, &now);
rndc_addbytes((uint8_t *)&now, sizeof (now));
rnbyte_cnt = 0;
findex = rindex = 0;
num_waiters = 0;
rngmech_type = KCF_MECHID(KCF_MISC_CLASS, 0);
rnd_alloc_magazines();
}
/*
* Return TRUE if at least one provider exists that can
* supply random numbers.
*/
boolean_t
kcf_rngprov_check(void)
{
int rv;
kcf_provider_desc_t *pd;
if ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
NULL, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {
KCF_PROV_REFRELE(pd);
return (B_TRUE);
} else
return (B_FALSE);
}
/*
* Pick a software-based provider and submit a request to seed
* its random number generator.
*/
static void
rngprov_seed(uint8_t *buf, int len)
{
kcf_provider_desc_t *pd = NULL;
kcf_req_params_t params;
if (kcf_get_sw_prov(rngmech_type, &pd, B_FALSE) == CRYPTO_SUCCESS) {
KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_SEED,
pd->pd_sid, buf, len);
(void) kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
KCF_PROV_REFRELE(pd);
}
}
/* Boot-time tunable for experimentation. */
int kcf_limit_hwrng = 1;
/*
* This routine is called for blocking reads.
*
* The argument from_user_api indicates whether the caller is
* from userland coming via the /dev/random driver.
*
* The argument is_taskq_thr indicates whether the caller is
* the taskq thread dispatched by the timeout handler routine.
* In this case, we cycle through all the providers
* submitting a request to each provider to generate random numbers.
*
* For other cases, we pick a provider and submit a request to generate
* random numbers. We retry using another provider if we get an error.
*
 * Returns the number of bytes written to 'ptr'. Returns -1
 * if no provider is found, in which case the contents of 'ptr' are
 * unchanged.
*/
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t from_user_api,
boolean_t is_taskq_thr)
{
int rv;
int prov_cnt = 0;
int total_bytes = 0;
kcf_provider_desc_t *pd;
kcf_req_params_t params;
kcf_prov_tried_t *list = NULL;
while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
list, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {
prov_cnt++;
/*
* Typically a hardware RNG is a multi-purpose
* crypto card and hence we do not want to overload the card
* just for random numbers. The following check is to prevent
* a user process from hogging the hardware RNG. Note that we
* still use the hardware RNG from the periodically run
* taskq thread.
*/
if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && from_user_api &&
kcf_limit_hwrng == 1) {
ASSERT(is_taskq_thr == B_FALSE);
goto try_next;
}
KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
pd->pd_sid, ptr, need);
rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
ASSERT(rv != CRYPTO_QUEUED);
if (rv == CRYPTO_SUCCESS) {
total_bytes += need;
if (is_taskq_thr)
rndc_addbytes(ptr, need);
else {
KCF_PROV_REFRELE(pd);
break;
}
}
if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
try_next:
/* Add pd to the linked list of providers tried. */
if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
KCF_PROV_REFRELE(pd);
break;
}
}
}
if (list != NULL)
kcf_free_triedlist(list);
if (prov_cnt == 0) { /* no provider could be found. */
return (-1);
}
return (total_bytes);
}
static void
notify_done(void *arg, int rv)
{
uchar_t *rndbuf = arg;
if (rv == CRYPTO_SUCCESS)
rndc_addbytes(rndbuf, MINEXTRACTBYTES);
bzero(rndbuf, MINEXTRACTBYTES);
kmem_free(rndbuf, MINEXTRACTBYTES);
}
/*
* Cycle through all the providers submitting a request to each provider
* to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
* and ALWAYS_EXTRACT.
*
 * Returns the number of bytes written to 'ptr'. Returns -1
 * if no provider is found, in which case the contents of 'ptr' are
 * unchanged.
*/
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len, boolean_t from_user_api)
{
int rv, blen, total_bytes;
uchar_t *rndbuf;
kcf_provider_desc_t *pd;
kcf_req_params_t params;
crypto_call_req_t req;
kcf_prov_tried_t *list = NULL;
int prov_cnt = 0;
blen = 0;
total_bytes = 0;
req.cr_flag = CRYPTO_SKIP_REQID;
req.cr_callback_func = notify_done;
while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
list, CRYPTO_FG_RANDOM, CHECK_RESTRICT(&req), 0)) != NULL) {
		prov_cnt++;
switch (pd->pd_prov_type) {
case CRYPTO_HW_PROVIDER:
/* See comments in rngprov_getbytes() */
if (from_user_api && kcf_limit_hwrng == 1)
goto try_next;
/*
			 * We have to allocate a buffer here as we cannot
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed-size buffer
			 * to simplify the bookkeeping.
*/
rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
if (rndbuf == NULL) {
KCF_PROV_REFRELE(pd);
if (list != NULL)
kcf_free_triedlist(list);
return (total_bytes);
}
req.cr_callback_arg = rndbuf;
KCF_WRAP_RANDOM_OPS_PARAMS(&params,
KCF_OP_RANDOM_GENERATE,
pd->pd_sid, rndbuf, MINEXTRACTBYTES);
break;
case CRYPTO_SW_PROVIDER:
/*
* We do not need to allocate a buffer in the software
* provider case as there is no callback involved. We
* avoid any extra data copy by directly passing 'ptr'.
*/
KCF_WRAP_RANDOM_OPS_PARAMS(&params,
KCF_OP_RANDOM_GENERATE,
pd->pd_sid, ptr, len);
break;
}
rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
if (rv == CRYPTO_SUCCESS) {
switch (pd->pd_prov_type) {
case CRYPTO_HW_PROVIDER:
/*
* Since we have the input buffer handy,
* we directly copy to it rather than
* adding to the pool.
*/
blen = min(MINEXTRACTBYTES, len);
bcopy(rndbuf, ptr, blen);
if (len < MINEXTRACTBYTES)
rndc_addbytes(rndbuf + len,
MINEXTRACTBYTES - len);
ptr += blen;
len -= blen;
total_bytes += blen;
break;
case CRYPTO_SW_PROVIDER:
total_bytes += len;
len = 0;
break;
}
}
/*
* We free the buffer in the callback routine
* for the CRYPTO_QUEUED case.
*/
if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
rv != CRYPTO_QUEUED) {
bzero(rndbuf, MINEXTRACTBYTES);
kmem_free(rndbuf, MINEXTRACTBYTES);
}
if (len == 0) {
KCF_PROV_REFRELE(pd);
break;
}
if (rv != CRYPTO_SUCCESS) {
try_next:
/* Add pd to the linked list of providers tried. */
if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
NULL) {
KCF_PROV_REFRELE(pd);
break;
}
}
}
if (list != NULL) {
kcf_free_triedlist(list);
}
if (prov_cnt == 0) { /* no provider could be found. */
return (-1);
}
return (total_bytes);
}
static void
rngprov_task(void *arg)
{
int len = (int)(uintptr_t)arg;
uchar_t tbuf[MAXEXTRACTBYTES];
ASSERT(len <= MAXEXTRACTBYTES);
if (rngprov_getbytes(tbuf, len, B_FALSE, B_TRUE) == -1) {
cmn_err(CE_WARN, "No randomness provider enabled for "
"/dev/random. Use cryptoadm(1M) to enable a provider.");
}
}
/*
* Returns "len" random or pseudo-random bytes in *ptr.
* Will block if not enough random bytes are available and the
* call is blocking.
*
* Called with rndpool_lock held (allowing caller to do optimistic locking;
* releases the lock before return).
*/
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how,
boolean_t from_user_api)
{
int bytes;
size_t got;
ASSERT(mutex_owned(&rndpool_lock));
/*
* Check if the request can be satisfied from the cache
* of random bytes.
*/
if (len <= rnbyte_cnt) {
rndc_getbytes(ptr, len);
mutex_exit(&rndpool_lock);
return (0);
}
mutex_exit(&rndpool_lock);
switch (how) {
case BLOCKING_EXTRACT:
if ((got = rngprov_getbytes(ptr, len, from_user_api,
B_FALSE)) == -1)
break; /* No provider found */
if (got == len)
return (0);
len -= got;
ptr += got;
break;
case NONBLOCK_EXTRACT:
case ALWAYS_EXTRACT:
if ((got = rngprov_getbytes_nblk(ptr, len,
from_user_api)) == -1) {
/* No provider found */
if (how == NONBLOCK_EXTRACT) {
return (EAGAIN);
}
} else {
if (got == len)
return (0);
len -= got;
ptr += got;
}
if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
return (EAGAIN);
break;
}
mutex_enter(&rndpool_lock);
while (len > 0) {
if (how == BLOCKING_EXTRACT) {
/* Check if there is enough */
while (rnbyte_cnt < MINEXTRACTBYTES) {
num_waiters++;
if (cv_wait_sig(&rndpool_read_cv,
&rndpool_lock) == 0) {
num_waiters--;
mutex_exit(&rndpool_lock);
return (EINTR);
}
num_waiters--;
}
}
/* Figure out how many bytes to extract */
bytes = min(len, rnbyte_cnt);
rndc_getbytes(ptr, bytes);
len -= bytes;
ptr += bytes;
if (len > 0 && how == ALWAYS_EXTRACT) {
/*
			 * There are not enough bytes, but we cannot block.
			 * This happens only in the /dev/urandom case, which
			 * runs an additional generation algorithm over these
			 * bytes. So, there is no problem.
*/
while (len > 0) {
*ptr = rndpool[findex];
ptr++; len--;
rindex = findex = (findex + 1) &
(RNDPOOLSIZE - 1);
}
break;
}
}
mutex_exit(&rndpool_lock);
return (0);
}
int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock,
boolean_t from_user_api)
{
extract_type_t how;
int error;
how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
mutex_enter(&rndpool_lock);
if ((error = rnd_get_bytes(ptr, len, how, from_user_api)) != 0)
return (error);
BUMP_RND_STATS(rs_rndOut, len);
return (0);
}
/*
* Revisit this if the structs grow or we come up with a better way
* of cache-line-padding structures.
*/
#define RND_CPU_CACHE_SIZE 64
#define	RND_CPU_PAD_SIZE	(RND_CPU_CACHE_SIZE * 5)
#define RND_CPU_PAD (RND_CPU_PAD_SIZE - \
(sizeof (kmutex_t) + 3*sizeof (uint8_t *) + sizeof (HMAC_KEYSCHED) + \
sizeof (uint64_t) + 3*sizeof (uint32_t) + sizeof (rnd_stats_t)))
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
* a per-CPU instance of the pseudo-random generator. We have it much easier
* than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
*
* Note that this usage is preemption-safe; a thread
* entering a critical section remembers which generator it locked
* and unlocks the same one; should it be preempted and wind up running on
* a different CPU, there will be a brief period of increased contention
* before it exits the critical section but nothing will melt.
*/
typedef struct rndmag_s
{
kmutex_t rm_lock;
uint8_t *rm_buffer; /* Start of buffer */
uint8_t *rm_eptr; /* End of buffer */
uint8_t *rm_rptr; /* Current read pointer */
HMAC_KEYSCHED rm_ks; /* seed */
uint64_t rm_counter; /* rotating counter for extracting */
uint32_t rm_oblocks; /* time to rekey? */
uint32_t rm_ofuzz; /* Rekey backoff state */
uint32_t rm_olimit; /* Hard rekey limit */
rnd_stats_t rm_stats; /* Per-CPU Statistics */
uint8_t rm_pad[RND_CPU_PAD];
} rndmag_t;
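/*
 * Worked example of the padding arithmetic (illustrative; per the
 * comment above, the math is revisited by hand when the structs grow):
 * with RND_CPU_CACHE_SIZE of 64, RND_CPU_PAD_SIZE is 320 bytes, so
 * each rndmag_t nominally spans five cache lines. Assuming the live
 * fields pack without internal padding, a compile-time check such as
 *
 *	typedef char rndmag_pad_check[
 *	    (sizeof (rndmag_t) % RND_CPU_CACHE_SIZE == 0) ? 1 : -1];
 *
 * would catch the structure growing past its padded size.
 */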
/*
* Generate random bytes for /dev/urandom by encrypting a
* rotating counter with a key created from bytes extracted
* from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
* is generated before a new key is obtained.
*
* Note that callers to this routine are likely to assume it can't fail.
*
* Called with rmp locked; releases lock.
*/
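/*
 * In outline (an illustrative sketch of the loop below, not a separate
 * implementation):
 *
 *	while (output bytes remain) {
 *		if (rm_oblocks has reached rm_olimit)
 *			rekey: extract HMAC_KEYSIZE bytes from the pool
 *			into rm_ks and reset rm_counter from
 *			HMAC(rm_ks, gethrtime());
 *		next HASHSIZE-byte block = HMAC(rm_ks, rm_counter);
 *		rm_counter++;
 *	}
 */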
static int
rnd_generate_pseudo_bytes(rndmag_t *rmp, uint8_t *ptr, size_t len)
{
size_t bytes = len;
int nblock, size;
uint32_t oblocks;
uint8_t digest[HASHSIZE];
ASSERT(mutex_owned(&rmp->rm_lock));
	/* Nothing was asked for. */
if (len == 0) {
mutex_exit(&rmp->rm_lock);
return (0);
}
nblock = howmany(len, HASHSIZE);
rmp->rm_oblocks += nblock;
oblocks = rmp->rm_oblocks;
do {
if (oblocks >= rmp->rm_olimit) {
hrtime_t timestamp;
uint8_t key[HMAC_KEYSIZE];
/*
* Contention-avoiding rekey: see if
* the pool is locked, and if so, wait a bit.
* Do an 'exponential back-in' to ensure we don't
* run too long without rekey.
*/
if (rmp->rm_ofuzz) {
/*
* Decaying exponential back-in for rekey.
*/
if ((rnbyte_cnt < MINEXTRACTBYTES) ||
(!mutex_tryenter(&rndpool_lock))) {
rmp->rm_olimit += rmp->rm_ofuzz;
rmp->rm_ofuzz >>= 1;
goto punt;
}
} else {
mutex_enter(&rndpool_lock);
}
/* Get a new chunk of entropy */
(void) rnd_get_bytes(key, HMAC_KEYSIZE,
ALWAYS_EXTRACT, B_FALSE);
/* Set up key */
SET_ENCRYPT_KEY(key, HMAC_KEYSIZE, &rmp->rm_ks);
/* Get new counter value by encrypting timestamp */
timestamp = gethrtime();
HMAC_ENCRYPT(&rmp->rm_ks, &timestamp,
sizeof (timestamp), digest);
rmp->rm_olimit = PRNG_MAXOBLOCKS/2;
rmp->rm_ofuzz = PRNG_MAXOBLOCKS/4;
bcopy(digest, &rmp->rm_counter, sizeof (uint64_t));
oblocks = 0;
rmp->rm_oblocks = nblock;
}
punt:
/* Hash counter to produce prn stream */
if (bytes >= HASHSIZE) {
size = HASHSIZE;
HMAC_ENCRYPT(&rmp->rm_ks, &rmp->rm_counter,
sizeof (rmp->rm_counter), ptr);
} else {
size = min(bytes, HASHSIZE);
HMAC_ENCRYPT(&rmp->rm_ks, &rmp->rm_counter,
sizeof (rmp->rm_counter), digest);
bcopy(digest, ptr, size);
}
ptr += size;
bytes -= size;
rmp->rm_counter++;
oblocks++;
nblock--;
} while (bytes > 0);
mutex_exit(&rmp->rm_lock);
return (0);
}
/*
* Per-CPU Random magazines.
*/
static rndmag_t *rndmag;
static uint8_t *rndbuf;
static size_t rndmag_total;
/*
* common/os/cpu.c says that platform support code can shrinkwrap
* max_ncpus. On the off chance that we get loaded very early, we
* read it exactly once, to copy it here.
*/
static uint32_t random_max_ncpus = 0;
/*
* Boot-time tunables, for experimentation.
*/
size_t rndmag_threshold = 32;
size_t rndbuf_len = 64;
size_t rndmag_size = 64;
int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
rndmag_t *rmp;
uint8_t *cptr, *eptr;
/*
* Anyone who asks for zero bytes of randomness should get slapped.
*/
ASSERT(len > 0);
/*
* Fast path.
*/
for (;;) {
rmp = &rndmag[CPU->cpu_seqid];
mutex_enter(&rmp->rm_lock);
/*
* Big requests bypass buffer and tail-call the
* generate routine directly.
*/
if (len > rndmag_threshold) {
BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
return (rnd_generate_pseudo_bytes(rmp, ptr, len));
}
cptr = rmp->rm_rptr;
eptr = cptr + len;
if (eptr <= rmp->rm_eptr) {
rmp->rm_rptr = eptr;
bcopy(cptr, ptr, len);
BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
mutex_exit(&rmp->rm_lock);
return (0);
}
/*
* End fast path.
*/
rmp->rm_rptr = rmp->rm_buffer;
/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does, at present).
* It also always releases rm_lock.
*/
(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_buffer,
rndbuf_len);
}
}
/*
* We set up (empty) magazines for all of max_ncpus, possibly wasting a
* little memory on big systems that don't have the full set installed.
* See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in kcf_rnd_get_pseudo_bytes() above on the first call
 * for each CPU.
*
* TODO: make rndmag_size tunable at run time!
*/
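/*
 * Layout sketch (illustrative): rndbuf is carved into random_max_ncpus
 * consecutive rndmag_size-byte slices, one per magazine:
 *
 *	rndbuf: [ cpu0 slice ][ cpu1 slice ] ... [ cpuN-1 slice ]
 *	        |<- rndmag_size ->|
 *
 * Only the first rndbuf_len bytes of each slice hold random bytes; the
 * remainder is slop from rounding rndmag_size up to a multiple of
 * RND_CPU_CACHE_SIZE.
 */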
static void
rnd_alloc_magazines(void)
{
rndmag_t *rmp;
int i;
rndbuf_len = roundup(rndbuf_len, HASHSIZE);
if (rndmag_size < rndbuf_len)
rndmag_size = rndbuf_len;
rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);
random_max_ncpus = max_ncpus;
rndmag_total = rndmag_size * random_max_ncpus;
rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
rndmag = kmem_zalloc(sizeof (rndmag_t) * random_max_ncpus, KM_SLEEP);
for (i = 0; i < random_max_ncpus; i++) {
uint8_t *buf;
rmp = &rndmag[i];
mutex_init(&rmp->rm_lock, NULL, MUTEX_DRIVER, NULL);
buf = rndbuf + i * rndmag_size;
rmp->rm_buffer = buf;
rmp->rm_eptr = buf + rndbuf_len;
rmp->rm_rptr = buf + rndbuf_len;
rmp->rm_oblocks = 1;
}
}
void
kcf_rnd_schedule_timeout(boolean_t do_mech2id)
{
clock_t ut; /* time in microseconds */
if (do_mech2id)
rngmech_type = crypto_mech2id(SUN_RANDOM);
/*
	 * The new timeout value is derived from the cache of random bytes.
	 * We're merely reading one byte from the cache here, not
	 * consuming any random bytes.
* The timeout multiplier value is a random value between 0.5 sec and
* 1.544480 sec (0.5 sec + 0xFF000 microseconds).
* The new timeout is TIMEOUT_INTERVAL times that multiplier.
*/
ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
kcf_rndtimeout_id = timeout(rnd_handler, NULL,
TIMEOUT_INTERVAL * drv_usectohz(ut));
}
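/*
 * Worked example (illustrative): if rndpool[findex] is 0xFF, then
 * ut = 500000 + (0xFF << 12) = 1544480 microseconds, and the timeout
 * fires after TIMEOUT_INTERVAL * drv_usectohz(1544480) ticks, i.e.
 * roughly 5 * 1.54 sec = 7.7 seconds.
 */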
/*
* &rnd_pollhead is passed in *phpp in order to indicate the calling thread
* will block. When enough random bytes are available, later, the timeout
* handler routine will issue the pollwakeup() calls.
*/
void
kcf_rnd_chpoll(int anyyet, short *reventsp, struct pollhead **phpp)
{
/*
* Sampling of rnbyte_cnt is an atomic
* operation. Hence we do not need any locking.
*/
if (rnbyte_cnt >= MINEXTRACTBYTES) {
*reventsp |= (POLLIN | POLLRDNORM);
} else {
*reventsp = 0;
if (!anyyet)
*phpp = &rnd_pollhead;
}
}
/*ARGSUSED*/
static void
rnd_handler(void *arg)
{
int len = 0;
if (num_waiters > 0)
len = MAXEXTRACTBYTES;
else if (rnbyte_cnt < RNDPOOLSIZE)
len = MINEXTRACTBYTES;
if (len > 0) {
(void) taskq_dispatch(system_taskq, rngprov_task,
(void *)(uintptr_t)len, TQ_NOSLEEP);
} else if (!kcf_rngprov_check()) {
cmn_err(CE_WARN, "No randomness provider enabled for "
"/dev/random. Use cryptoadm(1M) to enable a provider.");
}
mutex_enter(&rndpool_lock);
/*
* Wake up threads waiting in poll() or for enough accumulated
* random bytes to read from /dev/random. In case a poll() is
* concurrent with a read(), the polling process may be woken up
* indicating that enough randomness is now available for reading,
* and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. This is
	 * acceptable, since the blocking eventually ends once the timeout
	 * has fired enough times to replenish the pool and honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
*/
if (rnbyte_cnt >= MINEXTRACTBYTES)
pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);
if (num_waiters > 0)
cv_broadcast(&rndpool_read_cv);
mutex_exit(&rndpool_lock);
kcf_rnd_schedule_timeout(B_FALSE);
}
/* Hashing functions */
static void
hmac_key(uint8_t *key, size_t keylen, void *buf)
{
uint32_t *ip, *op;
uint32_t ipad[HMAC_BLOCK_SIZE/sizeof (uint32_t)];
uint32_t opad[HMAC_BLOCK_SIZE/sizeof (uint32_t)];
HASH_CTX *icontext, *ocontext;
int i;
int nints;
icontext = buf;
ocontext = (SHA1_CTX *)((uint8_t *)buf + sizeof (HASH_CTX));
bzero((uchar_t *)ipad, HMAC_BLOCK_SIZE);
bzero((uchar_t *)opad, HMAC_BLOCK_SIZE);
bcopy(key, (uchar_t *)ipad, keylen);
bcopy(key, (uchar_t *)opad, keylen);
/*
* XOR key with ipad (0x36) and opad (0x5c) as defined
* in RFC 2104.
*/
ip = ipad;
op = opad;
nints = HMAC_BLOCK_SIZE/sizeof (uint32_t);
for (i = 0; i < nints; i++) {
ip[i] ^= 0x36363636;
op[i] ^= 0x5c5c5c5c;
}
/* Perform hash with ipad */
HashInit(icontext);
HashUpdate(icontext, (uchar_t *)ipad, HMAC_BLOCK_SIZE);
/* Perform hash with opad */
HashInit(ocontext);
HashUpdate(ocontext, (uchar_t *)opad, HMAC_BLOCK_SIZE);
}
static void
hmac_encr(void *ctx, uint8_t *ptr, size_t len, uint8_t *digest)
{
HASH_CTX *saved_contexts;
HASH_CTX icontext;
HASH_CTX ocontext;
saved_contexts = (HASH_CTX *)ctx;
icontext = saved_contexts[0];
ocontext = saved_contexts[1];
HashUpdate(&icontext, ptr, len);
HashFinal(digest, &icontext);
/*
* Perform Hash(K XOR OPAD, DIGEST), where DIGEST is the
* Hash(K XOR IPAD, DATA).
*/
HashUpdate(&ocontext, digest, HASHSIZE);
HashFinal(digest, &ocontext);
}
static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
ASSERT(ptr != NULL && len > 0);
ASSERT(rnbyte_cnt <= RNDPOOLSIZE);
mutex_enter(&rndpool_lock);
while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
rndpool[rindex] ^= *ptr;
ptr++; len--;
rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
rnbyte_cnt++;
}
/* Handle buffer full case */
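	/*
	 * Illustrative example: with the pool full (rnbyte_cnt ==
	 * RNDPOOLSIZE, hence rindex == findex), say both indices are 10.
	 * The next byte is XORed into rndpool[10] and both indices advance
	 * to 11, mixing new entropy over the oldest byte while rnbyte_cnt
	 * stays at RNDPOOLSIZE.
	 */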
while (len > 0) {
rndpool[rindex] ^= *ptr;
ptr++; len--;
findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
}
mutex_exit(&rndpool_lock);
}
/*
* Caller should check len <= rnbyte_cnt under the
* rndpool_lock before calling.
*/
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
ASSERT(MUTEX_HELD(&rndpool_lock));
ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);
BUMP_RND_STATS(rs_rndcOut, len);
while (len > 0) {
*ptr = rndpool[findex];
ptr++; len--;
findex = (findex + 1) & (RNDPOOLSIZE - 1);
rnbyte_cnt--;
}
}
/* Random number exported entry points */
/*
* Mix the supplied bytes into the entropy pool of a kCF
* RNG provider.
*/
/* ARGSUSED */
int
random_add_entropy(uint8_t *ptr, size_t len, uint16_t entropy_est)
{
if (len < 1)
return (-1);
rngprov_seed(ptr, len);
return (0);
}
/*
* Get bytes from the /dev/urandom generator. This function
* always succeeds. Returns 0.
*/
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
ASSERT(!mutex_owned(&rndpool_lock));
if (len < 1)
return (0);
return (kcf_rnd_get_pseudo_bytes(ptr, len));
}
/*
* Get bytes from the /dev/random generator. Returns 0
* on success. Returns EAGAIN if there is insufficient entropy.
*/
int
random_get_bytes(uint8_t *ptr, size_t len)
{
ASSERT(!mutex_owned(&rndpool_lock));
if (len < 1)
return (0);
return (kcf_rnd_get_bytes(ptr, len, B_TRUE, B_FALSE));
}
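/*
 * Illustrative consumer sketch (not part of this file): a kernel
 * caller that prefers high-quality bytes but falls back to the
 * pseudo-random generator when entropy is short. 'buf' is a
 * placeholder.
 *
 *	uint8_t buf[20];
 *
 *	if (random_get_bytes(buf, sizeof (buf)) == EAGAIN)
 *		(void) random_get_pseudo_bytes(buf, sizeof (buf));
 */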