/* gcm.c, revision 95fddab55b9e310853e6cd5cd514291ae1c9016f */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _KERNEL
#include <strings.h>
#include <limits.h>
#include <assert.h>
#include <security/cryptoki.h>
#endif /* _KERNEL */
#include <sys/types.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>
#ifdef __amd64
#ifdef _KERNEL
#include <sys/cpuvar.h>		/* cpu_t, CPU */
#include <sys/x86_archext.h>	/* x86_feature, X86_*, CPUID_* */
#include <sys/disp.h>		/* kpreempt_disable(), kpreempt_enable() */
/* Workaround for no XMM kernel thread save/restore */
#define	KPREEMPT_DISABLE	kpreempt_disable()
#define	KPREEMPT_ENABLE		kpreempt_enable()
#else
#include <sys/auxv.h>		/* getisax() */
#include <sys/auxv_386.h>	/* AV_386_PCLMULQDQ bit */
#define	KPREEMPT_DISABLE
#define	KPREEMPT_ENABLE
#endif	/* _KERNEL */
extern void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
static int intel_pclmulqdq_instruction_present(void);
#endif /* __amd64 */
struct aes_block {
uint64_t a;
uint64_t b;
};
/*
* gcm_mul()
* Perform a carry-less multiplication (that is, use XOR instead of the
* multiply operator) on *x_in and *y and place the result in *res.
*
* Byte swap the input (*x_in and *y) and the output (*res).
*
* Note: x_in, y, and res all point to 16-byte numbers (an array of two
* 64-bit integers).
*/
void
gcm_mul(uint64_t *x_in, uint64_t *y, uint64_t *res)
{
#ifdef __amd64
	if (intel_pclmulqdq_instruction_present()) {
		KPREEMPT_DISABLE;
		gcm_mul_pclmulqdq(x_in, y, res);
		KPREEMPT_ENABLE;
	} else
#endif /* __amd64 */
{
static const uint64_t R = 0xe100000000000000ULL;
struct aes_block z = {0, 0};
struct aes_block v;
uint64_t x;
int i, j;
v.a = ntohll(y[0]);
v.b = ntohll(y[1]);
		for (j = 0; j < 2; j++) {
			x = ntohll(x_in[j]);
			for (i = 0; i < 64; i++, x <<= 1) {
if (x & 0x8000000000000000ULL) {
z.a ^= v.a;
z.b ^= v.b;
}
if (v.b & 1ULL) {
v.b = (v.a << 63)|(v.b >> 1);
v.a = (v.a >> 1) ^ R;
} else {
v.b = (v.a << 63)|(v.b >> 1);
v.a = v.a >> 1;
}
}
		}
		res[0] = htonll(z.a);
		res[1] = htonll(z.b);
	}
}
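/*
 * Illustrative sketch (not part of the original file): how gcm_mul() is
 * typically chained to compute GHASH over a sequence of 16-byte blocks,
 * that is, hash = (hash ^ block) * H in GF(2^128).  The helper name and
 * the flat uint64_t block layout are assumptions made for this example;
 * the real code drives the same operation through the GHASH() macro
 * defined below.
 */
static void
ghash_blocks_sketch(uint64_t *h, const uint64_t *blocks, size_t nblocks,
    uint64_t hash[2])
{
	size_t i;
	uint64_t tmp[2];

	for (i = 0; i < nblocks; i++) {
		/* XOR the next block into the running hash ... */
		tmp[0] = hash[0] ^ blocks[2 * i];
		tmp[1] = hash[1] ^ blocks[2 * i + 1];
		/* ... then multiply by the subkey H in GF(2^128). */
		gcm_mul(tmp, h, hash);
	}
}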
#define	GHASH(c, d, t) \
	xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
	gcm_mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \
	(uint64_t *)(void *)(t));
/*
* Encrypt multiple blocks of data in GCM mode. Decrypt for GCM mode
* is done in another function.
*/
int
gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
void *iov_or_mp;
/* accumulate bytes here and return */
length);
return (CRYPTO_SUCCESS);
}
do {
/* Unprocessed data from last call. */
if (ctx->gcm_remainder_len > 0) {
return (CRYPTO_DATA_LEN_RANGE);
} else {
}
/*
* Increment counter. Counter bits are confined
* to the bottom 32 bits of the counter block.
*/
counter &= counter_mask;
if (ctx->gcm_remainder_len > 0) {
need);
}
} else {
/* copy block to where it belongs */
if (out_data_1_len == block_size) {
} else {
if (out_data_2 != NULL) {
}
}
/* update offset */
}
/* add ciphertext to the hash */
/* Update pointer to next block of data to be processed. */
if (ctx->gcm_remainder_len != 0) {
ctx->gcm_remainder_len = 0;
} else {
datap += block_size;
}
/* Incomplete last block. */
goto out;
}
} while (remainder > 0);
out:
return (CRYPTO_SUCCESS);
}
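/*
 * Illustrative sketch (not part of the original file): the counter update
 * described inside gcm_mode_encrypt_contiguous_blocks() above.  Only the
 * low 32 bits of the 128-bit counter block are incremented; the rest of
 * the block (derived from J0) is left untouched.  The helper name and the
 * cb[2] layout (the block viewed as two big-endian 64-bit words) are
 * assumptions made for this example.
 */
static void
gcm_incr_counter_sketch(uint64_t cb[2])
{
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	uint64_t counter;

	/* Pull out the low 32 bits, bump them, and wrap modulo 2^32. */
	counter = ntohll(cb[1] & counter_mask);
	counter = htonll(counter + 1);
	counter &= counter_mask;

	/* Merge the incremented value back into the counter block. */
	cb[1] = (cb[1] & ~counter_mask) | counter;
}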
/* ARGSUSED */
int
gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
int i, rv;
return (CRYPTO_DATA_LEN_RANGE);
}
if (ctx->gcm_remainder_len > 0) {
/*
* Here is where we deal with data that is not a
* multiple of the block size.
*/
/*
* Increment counter.
*/
counter &= counter_mask;
/* XOR with counter block */
for (i = 0; i < ctx->gcm_remainder_len; i++) {
}
/* add ciphertext to the hash */
}
if (ctx->gcm_remainder_len > 0) {
if (rv != CRYPTO_SUCCESS)
return (rv);
}
ctx->gcm_remainder_len = 0;
if (rv != CRYPTO_SUCCESS)
return (rv);
return (CRYPTO_SUCCESS);
}
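/*
 * Illustrative sketch (not part of the original file): per NIST SP 800-38D,
 * the authentication tag produced in the final step is the GHASH value
 * encrypted with the pre-counter block J0 and truncated to the requested
 * tag length, T = MSB_tag_len(E(K, J0) XOR GHASH).  The caller is assumed
 * to pass E(K, J0) already computed; the helper name and argument layout
 * are assumptions made for this example.
 */
static void
gcm_make_tag_sketch(const uint8_t encrypted_j0[16], const uint8_t ghash[16],
    size_t tag_len, uint8_t *tag)
{
	size_t i;

	/* XOR the hash with E(K, J0), keeping only the leading tag bytes. */
	for (i = 0; i < tag_len && i < 16; i++)
		tag[i] = encrypted_j0[i] ^ ghash[i];
}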
/*
* This will only deal with decrypting the last block of the input that
* might not be a multiple of block length.
*/
static void
gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
int i;
/*
* Increment counter.
* Counter bits are confined to the bottom 32 bits
*/
counter &= counter_mask;
/* authentication tag */
/* add ciphertext to the hash */
/* decrypt remaining ciphertext */
/* XOR with counter block */
for (i = 0; i < ctx->gcm_remainder_len; i++) {
}
}
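/*
 * Illustrative sketch (not part of the original file): the core of the
 * partial-block handling in gcm_decrypt_incomplete_block() above.  GCM is
 * a counter mode, so a trailing block shorter than a full block is handled
 * by XORing just the remaining ciphertext bytes with the corresponding
 * keystream bytes (the encrypted counter block).  The helper name and
 * arguments are assumptions made for this example.
 */
static void
gcm_xor_partial_sketch(const uint8_t *keystream, const uint8_t *in,
    uint8_t *out, size_t len)
{
	size_t i;

	/* Only 'len' bytes of the final block are valid. */
	for (i = 0; i < len; i++)
		out[i] = in[i] ^ keystream[i];
}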
/* ARGSUSED */
int
gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
/*
* Copy contiguous ciphertext input blocks to plaintext buffer.
* Ciphertext will be decrypted in the final.
*/
if (length > 0) {
#ifdef _KERNEL
#else
#endif
return (CRYPTO_HOST_MEMORY);
length);
}
ctx->gcm_remainder_len = 0;
return (CRYPTO_SUCCESS);
}
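/*
 * Illustrative sketch (not part of the original file): the buffering
 * strategy described in the comment inside
 * gcm_mode_decrypt_contiguous_blocks() above.  Incoming ciphertext is only
 * appended to a buffer; decryption and tag checking are deferred to the
 * final call, once the full ciphertext (and the tag that trails it) is
 * available.  The helper name and the fixed-capacity buffer arguments are
 * assumptions made for this example and are not the original context
 * fields.
 */
static int
gcm_accumulate_sketch(uint8_t *pt_buf, size_t buf_size, size_t *used,
    const uint8_t *data, size_t length)
{
	/* Nothing is decrypted here; bytes are only queued for the final. */
	if (*used + length > buf_size)
		return (CRYPTO_HOST_MEMORY);

	bcopy(data, pt_buf + *used, length);
	*used += length;
	return (CRYPTO_SUCCESS);
}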
int
gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
while (remainder > 0) {
/* add ciphertext to the hash */
/*
* Increment counter.
* Counter bits are confined to the bottom 32 bits
*/
counter &= counter_mask;
/* XOR with ciphertext */
processed += block_size;
blockp += block_size;
remainder -= block_size;
/* Incomplete last block */
/*
* not expecting anymore ciphertext, just
* compute plaintext for the remaining input
*/
ctx->gcm_remainder_len = 0;
goto out;
}
}
out:
/* compare the input authentication tag with what we calculated */
/* They don't match */
return (CRYPTO_INVALID_MAC);
} else {
if (rv != CRYPTO_SUCCESS)
return (rv);
}
return (CRYPTO_SUCCESS);
}
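/*
 * Illustrative sketch (not part of the original file): the tag check in
 * gcm_decrypt_final() above.  The tag supplied with the ciphertext is
 * compared against the locally computed one; on mismatch the whole message
 * is rejected with CRYPTO_INVALID_MAC and no plaintext is returned.  The
 * helper name and arguments are assumptions made for this example.
 */
static int
gcm_check_tag_sketch(const uint8_t *expected, const uint8_t *computed,
    size_t tag_len)
{
	/* bcmp() returns nonzero when the two tags differ. */
	if (bcmp(expected, computed, tag_len) != 0)
		return (CRYPTO_INVALID_MAC);

	return (CRYPTO_SUCCESS);
}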
static int
gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)
{
	size_t tag_len;

	/*
	 * Check the length of the authentication tag (in bits).
	 */
	tag_len = gcm_param->ulTagBits;
	switch (tag_len) {
case 32:
case 64:
case 96:
case 104:
case 112:
case 120:
case 128:
break;
default:
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
	if (gcm_param->ulIvLen == 0)
		return (CRYPTO_MECHANISM_PARAM_INVALID);
return (CRYPTO_SUCCESS);
}
static void
gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len, gcm_ctx_t *ctx,
    size_t block_size, void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	if (iv_len == 12) {
		/* 96-bit IV: the pre-counter block is IV || 0x00000001 */
		bcopy(iv, cb, 12);
		cb[12] = 0;
		cb[13] = 0;
		cb[14] = 0;
		cb[15] = 1;
		/* J0 will be used again in the final */
} else {
/* GHASH the IV */
do {
if (remainder < block_size) {
remainder = 0;
} else {
processed += block_size;
remainder -= block_size;
}
} while (remainder > 0);
len_a_len_c[0] = 0;
/* J0 will be used again in the final */
}
}
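/*
 * Illustrative sketch (not part of the original file): how the pre-counter
 * block J0 is formed from the IV, as in gcm_format_initial_blocks() above
 * and in NIST SP 800-38D.  For the common 96-bit IV the block is simply
 * IV || 0x00000001; for any other IV length, J0 is instead the GHASH of
 * the zero-padded IV followed by a length block (the else branch above).
 * The helper name is an assumption made for this example.
 */
static void
gcm_j0_from_96bit_iv_sketch(const uint8_t iv[12], uint8_t j0[16])
{
	bcopy(iv, j0, 12);

	/* Remaining 32 bits are a big-endian block counter starting at 1. */
	j0[12] = 0;
	j0[13] = 0;
	j0[14] = 0;
	j0[15] = 1;
}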
/*
* The following function is called at encrypt or decrypt init time
* for AES GCM mode.
*/
int
gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
    unsigned char *auth_data, size_t auth_data_len, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
/* encrypt zero block to get subkey H */
processed = 0;
do {
if (remainder < block_size) {
/*
* There's not a block full of data, pad rest of
* buffer with zero
*/
remainder = 0;
} else {
processed += block_size;
remainder -= block_size;
}
/* add auth data to the hash */
} while (remainder > 0);
return (CRYPTO_SUCCESS);
}
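/*
 * Illustrative sketch (not part of the original file): the first setup step
 * performed by gcm_init() above.  The hash subkey H is the encryption of
 * the all-zero block, H = E(K, 0^128), and every subsequent GHASH
 * multiplication uses it.  Authenticated data that does not fill a whole
 * block is zero-padded before being folded into the hash (the loop above).
 * The helper name and the encrypt callback type are assumptions made for
 * this example.
 */
static void
gcm_subkey_sketch(const void *keysched,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    uint8_t subkey_h[16])
{
	uint8_t zero_block[16] = { 0 };

	/* H = E(K, 0^128) */
	(void) encrypt_block(keysched, zero_block, subkey_h);
}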
int
gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
int rv;
return (rv);
}
/* these values are in bits */
rv = CRYPTO_SUCCESS;
} else {
goto out;
}
}
out:
return (rv);
}
int
gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
int rv;
/* these values are in bits */
rv = CRYPTO_SUCCESS;
} else {
goto out;
}
}
out:
return (rv);
}
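/*
 * Illustrative sketch (not part of the original file): what "these values
 * are in bits" refers to in the two init routines above.  The lengths of
 * the AAD and of the ciphertext are carried in the final GHASH block as
 * 64-bit big-endian bit counts, so byte counts are shifted left by 3.
 * The helper name is an assumption made for this example.
 */
static void
gcm_len_block_sketch(uint64_t aad_len_bytes, uint64_t ct_len_bytes,
    uint64_t len_a_len_c[2])
{
	len_a_len_c[0] = htonll(aad_len_bytes << 3);	/* len(A) in bits */
	len_a_len_c[1] = htonll(ct_len_bytes << 3);	/* len(C) in bits */
}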
void *
gcm_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GCM_MODE;
	return (gcm_ctx);
}
void *
gmac_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

#ifdef _KERNEL
	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
#else
	if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL)
#endif
		return (NULL);

	gcm_ctx->gcm_flags = GMAC_MODE;
	return (gcm_ctx);
}
void
gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
{
	ctx->gcm_kmflag = kmflag;
}
#ifdef __amd64
/*
* Return 1 if executing on Intel with PCLMULQDQ instructions,
* otherwise 0 (i.e., Intel without PCLMULQDQ or AMD64).
* Cache the result, as the CPU can't change.
*
* Note: the userland version uses getisax(). The kernel version uses
* global variable x86_feature or the output of cpuid_insn().
*/
static int
intel_pclmulqdq_instruction_present(void)
{
	static int	cached_result = -1;

	if (cached_result == -1) { /* first time */
#ifdef _KERNEL
#ifdef X86_PCLMULQDQ
		cached_result = (x86_feature & X86_PCLMULQDQ) != 0;
#else
		if (cpuid_getvendor(CPU) == X86_VENDOR_Intel) {
			struct cpuid_regs cpr;
			cpu_t *cp = CPU;

			cpr.cp_eax = 1; /* Function 1: processor info */
			(void) cpuid_insn(cp, &cpr);
			cached_result = ((cpr.cp_ecx &
			    CPUID_INTC_ECX_PCLMULQDQ) != 0);
		} else {
			cached_result = 0;
		}
#endif	/* X86_PCLMULQDQ */
#else
		uint_t ui = 0;

		(void) getisax(&ui, 1);
		cached_result = (ui & AV_386_PCLMULQDQ) != 0;
#endif	/* _KERNEL */
}
return (cached_result);
}
#endif /* __amd64 */