2N/A/* This file was automatically imported with
2N/A import_gcry.py. Please don't modify it */
2N/A#include <grub/dl.h>
2N/AGRUB_MOD_LICENSE ("GPLv3+");
2N/A/* Rijndael (AES) for GnuPG
2N/A * Copyright (C) 2000, 2001, 2002, 2003, 2007,
2N/A * 2008 Free Software Foundation, Inc.
2N/A *
2N/A * This file is part of Libgcrypt.
2N/A *
2N/A * Libgcrypt is free software; you can redistribute it and/or modify
2N/A * it under the terms of the GNU Lesser General Public License as
2N/A * published by the Free Software Foundation; either version 2.1 of
2N/A * the License, or (at your option) any later version.
2N/A *
2N/A * Libgcrypt is distributed in the hope that it will be useful,
2N/A * but WITHOUT ANY WARRANTY; without even the implied warranty of
2N/A * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2N/A * GNU Lesser General Public License for more details.
2N/A *
2N/A * You should have received a copy of the GNU Lesser General Public
2N/A * License along with this program; if not, see <http://www.gnu.org/licenses/>.
2N/A *******************************************************************
2N/A * The code here is based on the optimized implementation taken from
2N/A * http://www.esat.kuleuven.ac.be/~rijmen/rijndael/ on Oct 2, 2000,
2N/A * which carries this notice:
2N/A *------------------------------------------
2N/A * rijndael-alg-fst.c v2.3 April '2000
2N/A *
2N/A * Optimised ANSI C code
2N/A *
2N/A * authors: v1.0: Antoon Bosselaers
2N/A * v2.0: Vincent Rijmen
2N/A * v2.3: Paulo Barreto
2N/A *
2N/A * This code is placed in the public domain.
2N/A *------------------------------------------
2N/A *
2N/A * The SP800-38a document is available at:
2N/A * http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
2N/A *
2N/A */
2N/A
2N/A
2N/A#include "types.h" /* for byte and u32 typedefs */
2N/A#include "g10lib.h"
2N/A#include "cipher.h"
2N/A
2N/A#define MAXKC (256/32)
2N/A#define MAXROUNDS 14
2N/A#define BLOCKSIZE (128/8)
2N/A
2N/A
2N/A/* USE_PADLOCK indicates whether to compile the padlock specific
2N/A code. */
2N/A#undef USE_PADLOCK
2N/A#ifdef ENABLE_PADLOCK_SUPPORT
2N/A# if defined (__i386__) && SIZEOF_UNSIGNED_LONG == 4 && defined (__GNUC__)
2N/A# define USE_PADLOCK
2N/A# endif
2N/A#endif /*ENABLE_PADLOCK_SUPPORT*/
2N/A
2N/A
/* Per-instance cipher state.  Holds the round count and both key
   schedules; the unions force PROPERLY_ALIGNED_TYPE alignment so the
   u32-wide accesses in the cipher core are safe.  */
typedef struct
{
  int ROUNDS;                /* Key-length-dependent number of rounds. */
  int decryption_prepared;   /* The decryption key schedule is available. */
#ifdef USE_PADLOCK
  int use_padlock;           /* Padlock shall be used. */
  /* The key as passed to the padlock engine. */
  unsigned char padlock_key[16] __attribute__ ((aligned (16)));
#endif
  /* Encryption key schedule: one 16-byte round key per round plus
     the initial whitening key.  */
  union
  {
    PROPERLY_ALIGNED_TYPE dummy;
    byte keyschedule[MAXROUNDS+1][4][4];
  } u1;
  /* Decryption key schedule, derived lazily by prepare_decryption.  */
  union
  {
    PROPERLY_ALIGNED_TYPE dummy;
    byte keyschedule[MAXROUNDS+1][4][4];
  } u2;
} RIJNDAEL_context;

/* Shorthands for the two key schedules above.  */
#define keySched u1.keyschedule
#define keySched2 u2.keyschedule
2N/A
2N/A/* All the numbers. */
2N/A#include "rijndael-tables.h"
2N/A
2N/A
2N/A/* Perform the key setup. */
/* Perform the key setup.  Expand the KEYLEN-byte KEY (16, 24 or 32
   bytes) into the encryption key schedule CTX->keySched and record
   the round count in CTX->ROUNDS.  Returns 0 on success,
   GPG_ERR_INV_KEYLEN for an unsupported key length, or
   GPG_ERR_SELFTEST_FAILED if the on-the-fly self-test failed.  */
static gcry_err_code_t
do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
{
  static int initialized = 0;
  static const char *selftest_failed=0;
  int ROUNDS;                     /* Rounds for this key size.  */
  int i,j, r, t, rconpointer = 0; /* rconpointer indexes the rcon[] round constants.  */
  int KC;                         /* Key length in 32-bit words (4, 6 or 8).  */
  /* The unions only exist to give the byte arrays the alignment the
     u32 accesses below require; the #defines restore the short
     historical names used by the original reference code.  */
  union
  {
    PROPERLY_ALIGNED_TYPE dummy;
    byte k[MAXKC][4];
  } k;
#define k k.k
  union
  {
    PROPERLY_ALIGNED_TYPE dummy;
    byte tk[MAXKC][4];
  } tk;
#define tk tk.tk

  /* The on-the-fly self tests are only run in non-fips mode. In fips
     mode explicit self-tests are required.  Actually the on-the-fly
     self-tests are not fully thread-safe and it might happen that a
     failed self-test won't get noticed in another thread.

     FIXME: We might want to have a central registry of succeeded
     self-tests. */
  if (!fips_mode () && !initialized)
    {
      initialized = 1;
      selftest_failed = selftest ();
      if (selftest_failed)
        log_error ("%s\n", selftest_failed );
    }
  if (selftest_failed)
    return GPG_ERR_SELFTEST_FAILED;

  /* A new key invalidates any previously derived decryption
     schedule.  */
  ctx->decryption_prepared = 0;
#ifdef USE_PADLOCK
  ctx->use_padlock = 0;
#endif

  /* Map the key length to round count and key words; only 128-bit
     keys may be handed to the padlock engine.  */
  if( keylen == 128/8 )
    {
      ROUNDS = 10;
      KC = 4;
#ifdef USE_PADLOCK
      if ((_gcry_get_hw_features () & HWF_PADLOCK_AES))
        {
          ctx->use_padlock = 1;
          memcpy (ctx->padlock_key, key, keylen);
        }
#endif
    }
  else if ( keylen == 192/8 )
    {
      ROUNDS = 12;
      KC = 6;
    }
  else if ( keylen == 256/8 )
    {
      ROUNDS = 14;
      KC = 8;
    }
  else
    return GPG_ERR_INV_KEYLEN;

  ctx->ROUNDS = ROUNDS;

#ifdef USE_PADLOCK
  if (ctx->use_padlock)
    {
      /* Nothing to do as we support only hardware key generation for
         now. */
    }
  else
#endif /*USE_PADLOCK*/
    {
#define W (ctx->keySched)
      /* Copy the raw key into K, four bytes per column.  */
      for (i = 0; i < (int)keylen; i++)
        {
          k[i >> 2][i & 3] = key[i];
        }

      for (j = KC-1; j >= 0; j--)
        {
          *((u32*)tk[j]) = *((u32*)k[j]);
        }
      r = 0;  /* Current round key being filled.  */
      t = 0;  /* Current word within that round key.  */
      /* Copy values into round key array. */
      for (j = 0; (j < KC) && (r < ROUNDS + 1); )
        {
          for (; (j < KC) && (t < 4); j++, t++)
            {
              *((u32*)W[r][t]) = *((u32*)tk[j]);
            }
          if (t == 4)
            {
              r++;
              t = 0;
            }
        }

      while (r < ROUNDS + 1)
        {
          /* While not enough round key material calculated calculate
             new values.  The first word mixes in the rotated and
             S-box-substituted last word plus the round constant.  */
          tk[0][0] ^= S[tk[KC-1][1]];
          tk[0][1] ^= S[tk[KC-1][2]];
          tk[0][2] ^= S[tk[KC-1][3]];
          tk[0][3] ^= S[tk[KC-1][0]];
          tk[0][0] ^= rcon[rconpointer++];

          if (KC != 8)
            {
              for (j = 1; j < KC; j++)
                {
                  *((u32*)tk[j]) ^= *((u32*)tk[j-1]);
                }
            }
          else
            {
              /* 256-bit keys apply an extra S-box substitution to the
                 middle word (FIPS-197 key expansion for Nk = 8).  */
              for (j = 1; j < KC/2; j++)
                {
                  *((u32*)tk[j]) ^= *((u32*)tk[j-1]);
                }
              tk[KC/2][0] ^= S[tk[KC/2 - 1][0]];
              tk[KC/2][1] ^= S[tk[KC/2 - 1][1]];
              tk[KC/2][2] ^= S[tk[KC/2 - 1][2]];
              tk[KC/2][3] ^= S[tk[KC/2 - 1][3]];
              for (j = KC/2 + 1; j < KC; j++)
                {
                  *((u32*)tk[j]) ^= *((u32*)tk[j-1]);
                }
            }

          /* Copy values into round key array. */
          for (j = 0; (j < KC) && (r < ROUNDS + 1); )
            {
              for (; (j < KC) && (t < 4); j++, t++)
                {
                  *((u32*)W[r][t]) = *((u32*)tk[j]);
                }
              if (t == 4)
                {
                  r++;
                  t = 0;
                }
            }
        }
#undef W
    }

  return 0;
#undef tk
#undef k
}
2N/A
2N/A
2N/Astatic gcry_err_code_t
2N/Arijndael_setkey (void *context, const byte *key, const unsigned keylen)
2N/A{
2N/A RIJNDAEL_context *ctx = context;
2N/A
2N/A int rc = do_setkey (ctx, key, keylen);
2N/A _gcry_burn_stack ( 100 + 16*sizeof(int));
2N/A return rc;
2N/A}
2N/A
2N/A
/* Make a decryption key from an encryption key.  Copies the whole
   encryption schedule into keySched2 and then transforms the inner
   round keys (rounds 1 .. ROUNDS-1) through the U1..U4 tables from
   rijndael-tables.h — presumably the InvMixColumn tables of the
   equivalent-inverse-cipher construction; the first and last round
   keys are used unmodified.  */
static void
prepare_decryption( RIJNDAEL_context *ctx )
{
  int r;
  /* Union only to carry the alignment dummy; W is walked through the
     byte pointer w.  */
  union
  {
    PROPERLY_ALIGNED_TYPE dummy;
    byte *w;
  } w;
#define w w.w

  /* Start from a verbatim copy of the encryption schedule.  */
  for (r=0; r < MAXROUNDS+1; r++ )
    {
      *((u32*)ctx->keySched2[r][0]) = *((u32*)ctx->keySched[r][0]);
      *((u32*)ctx->keySched2[r][1]) = *((u32*)ctx->keySched[r][1]);
      *((u32*)ctx->keySched2[r][2]) = *((u32*)ctx->keySched[r][2]);
      *((u32*)ctx->keySched2[r][3]) = *((u32*)ctx->keySched[r][3]);
    }
#define W (ctx->keySched2)
  /* Transform each inner round key word in place.  */
  for (r = 1; r < ctx->ROUNDS; r++)
    {
      w = W[r][0];
      *((u32*)w) = *((u32*)U1[w[0]]) ^ *((u32*)U2[w[1]])
        ^ *((u32*)U3[w[2]]) ^ *((u32*)U4[w[3]]);

      w = W[r][1];
      *((u32*)w) = *((u32*)U1[w[0]]) ^ *((u32*)U2[w[1]])
        ^ *((u32*)U3[w[2]]) ^ *((u32*)U4[w[3]]);

      w = W[r][2];
      *((u32*)w) = *((u32*)U1[w[0]]) ^ *((u32*)U2[w[1]])
        ^ *((u32*)U3[w[2]]) ^ *((u32*)U4[w[3]]);

      w = W[r][3];
      *((u32*)w) = *((u32*)U1[w[0]]) ^ *((u32*)U2[w[1]])
        ^ *((u32*)U3[w[2]]) ^ *((u32*)U4[w[3]]);
    }
#undef W
#undef w
}
2N/A
2N/A
2N/A
2N/A#pragma GCC diagnostic ignored "-Wstrict-aliasing"
/* Encrypt one block.  A and B need to be aligned on a 4 byte
   boundary.  A and B may be the same.  This is the classic T-table
   formulation of AES (Rijmen/Bosselaers/Barreto optimised code):
   every round computes each output word as four table lookups XORed
   together, folding SubBytes, ShiftRows and MixColumns into the
   T1..T4 tables.  */
static void
do_encrypt_aligned (const RIJNDAEL_context *ctx,
                    unsigned char *b, const unsigned char *a)
{
#define rk (ctx->keySched)
  int ROUNDS = ctx->ROUNDS;
  int r;
  union
  {
    u32  tempu32[4];  /* Force correct alignment. */
    byte temp[4][4];
  } u;

  /* Initial AddRoundKey with round key 0, then round 1 via the
     T tables.  */
  *((u32*)u.temp[0]) = *((u32*)(a   )) ^ *((u32*)rk[0][0]);
  *((u32*)u.temp[1]) = *((u32*)(a+ 4)) ^ *((u32*)rk[0][1]);
  *((u32*)u.temp[2]) = *((u32*)(a+ 8)) ^ *((u32*)rk[0][2]);
  *((u32*)u.temp[3]) = *((u32*)(a+12)) ^ *((u32*)rk[0][3]);
  *((u32*)(b    ))   = (*((u32*)T1[u.temp[0][0]])
                        ^ *((u32*)T2[u.temp[1][1]])
                        ^ *((u32*)T3[u.temp[2][2]])
                        ^ *((u32*)T4[u.temp[3][3]]));
  *((u32*)(b + 4))   = (*((u32*)T1[u.temp[1][0]])
                        ^ *((u32*)T2[u.temp[2][1]])
                        ^ *((u32*)T3[u.temp[3][2]])
                        ^ *((u32*)T4[u.temp[0][3]]));
  *((u32*)(b + 8))   = (*((u32*)T1[u.temp[2][0]])
                        ^ *((u32*)T2[u.temp[3][1]])
                        ^ *((u32*)T3[u.temp[0][2]])
                        ^ *((u32*)T4[u.temp[1][3]]));
  *((u32*)(b +12))   = (*((u32*)T1[u.temp[3][0]])
                        ^ *((u32*)T2[u.temp[0][1]])
                        ^ *((u32*)T3[u.temp[1][2]])
                        ^ *((u32*)T4[u.temp[2][3]]));

  /* Middle rounds: AddRoundKey, then the combined table round.  The
     staggered temp indices implement ShiftRows.  */
  for (r = 1; r < ROUNDS-1; r++)
    {
      *((u32*)u.temp[0]) = *((u32*)(b   )) ^ *((u32*)rk[r][0]);
      *((u32*)u.temp[1]) = *((u32*)(b+ 4)) ^ *((u32*)rk[r][1]);
      *((u32*)u.temp[2]) = *((u32*)(b+ 8)) ^ *((u32*)rk[r][2]);
      *((u32*)u.temp[3]) = *((u32*)(b+12)) ^ *((u32*)rk[r][3]);

      *((u32*)(b    ))   = (*((u32*)T1[u.temp[0][0]])
                            ^ *((u32*)T2[u.temp[1][1]])
                            ^ *((u32*)T3[u.temp[2][2]])
                            ^ *((u32*)T4[u.temp[3][3]]));
      *((u32*)(b + 4))   = (*((u32*)T1[u.temp[1][0]])
                            ^ *((u32*)T2[u.temp[2][1]])
                            ^ *((u32*)T3[u.temp[3][2]])
                            ^ *((u32*)T4[u.temp[0][3]]));
      *((u32*)(b + 8))   = (*((u32*)T1[u.temp[2][0]])
                            ^ *((u32*)T2[u.temp[3][1]])
                            ^ *((u32*)T3[u.temp[0][2]])
                            ^ *((u32*)T4[u.temp[1][3]]));
      *((u32*)(b +12))   = (*((u32*)T1[u.temp[3][0]])
                            ^ *((u32*)T2[u.temp[0][1]])
                            ^ *((u32*)T3[u.temp[1][2]])
                            ^ *((u32*)T4[u.temp[2][3]]));
    }

  /* Last round is special: no MixColumns, only SubBytes/ShiftRows and
     the final AddRoundKey.  Byte [1] of a T1 entry appears to hold
     the plain S-box value S[x], which is why single bytes are pulled
     out here.  */
  *((u32*)u.temp[0]) = *((u32*)(b   )) ^ *((u32*)rk[ROUNDS-1][0]);
  *((u32*)u.temp[1]) = *((u32*)(b+ 4)) ^ *((u32*)rk[ROUNDS-1][1]);
  *((u32*)u.temp[2]) = *((u32*)(b+ 8)) ^ *((u32*)rk[ROUNDS-1][2]);
  *((u32*)u.temp[3]) = *((u32*)(b+12)) ^ *((u32*)rk[ROUNDS-1][3]);
  b[ 0] = T1[u.temp[0][0]][1];
  b[ 1] = T1[u.temp[1][1]][1];
  b[ 2] = T1[u.temp[2][2]][1];
  b[ 3] = T1[u.temp[3][3]][1];
  b[ 4] = T1[u.temp[1][0]][1];
  b[ 5] = T1[u.temp[2][1]][1];
  b[ 6] = T1[u.temp[3][2]][1];
  b[ 7] = T1[u.temp[0][3]][1];
  b[ 8] = T1[u.temp[2][0]][1];
  b[ 9] = T1[u.temp[3][1]][1];
  b[10] = T1[u.temp[0][2]][1];
  b[11] = T1[u.temp[1][3]][1];
  b[12] = T1[u.temp[3][0]][1];
  b[13] = T1[u.temp[0][1]][1];
  b[14] = T1[u.temp[1][2]][1];
  b[15] = T1[u.temp[2][3]][1];
  *((u32*)(b   )) ^= *((u32*)rk[ROUNDS][0]);
  *((u32*)(b+ 4)) ^= *((u32*)rk[ROUNDS][1]);
  *((u32*)(b+ 8)) ^= *((u32*)rk[ROUNDS][2]);
  *((u32*)(b+12)) ^= *((u32*)rk[ROUNDS][3]);
#undef rk
}
2N/A
2N/A
2N/Astatic void
2N/Ado_encrypt (const RIJNDAEL_context *ctx,
2N/A unsigned char *bx, const unsigned char *ax)
2N/A{
2N/A /* BX and AX are not necessary correctly aligned. Thus we need to
2N/A copy them here. */
2N/A union
2N/A {
2N/A u32 dummy[4];
2N/A byte a[16];
2N/A } a;
2N/A union
2N/A {
2N/A u32 dummy[4];
2N/A byte b[16];
2N/A } b;
2N/A
2N/A memcpy (a.a, ax, 16);
2N/A do_encrypt_aligned (ctx, b.b, a.a);
2N/A memcpy (bx, b.b, 16);
2N/A}
2N/A
2N/A
/* Encrypt or decrypt one block using the padlock engine.  A and B may
   be the same.  DECRYPT_FLAG selects the direction.  Only reached for
   128-bit keys (see do_setkey), hence a fixed-size control word.  */
#ifdef USE_PADLOCK
static void
do_padlock (const RIJNDAEL_context *ctx, int decrypt_flag,
            unsigned char *bx, const unsigned char *ax)
{
  /* BX and AX are not necessary correctly aligned.  Thus we need to
     copy them here; the padlock engine requires 16-byte alignment.  */
  unsigned char a[16] __attribute__ ((aligned (16)));
  unsigned char b[16] __attribute__ ((aligned (16)));
  unsigned int cword[4] __attribute__ ((aligned (16)));

  /* The control word fields are:
      127:12   11:10 9     8     7      6     5     4     3:0
      RESERVED KSIZE CRYPT INTER KEYGN  CIPHR ALIGN DGEST ROUND  */
  cword[0] = (ctx->ROUNDS & 15);  /* (The mask is just a safeguard.)  */
  cword[1] = 0;
  cword[2] = 0;
  cword[3] = 0;
  if (decrypt_flag)
    cword[0] |= 0x00000200;   /* Set the CRYPT (direction) bit.  */

  memcpy (a, ax, 16);

  asm volatile
    ("pushfl\n\t"          /* Force key reload (EFLAGS write resets
                              the engine's cached key).  */
     "popfl\n\t"
     "xchg %3, %%ebx\n\t"  /* Load key; xchg keeps EBX (the PIC/GOT
                              register) intact around the call.  */
     "movl $1, %%ecx\n\t"  /* Init counter for just one block.  */
     ".byte 0xf3, 0x0f, 0xa7, 0xc8\n\t" /* REP XCRYPT-ECB opcode.  */
     "xchg %3, %%ebx\n"    /* Restore GOT register.  */
     : /* No output */
     : "S" (a), "D" (b), "d" (cword), "r" (ctx->padlock_key)
     : "%ecx", "cc", "memory"
     );

  memcpy (bx, b, 16);

}
#endif /*USE_PADLOCK*/
2N/A
2N/A
2N/Astatic void
2N/Arijndael_encrypt (void *context, byte *b, const byte *a)
2N/A{
2N/A RIJNDAEL_context *ctx = context;
2N/A
2N/A#ifdef USE_PADLOCK
2N/A if (ctx->use_padlock)
2N/A {
2N/A do_padlock (ctx, 0, b, a);
2N/A _gcry_burn_stack (48 + 15 /* possible padding for alignment */);
2N/A }
2N/A else
2N/A#endif /*USE_PADLOCK*/
2N/A {
2N/A do_encrypt (ctx, b, a);
2N/A _gcry_burn_stack (48 + 2*sizeof(int));
2N/A }
2N/A}
2N/A
2N/A
2N/A/* Bulk encryption of complete blocks in CFB mode. Caller needs to
2N/A make sure that IV is aligned on an unsigned long boundary. This
2N/A function is only intended for the bulk encryption feature of
2N/A cipher.c. */
2N/A
2N/A
2N/A/* Bulk encryption of complete blocks in CBC mode. Caller needs to
2N/A make sure that IV is aligned on an unsigned long boundary. This
2N/A function is only intended for the bulk encryption feature of
2N/A cipher.c. */
2N/A
2N/A
2N/A
/* Decrypt one block.  A and B need to be aligned on a 4 byte boundary
   and the decryption must have been prepared.  A and B may be the
   same.  Mirror of do_encrypt_aligned using the inverse tables T5..T8
   and the transformed schedule keySched2, with round keys consumed in
   reverse order (equivalent inverse cipher).  */
static void
do_decrypt_aligned (RIJNDAEL_context *ctx,
                    unsigned char *b, const unsigned char *a)
{
#define rk  (ctx->keySched2)
  int ROUNDS = ctx->ROUNDS;
  int r;
  union
  {
    u32  tempu32[4];  /* Force correct alignment. */
    byte temp[4][4];
  } u;


  /* Initial AddRoundKey with the LAST round key, then the first
     inverse round via the T5..T8 tables.  */
  *((u32*)u.temp[0]) = *((u32*)(a   )) ^ *((u32*)rk[ROUNDS][0]);
  *((u32*)u.temp[1]) = *((u32*)(a+ 4)) ^ *((u32*)rk[ROUNDS][1]);
  *((u32*)u.temp[2]) = *((u32*)(a+ 8)) ^ *((u32*)rk[ROUNDS][2]);
  *((u32*)u.temp[3]) = *((u32*)(a+12)) ^ *((u32*)rk[ROUNDS][3]);

  *((u32*)(b   ))    = (*((u32*)T5[u.temp[0][0]])
                        ^ *((u32*)T6[u.temp[3][1]])
                        ^ *((u32*)T7[u.temp[2][2]])
                        ^ *((u32*)T8[u.temp[1][3]]));
  *((u32*)(b+ 4))    = (*((u32*)T5[u.temp[1][0]])
                        ^ *((u32*)T6[u.temp[0][1]])
                        ^ *((u32*)T7[u.temp[3][2]])
                        ^ *((u32*)T8[u.temp[2][3]]));
  *((u32*)(b+ 8))    = (*((u32*)T5[u.temp[2][0]])
                        ^ *((u32*)T6[u.temp[1][1]])
                        ^ *((u32*)T7[u.temp[0][2]])
                        ^ *((u32*)T8[u.temp[3][3]]));
  *((u32*)(b+12))    = (*((u32*)T5[u.temp[3][0]])
                        ^ *((u32*)T6[u.temp[2][1]])
                        ^ *((u32*)T7[u.temp[1][2]])
                        ^ *((u32*)T8[u.temp[0][3]]));

  /* Middle rounds, walking the key schedule backwards.  The staggered
     temp indices implement InvShiftRows.  */
  for (r = ROUNDS-1; r > 1; r--)
    {
      *((u32*)u.temp[0]) = *((u32*)(b   )) ^ *((u32*)rk[r][0]);
      *((u32*)u.temp[1]) = *((u32*)(b+ 4)) ^ *((u32*)rk[r][1]);
      *((u32*)u.temp[2]) = *((u32*)(b+ 8)) ^ *((u32*)rk[r][2]);
      *((u32*)u.temp[3]) = *((u32*)(b+12)) ^ *((u32*)rk[r][3]);
      *((u32*)(b   ))    = (*((u32*)T5[u.temp[0][0]])
                            ^ *((u32*)T6[u.temp[3][1]])
                            ^ *((u32*)T7[u.temp[2][2]])
                            ^ *((u32*)T8[u.temp[1][3]]));
      *((u32*)(b+ 4))    = (*((u32*)T5[u.temp[1][0]])
                            ^ *((u32*)T6[u.temp[0][1]])
                            ^ *((u32*)T7[u.temp[3][2]])
                            ^ *((u32*)T8[u.temp[2][3]]));
      *((u32*)(b+ 8))    = (*((u32*)T5[u.temp[2][0]])
                            ^ *((u32*)T6[u.temp[1][1]])
                            ^ *((u32*)T7[u.temp[0][2]])
                            ^ *((u32*)T8[u.temp[3][3]]));
      *((u32*)(b+12))    = (*((u32*)T5[u.temp[3][0]])
                            ^ *((u32*)T6[u.temp[2][1]])
                            ^ *((u32*)T7[u.temp[1][2]])
                            ^ *((u32*)T8[u.temp[0][3]]));
    }

  /* Last round is special: plain inverse S-box lookups via S5, then
     the final AddRoundKey with round key 0.  */
  *((u32*)u.temp[0]) = *((u32*)(b   )) ^ *((u32*)rk[1][0]);
  *((u32*)u.temp[1]) = *((u32*)(b+ 4)) ^ *((u32*)rk[1][1]);
  *((u32*)u.temp[2]) = *((u32*)(b+ 8)) ^ *((u32*)rk[1][2]);
  *((u32*)u.temp[3]) = *((u32*)(b+12)) ^ *((u32*)rk[1][3]);
  b[ 0] = S5[u.temp[0][0]];
  b[ 1] = S5[u.temp[3][1]];
  b[ 2] = S5[u.temp[2][2]];
  b[ 3] = S5[u.temp[1][3]];
  b[ 4] = S5[u.temp[1][0]];
  b[ 5] = S5[u.temp[0][1]];
  b[ 6] = S5[u.temp[3][2]];
  b[ 7] = S5[u.temp[2][3]];
  b[ 8] = S5[u.temp[2][0]];
  b[ 9] = S5[u.temp[1][1]];
  b[10] = S5[u.temp[0][2]];
  b[11] = S5[u.temp[3][3]];
  b[12] = S5[u.temp[3][0]];
  b[13] = S5[u.temp[2][1]];
  b[14] = S5[u.temp[1][2]];
  b[15] = S5[u.temp[0][3]];
  *((u32*)(b   )) ^= *((u32*)rk[0][0]);
  *((u32*)(b+ 4)) ^= *((u32*)rk[0][1]);
  *((u32*)(b+ 8)) ^= *((u32*)rk[0][2]);
  *((u32*)(b+12)) ^= *((u32*)rk[0][3]);
#undef rk
}
2N/A
2N/A
2N/A/* Decrypt one block. AX and BX may be the same. */
2N/Astatic void
2N/Ado_decrypt (RIJNDAEL_context *ctx, byte *bx, const byte *ax)
2N/A{
2N/A /* BX and AX are not necessary correctly aligned. Thus we need to
2N/A copy them here. */
2N/A union
2N/A {
2N/A u32 dummy[4];
2N/A byte a[16];
2N/A } a;
2N/A union
2N/A {
2N/A u32 dummy[4];
2N/A byte b[16];
2N/A } b;
2N/A
2N/A if ( !ctx->decryption_prepared )
2N/A {
2N/A prepare_decryption ( ctx );
2N/A _gcry_burn_stack (64);
2N/A ctx->decryption_prepared = 1;
2N/A }
2N/A
2N/A memcpy (a.a, ax, 16);
2N/A do_decrypt_aligned (ctx, b.b, a.a);
2N/A memcpy (bx, b.b, 16);
2N/A#undef rk
2N/A}
2N/A
2N/A
2N/A
2N/A
2N/Astatic void
2N/Arijndael_decrypt (void *context, byte *b, const byte *a)
2N/A{
2N/A RIJNDAEL_context *ctx = context;
2N/A
2N/A#ifdef USE_PADLOCK
2N/A if (ctx->use_padlock)
2N/A {
2N/A do_padlock (ctx, 1, b, a);
2N/A _gcry_burn_stack (48 + 2*sizeof(int) /* FIXME */);
2N/A }
2N/A else
2N/A#endif /*USE_PADLOCK*/
2N/A {
2N/A do_decrypt (ctx, b, a);
2N/A _gcry_burn_stack (48+2*sizeof(int));
2N/A }
2N/A}
2N/A
2N/A
/* Bulk decryption of complete blocks in CFB mode.  Caller needs to
   make sure that IV is aligned on an unsigned long boundary.  This
   function is only intended for the bulk encryption feature of
   cipher.c. */
2N/A
2N/A
2N/A/* Bulk decryption of complete blocks in CBC mode. Caller needs to
2N/A make sure that IV is aligned on an unsigned long boundary. This
2N/A function is only intended for the bulk encryption feature of
2N/A cipher.c. */
2N/A
2N/A
2N/A
2N/A
2N/A/* Run the self-tests for AES 128. Returns NULL on success. */
2N/A
2N/A/* Run the self-tests for AES 192. Returns NULL on success. */
2N/A
2N/A
2N/A/* Run the self-tests for AES 256. Returns NULL on success. */
2N/A
2N/A/* Run all the self-tests and return NULL on success. This function
2N/A is used for the on-the-fly self-tests. */
2N/A
2N/A
2N/A/* SP800-38a.pdf for AES-128. */
2N/A
2N/A
2N/A/* Complete selftest for AES-128 with all modes and driver code. */
2N/A
2N/A/* Complete selftest for AES-192. */
2N/A
2N/A
2N/A/* Complete selftest for AES-256. */
2N/A
2N/A
2N/A
2N/A/* Run a full self-test for ALGO and return 0 on success. */
2N/A
2N/A
2N/A
2N/A
/* Names under which the AES-128 algorithm can be looked up.  */
static const char *rijndael_names[] =
  {
    "RIJNDAEL",
    "AES128",
    "AES-128",
    NULL
  };

/* NIST OIDs for AES-128 in the common block cipher modes.  */
static gcry_cipher_oid_spec_t rijndael_oids[] =
  {
    { "2.16.840.1.101.3.4.1.1", GCRY_CIPHER_MODE_ECB },
    { "2.16.840.1.101.3.4.1.2", GCRY_CIPHER_MODE_CBC },
    { "2.16.840.1.101.3.4.1.3", GCRY_CIPHER_MODE_OFB },
    { "2.16.840.1.101.3.4.1.4", GCRY_CIPHER_MODE_CFB },
    { NULL }
  };

/* Cipher descriptor for AES-128: 16-byte block, 128-bit key.  */
gcry_cipher_spec_t _gcry_cipher_spec_aes =
  {
    "AES", rijndael_names, rijndael_oids, 16, 128, sizeof (RIJNDAEL_context),
    rijndael_setkey, rijndael_encrypt, rijndael_decrypt
    ,
#ifdef GRUB_UTIL
    .modname = "gcry_rijndael",
#endif
  };
2N/A
/* Names under which the AES-192 algorithm can be looked up.  */
static const char *rijndael192_names[] =
  {
    "RIJNDAEL192",
    "AES-192",
    NULL
  };

/* NIST OIDs for AES-192 in the common block cipher modes.  */
static gcry_cipher_oid_spec_t rijndael192_oids[] =
  {
    { "2.16.840.1.101.3.4.1.21", GCRY_CIPHER_MODE_ECB },
    { "2.16.840.1.101.3.4.1.22", GCRY_CIPHER_MODE_CBC },
    { "2.16.840.1.101.3.4.1.23", GCRY_CIPHER_MODE_OFB },
    { "2.16.840.1.101.3.4.1.24", GCRY_CIPHER_MODE_CFB },
    { NULL }
  };

/* Cipher descriptor for AES-192: 16-byte block, 192-bit key.  */
gcry_cipher_spec_t _gcry_cipher_spec_aes192 =
  {
    "AES192", rijndael192_names, rijndael192_oids, 16, 192, sizeof (RIJNDAEL_context),
    rijndael_setkey, rijndael_encrypt, rijndael_decrypt
    ,
#ifdef GRUB_UTIL
    .modname = "gcry_rijndael",
#endif
  };
2N/A
/* Names under which the AES-256 algorithm can be looked up.  */
static const char *rijndael256_names[] =
  {
    "RIJNDAEL256",
    "AES-256",
    NULL
  };

/* NIST OIDs for AES-256 in the common block cipher modes.  */
static gcry_cipher_oid_spec_t rijndael256_oids[] =
  {
    { "2.16.840.1.101.3.4.1.41", GCRY_CIPHER_MODE_ECB },
    { "2.16.840.1.101.3.4.1.42", GCRY_CIPHER_MODE_CBC },
    { "2.16.840.1.101.3.4.1.43", GCRY_CIPHER_MODE_OFB },
    { "2.16.840.1.101.3.4.1.44", GCRY_CIPHER_MODE_CFB },
    { NULL }
  };

/* Cipher descriptor for AES-256: 16-byte block, 256-bit key.  */
gcry_cipher_spec_t _gcry_cipher_spec_aes256 =
  {
    "AES256", rijndael256_names, rijndael256_oids, 16, 256,
    sizeof (RIJNDAEL_context),
    rijndael_setkey, rijndael_encrypt, rijndael_decrypt
    ,
#ifdef GRUB_UTIL
    .modname = "gcry_rijndael",
#endif
  };
2N/A
2N/A
2N/A
/* Module load hook: register all three AES key sizes with GRUB's
   cipher framework.  */
GRUB_MOD_INIT(gcry_rijndael)
{
  grub_cipher_register (&_gcry_cipher_spec_aes);
  grub_cipher_register (&_gcry_cipher_spec_aes192);
  grub_cipher_register (&_gcry_cipher_spec_aes256);
}
2N/A
/* Module unload hook: unregister every spec registered at load.  */
GRUB_MOD_FINI(gcry_rijndael)
{
  grub_cipher_unregister (&_gcry_cipher_spec_aes);
  grub_cipher_unregister (&_gcry_cipher_spec_aes192);
  grub_cipher_unregister (&_gcry_cipher_spec_aes256);
}