LCOV - code coverage report
Current view: top level - lib/crypto - aes.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58
Coverage: Lines: 0 / 126 (0.0 %) | Functions: 0 / 7 (0.0 %)

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017-2019 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <asm/unaligned.h>

/*
 * Emit the sbox as volatile const to prevent the compiler from doing
 * constant folding on sbox references involving fixed indexes.
 */
static volatile const u8 __cacheline_aligned aes_sbox[] = {
        0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
        0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
        0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
        0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
        0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
        0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
        0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
        0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
        0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
        0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
        0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
        0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
        0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
        0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
        0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
        0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
        0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
        0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
        0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
        0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
        0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
        0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
        0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
        0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
        0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
        0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
        0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
        0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
        0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
        0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
        0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
        0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
};

static volatile const u8 __cacheline_aligned aes_inv_sbox[] = {
        0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
        0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
        0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
        0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
        0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
        0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
        0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
        0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
        0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
        0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
        0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
        0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
        0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
        0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
        0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
        0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
        0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
        0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
        0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
        0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
        0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
        0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
        0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
        0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
        0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
        0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
        0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
        0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
        0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
        0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
        0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
        0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
};

extern const u8 crypto_aes_sbox[256] __alias(aes_sbox);
extern const u8 crypto_aes_inv_sbox[256] __alias(aes_inv_sbox);

EXPORT_SYMBOL(crypto_aes_sbox);
EXPORT_SYMBOL(crypto_aes_inv_sbox);
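
/*
 * Editorial note (a sketch of the rationale, inferred from the comments in
 * this file): the volatile qualifier above matters for the cache-priming
 * XORs in aes_encrypt()/aes_decrypt() below. With a plain const table, the
 * compiler could fold an expression such as
 *
 *	aes_sbox[0] ^ aes_sbox[64] ^ aes_sbox[134] ^ aes_sbox[195]
 *
 * to the compile-time constant 0 and drop the loads entirely, defeating the
 * attempt to pull the whole table into the D-cache. The crypto_aes_sbox /
 * crypto_aes_inv_sbox aliases give other code an exported, non-volatile view
 * of the same tables.
 */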

static u32 mul_by_x(u32 w)
{
        u32 x = w & 0x7f7f7f7f;
        u32 y = w & 0x80808080;

        /* multiply by polynomial 'x' (0b10) in GF(2^8) */
        return (x << 1) ^ (y >> 7) * 0x1b;
}
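/*
 * Worked example (each byte lane is independent): for the byte 0x57 the
 * high bit is clear, so mul_by_x gives 0x57 << 1 = 0xae. For 0x83 the
 * high bit is set, so the shifted value is reduced modulo the AES
 * polynomial x^8 + x^4 + x^3 + x + 1 (0x11b):
 * (0x03 << 1) ^ 0x1b = 0x06 ^ 0x1b = 0x1d.
 */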

static u32 mul_by_x2(u32 w)
{
        u32 x = w & 0x3f3f3f3f;
        u32 y = w & 0x80808080;
        u32 z = w & 0x40404040;

        /* multiply by polynomial 'x^2' (0b100) in GF(2^8) */
        return (x << 2) ^ (y >> 7) * 0x36 ^ (z >> 6) * 0x1b;
}
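/*
 * Worked example: mul_by_x2 is equivalent to applying mul_by_x twice.
 * For the byte 0x57: x = 0x17, y = 0, z = 0x40, so the result is
 * (0x17 << 2) ^ (0x40 >> 6) * 0x1b = 0x5c ^ 0x1b = 0x47, which matches
 * mul_by_x(mul_by_x(0x57)) = mul_by_x(0xae) = 0x47.
 */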

static u32 mix_columns(u32 x)
{
        /*
         * Perform the following matrix multiplication in GF(2^8)
         *
         * | 0x2 0x3 0x1 0x1 |   | x[0] |
         * | 0x1 0x2 0x3 0x1 |   | x[1] |
         * | 0x1 0x1 0x2 0x3 | x | x[2] |
         * | 0x3 0x1 0x1 0x2 |   | x[3] |
         */
        u32 y = mul_by_x(x) ^ ror32(x, 16);

        return y ^ ror32(x ^ y, 8);
}
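/*
 * Worked example (the round 1 column from the FIPS-197 Appendix B cipher
 * example): for the column bytes {d4, bf, 5d, 30}, packed little-endian
 * as x = 0x305dbfd4:
 *
 *	mul_by_x(x)        = 0x60ba65b3
 *	y                  = 0x60ba65b3 ^ ror32(x, 16) = 0xdf6e55ee
 *	y ^ ror32(x ^ y, 8) = 0xe5816604
 *
 * i.e. the column {04, 66, 81, e5}, the MixColumns result given in the
 * standard. The rotations realign the column bytes so that each output
 * byte sees the right combination of 2*, 3* and 1* terms.
 */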

static u32 inv_mix_columns(u32 x)
{
        /*
         * Perform the following matrix multiplication in GF(2^8)
         *
         * | 0xe 0xb 0xd 0x9 |   | x[0] |
         * | 0x9 0xe 0xb 0xd |   | x[1] |
         * | 0xd 0x9 0xe 0xb | x | x[2] |
         * | 0xb 0xd 0x9 0xe |   | x[3] |
         *
         * which can conveniently be reduced to
         *
         * | 0x2 0x3 0x1 0x1 |   | 0x5 0x0 0x4 0x0 |   | x[0] |
         * | 0x1 0x2 0x3 0x1 |   | 0x0 0x5 0x0 0x4 |   | x[1] |
         * | 0x1 0x1 0x2 0x3 | x | 0x4 0x0 0x5 0x0 | x | x[2] |
         * | 0x3 0x1 0x1 0x2 |   | 0x0 0x4 0x0 0x5 |   | x[3] |
         */
        u32 y = mul_by_x2(x);

        return mix_columns(x ^ y ^ ror32(y, 16));
}
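/*
 * The reduction can be checked entry by entry; e.g. for the top-left
 * element: 0x2 * 0x5 ^ 0x1 * 0x4 = 0xa ^ 0x4 = 0xe in GF(2^8). In the
 * code, x ^ y ^ ror32(y, 16) applies the sparse right-hand matrix:
 * 5 * x[i] = x[i] ^ 4 * x[i] covers the diagonal, and ror32(y, 16)
 * contributes the 4 * x[(i + 2) % 4] off-diagonal terms. The call to
 * mix_columns() then applies the left-hand matrix.
 */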

static __always_inline u32 subshift(u32 in[], int pos)
{
        return (aes_sbox[in[pos] & 0xff]) ^
               (aes_sbox[(in[(pos + 1) % 4] >>  8) & 0xff] <<  8) ^
               (aes_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
               (aes_sbox[(in[(pos + 3) % 4] >> 24) & 0xff] << 24);
}
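/*
 * subshift() fuses SubBytes and ShiftRows for one output column: byte n
 * of output column 'pos' is taken from byte n of input column
 * (pos + n) % 4 and passed through the sbox. inv_subshift() below
 * rotates the other way, (pos + 4 - n) % 4, implementing InvShiftRows
 * with the inverse sbox.
 */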

static __always_inline u32 inv_subshift(u32 in[], int pos)
{
        return (aes_inv_sbox[in[pos] & 0xff]) ^
               (aes_inv_sbox[(in[(pos + 3) % 4] >>  8) & 0xff] <<  8) ^
               (aes_inv_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
               (aes_inv_sbox[(in[(pos + 1) % 4] >> 24) & 0xff] << 24);
}

static u32 subw(u32 in)
{
        return (aes_sbox[in & 0xff]) ^
               (aes_sbox[(in >>  8) & 0xff] <<  8) ^
               (aes_sbox[(in >> 16) & 0xff] << 16) ^
               (aes_sbox[(in >> 24) & 0xff] << 24);
}
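/*
 * subw() is SubWord from FIPS-197: the sbox applied to each byte of a
 * word. The key schedule below combines it with ror32(..., 8), which
 * plays the role of RotWord on these little-endian packed words.
 */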

/**
 * aes_expandkey - Expands the AES key as described in FIPS-197
 * @ctx:	The location where the computed key will be stored.
 * @in_key:	The supplied key.
 * @key_len:	The length of the supplied key.
 *
 * Returns 0 on success. The function fails only if an invalid key size (or
 * pointer) is supplied.
 * The expanded key size is 240 bytes (at most 14 rounds, each with a unique
 * 16-byte round key, plus a 16-byte key which is used before the first
 * round).
 * The decryption key is prepared for the "Equivalent Inverse Cipher" as
 * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec)
 * is for the initial combination, the second slot for the first round and
 * so on.
 */
int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
                  unsigned int key_len)
{
        u32 kwords = key_len / sizeof(u32);
        u32 rc, i, j;
        int err;

        err = aes_check_keylen(key_len);
        if (err)
                return err;

        ctx->key_length = key_len;

        for (i = 0; i < kwords; i++)
                ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));

        for (i = 0, rc = 1; i < 10; i++, rc = mul_by_x(rc)) {
                u32 *rki = ctx->key_enc + (i * kwords);
                u32 *rko = rki + kwords;

                rko[0] = ror32(subw(rki[kwords - 1]), 8) ^ rc ^ rki[0];
                rko[1] = rko[0] ^ rki[1];
                rko[2] = rko[1] ^ rki[2];
                rko[3] = rko[2] ^ rki[3];

                if (key_len == AES_KEYSIZE_192) {
                        if (i >= 7)
                                break;
                        rko[4] = rko[3] ^ rki[4];
                        rko[5] = rko[4] ^ rki[5];
                } else if (key_len == AES_KEYSIZE_256) {
                        if (i >= 6)
                                break;
                        rko[4] = subw(rko[3]) ^ rki[4];
                        rko[5] = rko[4] ^ rki[5];
                        rko[6] = rko[5] ^ rki[6];
                        rko[7] = rko[6] ^ rki[7];
                }
        }

        /*
         * Generate the decryption keys for the Equivalent Inverse Cipher.
         * This involves reversing the order of the round keys, and applying
         * the Inverse Mix Columns transformation to all but the first and
         * the last one.
         */
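        /*
         * The last round key starts at word index 4 * rounds, and with
         * rounds = key_len / 4 + 6 that is exactly key_len + 24 (key_len
         * is in bytes here, key_enc is indexed in words).
         */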
        ctx->key_dec[0] = ctx->key_enc[key_len + 24];
        ctx->key_dec[1] = ctx->key_enc[key_len + 25];
        ctx->key_dec[2] = ctx->key_enc[key_len + 26];
        ctx->key_dec[3] = ctx->key_enc[key_len + 27];

        for (i = 4, j = key_len + 20; j > 0; i += 4, j -= 4) {
                ctx->key_dec[i]     = inv_mix_columns(ctx->key_enc[j]);
                ctx->key_dec[i + 1] = inv_mix_columns(ctx->key_enc[j + 1]);
                ctx->key_dec[i + 2] = inv_mix_columns(ctx->key_enc[j + 2]);
                ctx->key_dec[i + 3] = inv_mix_columns(ctx->key_enc[j + 3]);
        }

        ctx->key_dec[i]     = ctx->key_enc[0];
        ctx->key_dec[i + 1] = ctx->key_enc[1];
        ctx->key_dec[i + 2] = ctx->key_enc[2];
        ctx->key_dec[i + 3] = ctx->key_enc[3];

        return 0;
}
EXPORT_SYMBOL(aes_expandkey);
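
/*
 * Minimal usage sketch (illustrative only, not part of this file; the
 * function name is hypothetical, the key bytes are the AES-128 example
 * key from FIPS-197 Appendix A):
 */
#if 0
static int aes_expandkey_demo(void)
{
        static const u8 key[AES_KEYSIZE_128] = {
                0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
                0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c,
        };
        struct crypto_aes_ctx ctx;

        /* Fails only for key sizes other than 16, 24 or 32 bytes. */
        return aes_expandkey(&ctx, key, sizeof(key));
}
#endif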

/**
 * aes_encrypt - Encrypt a single AES block
 * @ctx:	Context struct containing the key schedule
 * @out:	Buffer to store the ciphertext
 * @in:		Buffer containing the plaintext
 */
void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
{
        const u32 *rkp = ctx->key_enc + 4;
        int rounds = 6 + ctx->key_length / 4;
        u32 st0[4], st1[4];
        int round;

        st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
        st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4);
        st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
        st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);

        /*
         * Force the compiler to emit data independent Sbox references,
         * by xoring the input with Sbox values that are known to add up
         * to zero. This pulls the entire Sbox into the D-cache before any
         * data dependent lookups are done.
         */
        st0[0] ^= aes_sbox[ 0] ^ aes_sbox[ 64] ^ aes_sbox[134] ^ aes_sbox[195];
        st0[1] ^= aes_sbox[16] ^ aes_sbox[ 82] ^ aes_sbox[158] ^ aes_sbox[221];
        st0[2] ^= aes_sbox[32] ^ aes_sbox[ 96] ^ aes_sbox[160] ^ aes_sbox[234];
        st0[3] ^= aes_sbox[48] ^ aes_sbox[112] ^ aes_sbox[186] ^ aes_sbox[241];

        for (round = 0;; round += 2, rkp += 8) {
                st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0];
                st1[1] = mix_columns(subshift(st0, 1)) ^ rkp[1];
                st1[2] = mix_columns(subshift(st0, 2)) ^ rkp[2];
                st1[3] = mix_columns(subshift(st0, 3)) ^ rkp[3];

                if (round == rounds - 2)
                        break;

                st0[0] = mix_columns(subshift(st1, 0)) ^ rkp[4];
                st0[1] = mix_columns(subshift(st1, 1)) ^ rkp[5];
                st0[2] = mix_columns(subshift(st1, 2)) ^ rkp[6];
                st0[3] = mix_columns(subshift(st1, 3)) ^ rkp[7];
        }

        put_unaligned_le32(subshift(st1, 0) ^ rkp[4], out);
        put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
        put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
        put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
}
EXPORT_SYMBOL(aes_encrypt);
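
/*
 * Minimal self-check sketch (illustrative only, not part of this file;
 * the function name is hypothetical, the vectors are the AES-128 example
 * from FIPS-197 Appendix C.1):
 */
#if 0
static int aes_encrypt_demo(void)
{
        static const u8 key[AES_KEYSIZE_128] = {
                0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
        };
        static const u8 pt[AES_BLOCK_SIZE] = {
                0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
                0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
        };
        static const u8 expected[AES_BLOCK_SIZE] = {
                0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
                0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a,
        };
        struct crypto_aes_ctx ctx;
        u8 ct[AES_BLOCK_SIZE];

        if (aes_expandkey(&ctx, key, sizeof(key)))
                return -EINVAL;
        aes_encrypt(&ctx, ct, pt);
        return memcmp(ct, expected, sizeof(ct)) ? -EINVAL : 0;
}
#endif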

/**
 * aes_decrypt - Decrypt a single AES block
 * @ctx:	Context struct containing the key schedule
 * @out:	Buffer to store the plaintext
 * @in:		Buffer containing the ciphertext
 */
void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
{
        const u32 *rkp = ctx->key_dec + 4;
        int rounds = 6 + ctx->key_length / 4;
        u32 st0[4], st1[4];
        int round;

        st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
        st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4);
        st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
        st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);

        /*
         * Force the compiler to emit data independent Sbox references,
         * by xoring the input with Sbox values that are known to add up
         * to zero. This pulls the entire Sbox into the D-cache before any
         * data dependent lookups are done.
         */
        st0[0] ^= aes_inv_sbox[ 0] ^ aes_inv_sbox[ 64] ^ aes_inv_sbox[129] ^ aes_inv_sbox[200];
        st0[1] ^= aes_inv_sbox[16] ^ aes_inv_sbox[ 83] ^ aes_inv_sbox[150] ^ aes_inv_sbox[212];
        st0[2] ^= aes_inv_sbox[32] ^ aes_inv_sbox[ 96] ^ aes_inv_sbox[160] ^ aes_inv_sbox[236];
        st0[3] ^= aes_inv_sbox[48] ^ aes_inv_sbox[112] ^ aes_inv_sbox[187] ^ aes_inv_sbox[247];

        for (round = 0;; round += 2, rkp += 8) {
                st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0];
                st1[1] = inv_mix_columns(inv_subshift(st0, 1)) ^ rkp[1];
                st1[2] = inv_mix_columns(inv_subshift(st0, 2)) ^ rkp[2];
                st1[3] = inv_mix_columns(inv_subshift(st0, 3)) ^ rkp[3];

                if (round == rounds - 2)
                        break;

                st0[0] = inv_mix_columns(inv_subshift(st1, 0)) ^ rkp[4];
                st0[1] = inv_mix_columns(inv_subshift(st1, 1)) ^ rkp[5];
                st0[2] = inv_mix_columns(inv_subshift(st1, 2)) ^ rkp[6];
                st0[3] = inv_mix_columns(inv_subshift(st1, 3)) ^ rkp[7];
        }

        put_unaligned_le32(inv_subshift(st1, 0) ^ rkp[4], out);
        put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
        put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
        put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
}
EXPORT_SYMBOL(aes_decrypt);
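
/*
 * Round-trip sketch (illustrative only, hypothetical helper): decrypting
 * the Appendix C.1 ciphertext from the aes_encrypt() example above with
 * the same expanded key must recover the original plaintext.
 */
#if 0
static int aes_decrypt_demo(const struct crypto_aes_ctx *ctx,
                            const u8 ct[AES_BLOCK_SIZE],
                            const u8 pt[AES_BLOCK_SIZE])
{
        u8 buf[AES_BLOCK_SIZE];

        aes_decrypt(ctx, buf, ct);
        return memcmp(buf, pt, AES_BLOCK_SIZE) ? -EINVAL : 0;
}
#endif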

MODULE_DESCRIPTION("Generic AES library");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

Generated by: LCOV version 1.14