Mirror of https://github.com/torvalds/linux.git
commit 5a0e3ad6af
percpu.h is included by sched.h and module.h and thus ends up being included when building most .c files. percpu.h includes slab.h which in turn includes gfp.h, making everything defined by the two files universally available and complicating inclusion dependencies.

The percpu.h -> slab.h dependency is about to be removed. Prepare for this change by updating users of gfp and slab facilities to include those headers directly instead of assuming availability. As this conversion needs to touch a large number of source files, the following script is used as the basis of conversion.

  http://userweb.kernel.org/~tj/misc/slabh-sweep.py

The script does the following.

* Scan files for gfp and slab usages and update includes such that only the necessary includes are there, i.e. if only gfp is used, gfp.h; if slab is used, slab.h.

* When the script inserts a new include, it looks at the include blocks and tries to put the new include such that its order conforms to its surroundings. It's put in the include block which contains core kernel includes, in the same order that the rest are ordered - alphabetical, Christmas tree, rev-Xmas-tree, or at the end if there doesn't seem to be any matching order.

* If the script can't find a place to put a new include (mostly because the file doesn't have a fitting include block), it prints out an error message indicating which .h file needs to be added to the file.

The conversion was done in the following steps.

1. The initial automatic conversion of all .c files updated slightly over 4000 files, deleting around 700 includes and adding ~480 gfp.h and ~3000 slab.h inclusions. The script emitted errors for ~400 files.

2. Each error was manually checked. Some didn't need the inclusion, some needed manual addition, while adding it to the implementation .h or embedding .c file was more appropriate for others. This step added inclusions to around 150 files.

3. The script was run again and the output was compared to the edits from #2 to make sure no file was left behind.

4. Several build tests were done and a couple of problems were fixed, e.g. lib/decompress_*.c used malloc/free() wrappers around slab APIs requiring slab.h to be added manually.

5. The script was run on all .h files but without automatically editing them, as sprinkling gfp.h and slab.h inclusions around .h files could easily lead to inclusion dependency hell. Most gfp.h inclusion directives were ignored as stuff from gfp.h was usually widely available and often used in preprocessor macros. Each slab.h inclusion directive was examined and added manually as necessary.

6. percpu.h was updated not to include slab.h.

7. Build tests were done on the following configurations and failures were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my distributed build env didn't work with gcov compiles) and a few more options had to be turned off depending on archs to make things build (like ipr on powerpc/64 which failed due to missing writeq).

   * x86 and x86_64 UP and SMP allmodconfig and a custom test config.
   * powerpc and powerpc64 SMP allmodconfig
   * sparc and sparc64 SMP allmodconfig
   * ia64 SMP allmodconfig
   * s390 SMP allmodconfig
   * alpha SMP allmodconfig
   * um on x86_64 SMP allmodconfig

8. percpu.h modifications were reverted so that it could be applied as a separate patch and serve as a bisection point.

Given the fact that I had only a couple of failures from the build tests, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch headers which should be easily discoverable on most builds of the specific arch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
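For illustration, a minimal sketch of the kind of edit the sweep performs; the file, function, and include layout below are hypothetical examples, not taken from the patch:

/* Hypothetical example.c before the sweep: it calls kmalloc()/kfree()
 * but only gets <linux/slab.h> indirectly, via <linux/percpu.h>. After
 * the sweep, the dependency is spelled out in the core include block. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/slab.h>		/* added: kmalloc()/kfree() live here */

static void *example_alloc_buf(size_t len)
{
	/* GFP_KERNEL comes from gfp.h, which slab.h pulls in */
	return kmalloc(len, GFP_KERNEL);
}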
565 lines
15 KiB
C
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include "padlock.h"

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16 Bytes boundaries and
 * the Hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads
 * more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable to generate
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}

/* ====== Encryption/decryption routines ====== */

/* These are the real call to PadLock. */
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'. These instructions
 * should be used only inside the irq_ts_save/restore() context
 */

static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count - initial));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count - initial));
	return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.decrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-padlock",
	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_aes_encrypt,
			.decrypt	= ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-padlock",
	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_aes_encrypt,
			.decrypt	= cbc_aes_decrypt,
		}
	}
};

static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!cpu_has_xcrypt) {
		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");