/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include "padlock.h"

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, as
 * the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads
 * more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

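/*
 * Illustrative sketch (not part of the original driver): the layout
 * rules from the comment above can be enforced at build time.  A
 * hypothetical helper, assuming BUILD_BUG_ON() from <linux/kernel.h>:
 */
static inline void padlock_aes_ctx_layout_check(void)
{
	/* Each hardware-visible field must start on a 16-byte boundary. */
	BUILD_BUG_ON(offsetof(struct aes_ctx, E) & (PADLOCK_ALIGNMENT - 1));
	BUILD_BUG_ON(offsetof(struct aes_ctx, d_data) & (PADLOCK_ALIGNMENT - 1));
	BUILD_BUG_ON(offsetof(struct aes_ctx, cword) & (PADLOCK_ALIGNMENT - 1));
}
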
static DEFINE_PER_CPU(struct cword *, last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	/* key_len 16/24/32 yields rounds 10/12/14 and ksize 0/1/2. */
	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
	/* Invalidate any cached control word that points into this ctx. */
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(last_cword, cpu))
			per_cpu(last_cword, cpu) = NULL;

	return 0;
}

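/*
 * Usage sketch (illustrative only, not part of the driver): a kernel
 * user reaches the setkey/encrypt paths above through the generic
 * crypto API; with this module loaded, "aes" resolves to "aes-padlock"
 * because of its elevated cra_priority:
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (!IS_ERR(tfm)) {
 *		crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
 *		crypto_cipher_encrypt_one(tfm, dst, src);
 *		crypto_free_cipher(tfm);
 *	}
 */
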
/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(last_cword, raw_smp_processor_id()) = cword;
}

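/*
 * Note on the pair above: writing EFLAGS (pushf/popf) is what forces the
 * xcrypt instructions to reload their key material on the next execution;
 * last_cword merely records which control word a CPU used most recently,
 * so back-to-back operations with the same control word can skip the
 * forced reload.
 */
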
/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'. These instructions
 * should be used only inside the irq_ts_save()/irq_ts_restore() context.
 */

static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
				  struct cword *control_word)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(1));
}

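/*
 * The opcode is emitted as raw bytes (0xf3 0x0f 0xa7 0xc8 == rep
 * xcryptecb) because assemblers contemporary with this driver did not
 * know the PadLock mnemonics.  Per the asm constraints above, rSI/rDI
 * carry the input/output pointers, rDX the control word, rBX the key
 * and rCX the block count.
 */
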
static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
{
	u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, AES_BLOCK_SIZE);
	padlock_xcrypt(tmp, out, key, cword);
}

static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword)
{
	/* padlock_xcrypt requires at least two blocks of data. */
	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
		       (PAGE_SIZE - 1)))) {
		aes_crypt_copy(in, out, key, cword);
		return;
	}

	padlock_xcrypt(in, out, key, cword);
}

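/*
 * Why the page-offset test in aes_crypt(): the engine may prefetch past
 * the block it was asked to process, so a single block that ends exactly
 * at a page boundary risks a read fault on the (possibly unmapped) next
 * page.  The test fires only when "in" sits at page offset
 * PAGE_SIZE - AES_BLOCK_SIZE; in that case the block is bounced through
 * aes_crypt_copy()'s stack buffer, which is sized at two blocks so the
 * over-read stays inside the buffer.
 */
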
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	if (count == 1) {
		aes_crypt(input, output, key, control_word);
		return;
	}

	asm volatile ("test $1, %%cl;"
		      "je 1f;"
#ifndef CONFIG_X86_64
		      "lea -1(%%ecx), %%eax;"
		      "mov $1, %%ecx;"
#else
		      "lea -1(%%rcx), %%rax;"
		      "mov $1, %%rcx;"
#endif
		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
#ifndef CONFIG_X86_64
		      "mov %%eax, %%ecx;"
#else
		      "mov %%rax, %%rcx;"
#endif
		      "1:"
		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count)
		      : "ax");
}

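/*
 * Reading the asm above: if the block count is odd ("test $1, %%cl"),
 * one block is processed first and the remaining even count is handled
 * by the second rep xcryptecb; an even count jumps straight to the
 * label.  Single blocks never reach this path -- they are routed through
 * aes_crypt() and its page-boundary workaround instead.
 */
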
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

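/*
 * xcryptcbc leaves a pointer to the last ciphertext block in rAX (hence
 * the "+a"(iv) constraint), so the returned pointer is the updated IV
 * for chaining the next request; cbc_aes_encrypt() below copies that
 * block back into walk.iv.
 */
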
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	aes_encrypt,
			.cia_decrypt		=	aes_decrypt,
		}
	}
};

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.decrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};

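/*
 * Usage sketch (illustrative only): the blkcipher interface of this
 * kernel generation drives the ECB/CBC paths via scatterlists, e.g.:
 *
 *	struct crypto_blkcipher *tfm =
 *		crypto_alloc_blkcipher("ecb(aes)", 0, 0);
 *	struct blkcipher_desc desc = { .tfm = tfm };
 *
 *	crypto_blkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_blkcipher_encrypt(&desc, &dst_sg, &src_sg, nbytes);
 *	crypto_free_blkcipher(tfm);
 */
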
static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};

static int __init padlock_init(void)
{
	int ret;

	if (!cpu_has_xcrypt) {
		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

out:
	return ret;

	/* Unwind registrations in reverse order on failure. */
cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");