/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include "padlock.h"

/* Control word. */
struct cword {
        unsigned int __attribute__ ((__packed__))
                rounds:4,
                algo:3,
                keygen:1,
                interm:1,
                encdec:1,
                ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

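/*
 * Informal sketch of the field meanings, inferred from aes_set_key()
 * below and from VIA's ACE documentation (treat as informational, not
 * something this file guarantees): rounds holds the AES round count
 * (10/12/14), algo selects the cipher, keygen = 1 tells the engine a
 * software-expanded key schedule is supplied rather than a raw key,
 * interm requests intermediate-round output, encdec selects decryption
 * when set, and ksize encodes the key length (0/1/2 for 128/192/256
 * bits).
 */
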
/*
 * Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, and
 * that the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter, but the HW reads
 * more).
 */
struct aes_ctx {
        u32 E[AES_MAX_KEYLENGTH_U32]
                __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
        u32 d_data[AES_MAX_KEYLENGTH_U32]
                __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
        struct {
                struct cword encrypt;
                struct cword decrypt;
        } cword;
        u32 *D;
};

static DEFINE_PER_CPU(struct cword *, last_cword);

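/*
 * The engine keeps a cached copy of the control word it last loaded,
 * and forcing a reload is evidently expensive enough to be worth
 * avoiding.  last_cword tracks, per CPU, which cword the hardware most
 * recently saw: padlock_reset_key() below forces a reload (via
 * pushfl/popfl) only when the upcoming operation uses a different
 * cword, and padlock_store_cword() records the one just used.
 */
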
/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
        /* TODO: We should check the actual CPU model/stepping
           as it's possible that the capability will be
           added in the next CPU revisions. */
        if (key_len == 16)
                return 1;
        return 0;
}

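/*
 * The crypto layer only guarantees crypto_tfm_ctx_alignment() for a
 * tfm context, which may be weaker than PADLOCK_ALIGNMENT.  The helper
 * below therefore rounds the context pointer up so that E, d_data and
 * cword land on 16-byte boundaries as the hardware requires.
 */
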
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
        unsigned long addr = (unsigned long)ctx;
        unsigned long align = PADLOCK_ALIGNMENT;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
        return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
        return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        struct aes_ctx *ctx = aes_ctx(tfm);
        const __le32 *key = (const __le32 *)in_key;
        u32 *flags = &tfm->crt_flags;
        struct crypto_aes_ctx gen_aes;
        int cpu;

        if (key_len % 8) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /*
         * If the hardware is capable of generating the extended key
         * itself we must supply the plain key for both encryption
         * and decryption.
         */
        ctx->D = ctx->E;

        ctx->E[0] = le32_to_cpu(key[0]);
        ctx->E[1] = le32_to_cpu(key[1]);
        ctx->E[2] = le32_to_cpu(key[2]);
        ctx->E[3] = le32_to_cpu(key[3]);

        /* Prepare control words. */
        memset(&ctx->cword, 0, sizeof(ctx->cword));

        ctx->cword.decrypt.encdec = 1;
        ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
        ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
        ctx->cword.encrypt.ksize = (key_len - 16) / 8;
        ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

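        /*
         * Worked example: a 256-bit key (key_len = 32) yields
         * rounds = 10 + (32 - 16) / 4 = 14 and ksize = (32 - 16) / 8 = 2;
         * 128- and 192-bit keys give 10/0 and 12/1 respectively.
         */
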
        /* Don't generate extended keys if the hardware can do it. */
        if (aes_hw_extkey_available(key_len))
                goto ok;

        ctx->D = ctx->d_data;
        ctx->cword.encrypt.keygen = 1;
        ctx->cword.decrypt.keygen = 1;

        if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
        memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
        for_each_online_cpu(cpu)
                if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
                    &ctx->cword.decrypt == per_cpu(last_cword, cpu))
                        per_cpu(last_cword, cpu) = NULL;

        return 0;
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(struct cword *cword)
{
        int cpu = raw_smp_processor_id();

        if (cword != per_cpu(last_cword, cpu))
                asm volatile ("pushfl; popfl");
}

static inline void padlock_store_cword(struct cword *cword)
{
        per_cpu(last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the PadLock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when CR0.TS is set.  These instructions
 * should therefore be used only inside an irq_ts_save()/irq_ts_restore()
 * region.
 */

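/*
 * The xcrypt instructions are emitted as raw bytes (0xf3 0x0f 0xa7 /r),
 * presumably because assemblers of the era did not know the PadLock
 * mnemonics.  The register bindings follow from the asm constraints:
 * %esi = source, %edi = destination, %edx = control word, %ebx = key,
 * %ecx = block count.
 */
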
static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
                                  struct cword *control_word)
{
        asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                      : "+S"(input), "+D"(output)
                      : "d"(control_word), "b"(key), "c"(1));
}

static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
{
        u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
        u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

        memcpy(tmp, in, AES_BLOCK_SIZE);
        padlock_xcrypt(tmp, out, key, cword);
}

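/*
 * The check in aes_crypt() below fires exactly when 'in' sits at page
 * offset PAGE_SIZE - AES_BLOCK_SIZE, i.e. the lone block is flush with
 * the end of a page: ((addr ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
 * (PAGE_SIZE - 1)) == 0 iff addr % PAGE_SIZE == PAGE_SIZE - AES_BLOCK_SIZE.
 * Since the engine may read beyond the block (see the comment there),
 * that could fault on the next, possibly unmapped, page.
 * aes_crypt_copy() above sidesteps this by bouncing the block through
 * a stack buffer with a full block of slack after it.
 */
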
static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
                             struct cword *cword)
{
        /* padlock_xcrypt requires at least two blocks of data. */
        if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
                       (PAGE_SIZE - 1)))) {
                aes_crypt_copy(in, out, key, cword);
                return;
        }

        padlock_xcrypt(in, out, key, cword);
}

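/*
 * In padlock_xcrypt_ecb() below, a single block is routed through
 * aes_crypt() to cover the page-end case, while larger counts are
 * handled inline: if the count is odd, one block is processed first
 * with %ecx = 1, and the remainder (now even) goes through a single
 * rep xcryptecb -- presumably to keep the bulk operation on an even
 * number of blocks, given that the engine reads data two blocks at a
 * time (cf. the "at least two blocks" comment above).
 */
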
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                                      void *control_word, u32 count)
{
        if (count == 1) {
                aes_crypt(input, output, key, control_word);
                return;
        }

        asm volatile ("test $1, %%cl;"
                      "je 1f;"
                      "lea -1(%%ecx), %%eax;"
                      "mov $1, %%ecx;"
                      ".byte 0xf3,0x0f,0xa7,0xc8;"      /* rep xcryptecb */
                      "mov %%eax, %%ecx;"
                      "1:"
                      ".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                      : "+S"(input), "+D"(output)
                      : "d"(control_word), "b"(key), "c"(count)
                      : "ax");
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
                                     u8 *iv, void *control_word, u32 count)
{
        /* rep xcryptcbc */
        asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
                      : "+S" (input), "+D" (output), "+a" (iv)
                      : "d" (control_word), "b" (key), "c" (count));
        return iv;
}

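/*
 * Note on padlock_xcrypt_cbc() above: the "+a"(iv) constraint lets the
 * instruction update %eax, and the returned pointer is treated by
 * cbc_aes_encrypt() below as the final chaining value, to be copied
 * back into walk.iv as the IV for the next chunk.
 */
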
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct aes_ctx *ctx = aes_ctx(tfm);
        int ts_state;

        padlock_reset_key(&ctx->cword.encrypt);
        ts_state = irq_ts_save();
        aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
        irq_ts_restore(ts_state);
        padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct aes_ctx *ctx = aes_ctx(tfm);
        int ts_state;

        padlock_reset_key(&ctx->cword.encrypt);
        ts_state = irq_ts_save();
        aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
        irq_ts_restore(ts_state);
        padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
        .cra_name = "aes",
        .cra_driver_name = "aes-padlock",
        .cra_priority = PADLOCK_CRA_PRIORITY,
        .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct aes_ctx),
        .cra_alignmask = PADLOCK_ALIGNMENT - 1,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
        .cra_u = {
                .cipher = {
                        .cia_min_keysize = AES_MIN_KEY_SIZE,
                        .cia_max_keysize = AES_MAX_KEY_SIZE,
                        .cia_setkey = aes_set_key,
                        .cia_encrypt = aes_encrypt,
                        .cia_decrypt = aes_decrypt,
                }
        }
};

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;
        int ts_state;

        padlock_reset_key(&ctx->cword.encrypt);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        ts_state = irq_ts_save();
        while ((nbytes = walk.nbytes)) {
                padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
                                   ctx->E, &ctx->cword.encrypt,
                                   nbytes / AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        irq_ts_restore(ts_state);

        padlock_store_cword(&ctx->cword.encrypt);

        return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;
        int ts_state;

        padlock_reset_key(&ctx->cword.decrypt);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        ts_state = irq_ts_save();
        while ((nbytes = walk.nbytes)) {
                padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
                                   ctx->D, &ctx->cword.decrypt,
                                   nbytes / AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        irq_ts_restore(ts_state);

        padlock_store_cword(&ctx->cword.encrypt);

        return err;
}

static struct crypto_alg ecb_aes_alg = {
        .cra_name = "ecb(aes)",
        .cra_driver_name = "ecb-aes-padlock",
        .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct aes_ctx),
        .cra_alignmask = PADLOCK_ALIGNMENT - 1,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = aes_set_key,
                        .encrypt = ecb_aes_encrypt,
                        .decrypt = ecb_aes_decrypt,
                }
        }
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;
        int ts_state;

        padlock_reset_key(&ctx->cword.encrypt);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        ts_state = irq_ts_save();
        while ((nbytes = walk.nbytes)) {
                u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
                                            walk.dst.virt.addr, ctx->E,
                                            walk.iv, &ctx->cword.encrypt,
                                            nbytes / AES_BLOCK_SIZE);
                memcpy(walk.iv, iv, AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        irq_ts_restore(ts_state);

        padlock_store_cword(&ctx->cword.decrypt);

        return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;
        int ts_state;

        padlock_reset_key(&ctx->cword.encrypt);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        ts_state = irq_ts_save();
        while ((nbytes = walk.nbytes)) {
                padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
                                   ctx->D, walk.iv, &ctx->cword.decrypt,
                                   nbytes / AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        irq_ts_restore(ts_state);

        padlock_store_cword(&ctx->cword.encrypt);

        return err;
}

static struct crypto_alg cbc_aes_alg = {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "cbc-aes-padlock",
        .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct aes_ctx),
        .cra_alignmask = PADLOCK_ALIGNMENT - 1,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = aes_set_key,
                        .encrypt = cbc_aes_encrypt,
                        .decrypt = cbc_aes_decrypt,
                }
        }
};

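/*
 * Minimal usage sketch (illustrative, not part of this driver): once
 * the module is loaded, kernel users reach these implementations via
 * the generic crypto API, e.g. crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 * the elevated cra_priority values above make the PadLock versions win
 * over aes-generic when the hardware is present.
 */
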
static int __init padlock_init(void)
{
        int ret;

        if (!cpu_has_xcrypt) {
                printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
                return -ENODEV;
        }

        if (!cpu_has_xcrypt_enabled) {
                printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                return -ENODEV;
        }

        if ((ret = crypto_register_alg(&aes_alg)))
                goto aes_err;

        if ((ret = crypto_register_alg(&ecb_aes_alg)))
                goto ecb_aes_err;

        if ((ret = crypto_register_alg(&cbc_aes_alg)))
                goto cbc_aes_err;

        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

out:
        return ret;

cbc_aes_err:
        crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
        crypto_unregister_alg(&aes_alg);
aes_err:
        printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
        goto out;
}

static void __exit padlock_fini(void)
{
        crypto_unregister_alg(&cbc_aes_alg);
        crypto_unregister_alg(&ecb_aes_alg);
        crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");