[linux-2.6] / drivers / crypto / padlock-sha.c
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"

#define SHA1_DEFAULT_FALLBACK   "sha1-generic"
#define SHA256_DEFAULT_FALLBACK "sha256-generic"

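/*
 * The PadLock xsha1/xsha256 instructions are invoked here with EAX = 0,
 * i.e. they hash a complete message (padding included) in a single call.
 * The driver therefore buffers the whole input in one page and only hands
 * it to the hardware in the final step; if the message outgrows that page,
 * it "bypasses" to the software fallback hash instead.
 */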
struct padlock_sha_ctx {
        char            *data;
        size_t          used;
        int             bypass;
        void (*f_sha_padlock)(const char *in, char *out, int count);
        struct hash_desc fallback;
};

static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
{
        return crypto_tfm_ctx(tfm);
}

/* We'll need an aligned address on the stack */
#define NEAREST_ALIGNED(ptr) \
        ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))

static struct crypto_alg sha1_alg, sha256_alg;

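/*
 * Switch this transform over to the software fallback: initialise the
 * fallback hash and replay whatever has already been buffered, so that
 * subsequent updates and the final call can go straight to the fallback.
 */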
static void padlock_sha_bypass(struct crypto_tfm *tfm)
{
        if (ctx(tfm)->bypass)
                return;

        crypto_hash_init(&ctx(tfm)->fallback);
        if (ctx(tfm)->data && ctx(tfm)->used) {
                struct scatterlist sg;

                sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
                crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
        }

        ctx(tfm)->used = 0;
        ctx(tfm)->bypass = 1;
}

static void padlock_sha_init(struct crypto_tfm *tfm)
{
        ctx(tfm)->used = 0;
        ctx(tfm)->bypass = 0;
}

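/*
 * Append new data to the page-sized buffer; if it would no longer fit,
 * switch to the fallback and feed the data to that instead.
 */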
static void padlock_sha_update(struct crypto_tfm *tfm,
                        const uint8_t *data, unsigned int length)
{
        /* Our buffer is always one page. */
        if (unlikely(!ctx(tfm)->bypass &&
                     (ctx(tfm)->used + length > PAGE_SIZE)))
                padlock_sha_bypass(tfm);

        if (unlikely(ctx(tfm)->bypass)) {
                struct scatterlist sg;
                sg_init_one(&sg, (uint8_t *)data, length);
                crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
                return;
        }

        memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
        ctx(tfm)->used += length;
}

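/*
 * Copy the hash state out of the PadLock result buffer, byte-swapping each
 * 32-bit word so the digest ends up in the big-endian byte order the
 * crypto API expects.
 */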
static inline void padlock_output_block(uint32_t *src,
                        uint32_t *dst, size_t count)
{
        while (count--)
                *dst++ = swab32(*src++);
}

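/*
 * Hash 'count' bytes with the xsha1 instruction: ESI points at the input,
 * EDI at the (aligned) state/result buffer, ECX holds the byte count and
 * EAX = 0 requests the complete hash, padding included.
 */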
static void padlock_do_sha1(const char *in, char *out, int count)
{
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         *     PadLock microcode needs it that big. */
        char buf[128+16];
        char *result = NEAREST_ALIGNED(buf);
        int ts_state;

        ((uint32_t *)result)[0] = SHA1_H0;
        ((uint32_t *)result)[1] = SHA1_H1;
        ((uint32_t *)result)[2] = SHA1_H2;
        ((uint32_t *)result)[3] = SHA1_H3;
        ((uint32_t *)result)[4] = SHA1_H4;

        /* prevent taking the spurious DNA fault with padlock. */
        ts_state = irq_ts_save();
        asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
                      : "+S"(in), "+D"(result)
                      : "c"(count), "a"(0));
        irq_ts_restore(ts_state);

        padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
}

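/* Same as above, but using xsha256 and the eight SHA-256 state words. */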
static void padlock_do_sha256(const char *in, char *out, int count)
{
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         *     PadLock microcode needs it that big. */
        char buf[128+16];
        char *result = NEAREST_ALIGNED(buf);
        int ts_state;

        ((uint32_t *)result)[0] = SHA256_H0;
        ((uint32_t *)result)[1] = SHA256_H1;
        ((uint32_t *)result)[2] = SHA256_H2;
        ((uint32_t *)result)[3] = SHA256_H3;
        ((uint32_t *)result)[4] = SHA256_H4;
        ((uint32_t *)result)[5] = SHA256_H5;
        ((uint32_t *)result)[6] = SHA256_H6;
        ((uint32_t *)result)[7] = SHA256_H7;

        /* prevent taking the spurious DNA fault with padlock. */
        ts_state = irq_ts_save();
        asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
                      : "+S"(in), "+D"(result)
                      : "c"(count), "a"(0));
        irq_ts_restore(ts_state);

        padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
}

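/*
 * Produce the digest: either let the software fallback finish the hash,
 * or hand the whole buffered message to the PadLock engine in one call.
 */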
static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
{
        if (unlikely(ctx(tfm)->bypass)) {
                crypto_hash_final(&ctx(tfm)->fallback, out);
                ctx(tfm)->bypass = 0;
                return;
        }

        /* Pass the input buffer to PadLock microcode... */
        ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);

        ctx(tfm)->used = 0;
}

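/*
 * Allocate the per-transform buffer and a software fallback hash.  The
 * fallback is looked up by cra_name ("sha1"/"sha256"); passing
 * CRYPTO_ALG_NEED_FALLBACK in the mask keeps the crypto API from handing
 * us this very driver as its own fallback.
 */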
static int padlock_cra_init(struct crypto_tfm *tfm)
{
        const char *fallback_driver_name = tfm->__crt_alg->cra_name;
        struct crypto_hash *fallback_tfm;

        /* For now we'll allocate one page. This
         * could be made configurable one day. */
        ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
        if (!ctx(tfm)->data)
                return -ENOMEM;

        /* Allocate a fallback and abort if that fails. */
        fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
                                         CRYPTO_ALG_ASYNC |
                                         CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fallback_tfm)) {
                printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
                       fallback_driver_name);
                free_page((unsigned long)(ctx(tfm)->data));
                return PTR_ERR(fallback_tfm);
        }

        ctx(tfm)->fallback.tfm = fallback_tfm;
        return 0;
}

static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
{
        ctx(tfm)->f_sha_padlock = padlock_do_sha1;

        return padlock_cra_init(tfm);
}

static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
{
        ctx(tfm)->f_sha_padlock = padlock_do_sha256;

        return padlock_cra_init(tfm);
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
        if (ctx(tfm)->data) {
                free_page((unsigned long)(ctx(tfm)->data));
                ctx(tfm)->data = NULL;
        }

        crypto_free_hash(ctx(tfm)->fallback.tfm);
        ctx(tfm)->fallback.tfm = NULL;
}

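/*
 * Register under the generic names "sha1"/"sha256".  PADLOCK_CRA_PRIORITY
 * makes these implementations preferred over the generic ones, and
 * CRYPTO_ALG_NEED_FALLBACK marks them as unsuitable to serve as their own
 * fallback (see padlock_cra_init()).
 */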
static struct crypto_alg sha1_alg = {
        .cra_name               =       "sha1",
        .cra_driver_name        =       "sha1-padlock",
        .cra_priority           =       PADLOCK_CRA_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_DIGEST |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       SHA1_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct padlock_sha_ctx),
        .cra_module             =       THIS_MODULE,
        .cra_list               =       LIST_HEAD_INIT(sha1_alg.cra_list),
        .cra_init               =       padlock_sha1_cra_init,
        .cra_exit               =       padlock_cra_exit,
        .cra_u                  =       {
                .digest = {
                        .dia_digestsize =       SHA1_DIGEST_SIZE,
                        .dia_init       =       padlock_sha_init,
                        .dia_update     =       padlock_sha_update,
                        .dia_final      =       padlock_sha_final,
                }
        }
};

static struct crypto_alg sha256_alg = {
        .cra_name               =       "sha256",
        .cra_driver_name        =       "sha256-padlock",
        .cra_priority           =       PADLOCK_CRA_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_DIGEST |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       SHA256_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct padlock_sha_ctx),
        .cra_module             =       THIS_MODULE,
        .cra_list               =       LIST_HEAD_INIT(sha256_alg.cra_list),
        .cra_init               =       padlock_sha256_cra_init,
        .cra_exit               =       padlock_cra_exit,
        .cra_u                  =       {
                .digest = {
                        .dia_digestsize =       SHA256_DIGEST_SIZE,
                        .dia_init       =       padlock_sha_init,
                        .dia_update     =       padlock_sha_update,
                        .dia_final      =       padlock_sha_final,
                }
        }
};

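/*
 * Only load when CPUID reports the PadLock Hash Engine as both present
 * and enabled (cpu_has_phe / cpu_has_phe_enabled).
 */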
static int __init padlock_init(void)
{
        int rc = -ENODEV;

        if (!cpu_has_phe) {
                printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
                return -ENODEV;
        }

        if (!cpu_has_phe_enabled) {
                printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                return -ENODEV;
        }

        rc = crypto_register_alg(&sha1_alg);
        if (rc)
                goto out;

        rc = crypto_register_alg(&sha256_alg);
        if (rc)
                goto out_unreg1;

        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

        return 0;

out_unreg1:
        crypto_unregister_alg(&sha1_alg);
out:
        printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
        return rc;
}

static void __exit padlock_fini(void)
{
        crypto_unregister_alg(&sha1_alg);
        crypto_unregister_alg(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");