net/xfrm/xfrm_algo.c
/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>

/*
 * Algorithms supported by IPsec.  These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
static struct xfrm_algo_desc aalg_list[] = {
{
        .name = "digest_null",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 0,
                        .icv_fullbits = 0,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_NULL,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 0,
                .sadb_alg_maxbits = 0
        }
},
{
        .name = "md5",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_AALG_MD5HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 128
        }
},
{
        .name = "sha1",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 160,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_AALG_SHA1HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 160,
                .sadb_alg_maxbits = 160
        }
},
{
        .name = "sha256",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 256,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 256,
                .sadb_alg_maxbits = 256
        }
},
{
        .name = "ripemd160",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 160,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 160,
                .sadb_alg_maxbits = 160
        }
},
};
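
/*
 * Illustrative note on reading the table above: the "sha1" entry describes
 * HMAC-SHA1, whose full digest (icv_fullbits) is 160 bits but whose ICV is
 * truncated to 96 bits (icv_truncbits), i.e. 96 / 8 = 12 bytes on the wire,
 * while PF_KEY negotiates a fixed 160-bit key
 * (sadb_alg_minbits == sadb_alg_maxbits).
 */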

static struct xfrm_algo_desc ealg_list[] = {
{
        .name = "cipher_null",

        .uinfo = {
                .encr = {
                        .blockbits = 8,
                        .defkeybits = 0,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_EALG_NULL,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 0,
                .sadb_alg_maxbits = 0
        }
},
{
        .name = "des",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 64,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_EALG_DESCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 64,
                .sadb_alg_maxbits = 64
        }
},
{
        .name = "des3_ede",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 192,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_EALG_3DESCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 192,
                .sadb_alg_maxbits = 192
        }
},
{
        .name = "cast128",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_CASTCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 40,
                .sadb_alg_maxbits = 128
        }
},
{
        .name = "blowfish",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 40,
                .sadb_alg_maxbits = 448
        }
},
{
        .name = "aes",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_AESCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256
        }
},
{
        .name = "serpent",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_SERPENTCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256,
        }
},
{
        .name = "twofish",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256
        }
},
};

static struct xfrm_algo_desc calg_list[] = {
{
        .name = "deflate",
        .uinfo = {
                .comp = {
                        .threshold = 90,
                }
        },
        .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
        .name = "lzs",
        .uinfo = {
                .comp = {
                        .threshold = 90,
                }
        },
        .desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
        .name = "lzjh",
        .uinfo = {
                .comp = {
                        .threshold = 50,
                }
        },
        .desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};

static inline int aalg_entries(void)
{
        return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
        return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
        return ARRAY_SIZE(calg_list);
}

/* Todo: generic iterators */
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
        int i;

        for (i = 0; i < aalg_entries(); i++) {
                if (aalg_list[i].desc.sadb_alg_id == alg_id) {
                        if (aalg_list[i].available)
                                return &aalg_list[i];
                        else
                                break;
                }
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
        int i;

        for (i = 0; i < ealg_entries(); i++) {
                if (ealg_list[i].desc.sadb_alg_id == alg_id) {
                        if (ealg_list[i].available)
                                return &ealg_list[i];
                        else
                                break;
                }
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
        int i;

        for (i = 0; i < calg_entries(); i++) {
                if (calg_list[i].desc.sadb_alg_id == alg_id) {
                        if (calg_list[i].available)
                                return &calg_list[i];
                        else
                                break;
                }
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
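
/*
 * Example (illustrative only): a caller such as the PF_KEY layer can map a
 * SADB algorithm id to its descriptor; a NULL return means the id is either
 * unknown or refers to an algorithm that is not currently available:
 *
 *        struct xfrm_algo_desc *aalg = xfrm_aalg_get_byid(SADB_AALG_SHA1HMAC);
 *
 *        if (aalg)
 *                use aalg->name, aalg->uinfo.auth.icv_truncbits, ...
 */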
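/*
 * Common helper for the *_get_byname() wrappers below: find the entry whose
 * name matches and, if it is not yet marked available and probe is set, ask
 * the crypto layer whether it can supply the algorithm and cache the answer
 * in ->available.
 */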
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
                                              int entries, char *name,
                                              int probe)
{
        int i, status;

        if (!name)
                return NULL;

        for (i = 0; i < entries; i++) {
                if (strcmp(name, list[i].name))
                        continue;

                if (list[i].available)
                        return &list[i];

                if (!probe)
                        break;

                status = crypto_alg_available(name, 0);
                if (!status)
                        break;

                list[i].available = status;
                return &list[i];
        }
        return NULL;
}

struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
        return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
        return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
        return xfrm_get_byname(calg_list, calg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
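
/*
 * Example (illustrative only): when an SA is installed from user space, the
 * requested cipher name can be resolved like this.  With probe == 1 an
 * algorithm that the crypto layer can supply but that has not yet been
 * probed is marked available and returned; with probe == 0 only algorithms
 * already known to be available match:
 *
 *        struct xfrm_algo_desc *ealg = xfrm_ealg_get_byname("aes", 1);
 *
 *        if (!ealg)
 *                reject the SA: cipher not supported
 */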

struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
        if (idx >= aalg_entries())
                return NULL;

        return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
        if (idx >= ealg_entries())
                return NULL;

        return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);

/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called by
 * pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
        int i, status;

        BUG_ON(in_softirq());

        for (i = 0; i < aalg_entries(); i++) {
                status = crypto_alg_available(aalg_list[i].name, 0);
                if (aalg_list[i].available != status)
                        aalg_list[i].available = status;
        }

        for (i = 0; i < ealg_entries(); i++) {
                status = crypto_alg_available(ealg_list[i].name, 0);
                if (ealg_list[i].available != status)
                        ealg_list[i].available = status;
        }

        for (i = 0; i < calg_entries(); i++) {
                status = crypto_alg_available(calg_list[i].name, 0);
                if (calg_list[i].available != status)
                        calg_list[i].available = status;
        }
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);

int xfrm_count_auth_supported(void)
{
        int i, n;

        for (i = 0, n = 0; i < aalg_entries(); i++)
                if (aalg_list[i].available)
                        n++;
        return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
        int i, n;

        for (i = 0, n = 0; i < ealg_entries(); i++)
                if (ealg_list[i].available)
                        n++;
        return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
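
/*
 * Example (illustrative only): af_key uses roughly this sequence when
 * answering an SADB_REGISTER request, so that the advertised algorithm
 * lists match what the crypto layer can actually provide:
 *
 *        xfrm_probe_algs();
 *        nauth = xfrm_count_auth_supported();
 *        nenc  = xfrm_count_enc_supported();
 *        size the reply for nauth + nenc struct sadb_alg entries
 */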

/* Move to common area: it is shared with AH. */

void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
                  int offset, int len, icv_update_fn_t icv_update)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct scatterlist sg;

        /* Checksum header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;

                sg.page = virt_to_page(skb->data + offset);
                sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
                sg.length = copy;

                icv_update(tfm, &sg, 1);

                if ((len -= copy) == 0)
                        return;
                offset += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        if (copy > len)
                                copy = len;

                        sg.page = frag->page;
                        sg.offset = frag->page_offset + offset-start;
                        sg.length = copy;

                        icv_update(tfm, &sg, 1);

                        if (!(len -= copy))
                                return;
                        offset += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                skb_icv_walk(list, tfm, offset-start, copy, icv_update);
                                if ((len -= copy) == 0)
                                        return;
                                offset += copy;
                        }
                        start = end;
                }
        }
        if (len)
                BUG();
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
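
/*
 * Example (illustrative only): AH computes its ICV by feeding every byte of
 * the packet through the hash in skb geometry order.  With the 2.6-era HMAC
 * helpers that looks roughly like:
 *
 *        crypto_hmac_init(tfm, key, &keylen);
 *        skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update);
 *        crypto_hmac_final(tfm, key, &keylen, digest);
 *
 * where crypto_hmac_update() matches the icv_update_fn_t signature.
 */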

#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Although this looks generic, it is not used anywhere else. */

int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        int elt = 0;

        if (copy > 0) {
                if (copy > len)
                        copy = len;
                sg[elt].page = virt_to_page(skb->data + offset);
                sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
                sg[elt].length = copy;
                elt++;
                if ((len -= copy) == 0)
                        return elt;
                offset += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        if (copy > len)
                                copy = len;
                        sg[elt].page = frag->page;
                        sg[elt].offset = frag->page_offset+offset-start;
                        sg[elt].length = copy;
                        elt++;
                        if (!(len -= copy))
                                return elt;
                        offset += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
                                if ((len -= copy) == 0)
                                        return elt;
                                offset += copy;
                        }
                        start = end;
                }
        }
        if (len)
                BUG();
        return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);

/* Check that the skb data is writable.  If it is not, copy the data to a
 * newly created private area.  If "tailbits" is given, make sure that
 * tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements needed for subsequent
 * transformations, and a pointer to the writable trailer skb.
 */

626
627 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
628 {
629         int copyflag;
630         int elt;
631         struct sk_buff *skb1, **skb_p;
632
633         /* If skb is cloned or its head is paged, reallocate
634          * head pulling out all the pages (pages are considered not writable
635          * at the moment even if they are anonymous).
636          */
637         if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
638             __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
639                 return -ENOMEM;
640
641         /* Easy case. Most of packets will go this way. */
642         if (!skb_shinfo(skb)->frag_list) {
643                 /* A little of trouble, not enough of space for trailer.
644                  * This should not happen, when stack is tuned to generate
645                  * good frames. OK, on miss we reallocate and reserve even more
646                  * space, 128 bytes is fair. */
647
648                 if (skb_tailroom(skb) < tailbits &&
649                     pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
650                         return -ENOMEM;
651
652                 /* Voila! */
653                 *trailer = skb;
654                 return 1;
655         }
656
657         /* Misery. We are in troubles, going to mincer fragments... */
658
659         elt = 1;
660         skb_p = &skb_shinfo(skb)->frag_list;
661         copyflag = 0;
662
663         while ((skb1 = *skb_p) != NULL) {
664                 int ntail = 0;
665
666                 /* The fragment is partially pulled by someone,
667                  * this can happen on input. Copy it and everything
668                  * after it. */
669
670                 if (skb_shared(skb1))
671                         copyflag = 1;
672
673                 /* If the skb is the last, worry about trailer. */
674
675                 if (skb1->next == NULL && tailbits) {
676                         if (skb_shinfo(skb1)->nr_frags ||
677                             skb_shinfo(skb1)->frag_list ||
678                             skb_tailroom(skb1) < tailbits)
679                                 ntail = tailbits + 128;
680                 }
681
682                 if (copyflag ||
683                     skb_cloned(skb1) ||
684                     ntail ||
685                     skb_shinfo(skb1)->nr_frags ||
686                     skb_shinfo(skb1)->frag_list) {
687                         struct sk_buff *skb2;
688
689                         /* Fuck, we are miserable poor guys... */
690                         if (ntail == 0)
691                                 skb2 = skb_copy(skb1, GFP_ATOMIC);
692                         else
693                                 skb2 = skb_copy_expand(skb1,
694                                                        skb_headroom(skb1),
695                                                        ntail,
696                                                        GFP_ATOMIC);
697                         if (unlikely(skb2 == NULL))
698                                 return -ENOMEM;
699
700                         if (skb1->sk)
701                                 skb_set_owner_w(skb2, skb1->sk);
702
703                         /* Looking around. Are we still alive?
704                          * OK, link new skb, drop old one */
705
706                         skb2->next = skb1->next;
707                         *skb_p = skb2;
708                         kfree_skb(skb1);
709                         skb1 = skb2;
710                 }
711                 elt++;
712                 *trailer = skb1;
713                 skb_p = &skb1->next;
714         }
715
716         return elt;
717 }
718 EXPORT_SYMBOL_GPL(skb_cow_data);
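
/*
 * Example (illustrative only, modelled loosely on the ESP output path): make
 * the packet writable with room for the trailer, append the trailer via
 * pskb_put(), then map the payload into a scatterlist for the cipher.  The
 * names trailer_len, tail, sg, payload_offset and payload_len stand in for
 * values the real caller computes:
 *
 *        struct sk_buff *trailer;
 *        int nfrags = skb_cow_data(skb, trailer_len, &trailer);
 *
 *        if (nfrags < 0)
 *                goto error;
 *        tail = pskb_put(skb, trailer, trailer_len);
 *        fill in padding and the trailer at tail
 *        skb_to_sgvec(skb, sg, payload_offset, payload_len);
 *        run the cipher over the (at most nfrags) scatterlist entries
 */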
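/*
 * Append len bytes to the tail skb (normally the writable trailer returned
 * by skb_cow_data()).  When the tail is not the head skb itself, the head's
 * len/data_len are updated as well so the overall packet length stays
 * consistent.
 */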
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
        if (tail != skb) {
                skb->data_len += len;
                skb->len += len;
        }
        return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
#endif