/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>
/*
 * Algorithms supported by IPsec.  These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
static struct xfrm_algo_desc aalg_list[] = {
{
	.name = "hmac(digest_null)",
	.compat = "digest_null",
	.desc = { .sadb_alg_id = SADB_X_AALG_NULL,
		  .sadb_alg_minbits = 0, .sadb_alg_maxbits = 0 }
},
{
	.name = "hmac(md5)",
	.compat = "md5",
	.desc = { .sadb_alg_id = SADB_AALG_MD5HMAC,
		  .sadb_alg_minbits = 128, .sadb_alg_maxbits = 128 }
},
{
	.name = "hmac(sha1)",
	.compat = "sha1",
	.desc = { .sadb_alg_id = SADB_AALG_SHA1HMAC,
		  .sadb_alg_minbits = 160, .sadb_alg_maxbits = 160 }
},
{
	.name = "hmac(sha256)",
	.compat = "sha256",
	.desc = { .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		  .sadb_alg_minbits = 256, .sadb_alg_maxbits = 256 }
},
{
	.name = "hmac(ripemd160)",
	.compat = "ripemd160",
	.desc = { .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		  .sadb_alg_minbits = 160, .sadb_alg_maxbits = 160 }
},
{
	.name = "xcbc(aes)",
	.desc = { .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC,
		  .sadb_alg_minbits = 128, .sadb_alg_maxbits = 128 }
},
};
static struct xfrm_algo_desc ealg_list[] = {
{
	.name = "ecb(cipher_null)",
	.compat = "cipher_null",
	.desc = { .sadb_alg_id = SADB_EALG_NULL,
		  .sadb_alg_minbits = 0, .sadb_alg_maxbits = 0 }
},
{
	.name = "cbc(des)",
	.compat = "des",
	.desc = { .sadb_alg_id = SADB_EALG_DESCBC,
		  .sadb_alg_minbits = 64, .sadb_alg_maxbits = 64 }
},
{
	.name = "cbc(des3_ede)",
	.compat = "des3_ede",
	.desc = { .sadb_alg_id = SADB_EALG_3DESCBC,
		  .sadb_alg_minbits = 192, .sadb_alg_maxbits = 192 }
},
{
	.name = "cbc(cast128)",
	.desc = { .sadb_alg_id = SADB_X_EALG_CASTCBC,
		  .sadb_alg_minbits = 40, .sadb_alg_maxbits = 128 }
},
{
	.name = "cbc(blowfish)",
	.compat = "blowfish",
	.desc = { .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		  .sadb_alg_minbits = 40, .sadb_alg_maxbits = 448 }
},
{
	.name = "cbc(aes)",
	.compat = "aes",
	.desc = { .sadb_alg_id = SADB_X_EALG_AESCBC,
		  .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 }
},
{
	.name = "cbc(serpent)",
	.desc = { .sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		  .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 }
},
{
	.name = "cbc(twofish)",
	.desc = { .sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		  .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 }
},
};
static struct xfrm_algo_desc calg_list[] = {
{ .name = "deflate", .compat = "deflate", .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE } },
{ .name = "lzs",     .compat = "lzs",     .desc = { .sadb_alg_id = SADB_X_CALG_LZS } },
{ .name = "lzjh",    .compat = "lzjh",    .desc = { .sadb_alg_id = SADB_X_CALG_LZJH } },
};
static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}
/* Todo: generic iterators */

struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < aalg_entries(); i++) {
		if (aalg_list[i].desc.sadb_alg_id == alg_id) {
			if (aalg_list[i].available)
				return &aalg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < ealg_entries(); i++) {
		if (ealg_list[i].desc.sadb_alg_id == alg_id) {
			if (ealg_list[i].available)
				return &ealg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < calg_entries(); i++) {
		if (calg_list[i].desc.sadb_alg_id == alg_id) {
			if (calg_list[i].available)
				return &calg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
					      int entries, u32 type, u32 mask,
					      char *name, int probe)
{
	int i, status;

	if (!name)
		return NULL;

	for (i = 0; i < entries; i++) {
		if (strcmp(name, list[i].name) &&
		    (!list[i].compat || strcmp(name, list[i].compat)))
			continue;

		if (list[i].available)
			return &list[i];

		if (!probe)
			break;

		status = crypto_has_alg(name, type, mask | CRYPTO_ALG_ASYNC);
		if (!status)
			break;

		list[i].available = status;
		return &list[i];
	}
	return NULL;
}
struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(),
			       CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(),
			       CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(),
			       CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
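
/*
 * Illustrative sketch only, not part of the original file: how a
 * configuration path (e.g. PF_KEY or the xfrm netlink interface) might
 * use the byname lookup above to resolve a descriptor and validate a
 * user-supplied key length against the key-negotiation bounds carried in
 * the descriptor.  The function name and error conventions here are
 * hypothetical.
 */
static inline int example_check_auth_key(char *alg_name, unsigned int key_bits)
{
	struct xfrm_algo_desc *algd;

	/* probe=1 lets an algorithm that is present but not yet marked
	 * available still be resolved. */
	algd = xfrm_aalg_get_byname(alg_name, 1);
	if (!algd)
		return -ENOSYS;

	/* Reject keys outside the advertised negotiation range. */
	if (key_bits < algd->desc.sadb_alg_minbits ||
	    key_bits > algd->desc.sadb_alg_maxbits)
		return -EINVAL;

	return 0;
}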
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
	if (idx >= aalg_entries())
		return NULL;

	return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
	if (idx >= ealg_entries())
		return NULL;

	return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called
 * by pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
	int i, status;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_has_hash(aalg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_has_blkcipher(ealg_list[i].name, 0,
					      CRYPTO_ALG_ASYNC);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_has_comp(calg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
int xfrm_count_auth_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < aalg_entries(); i++)
		if (aalg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < ealg_entries(); i++)
		if (ealg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
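
/*
 * Illustrative sketch only, not part of the original file: the pattern a
 * pfkey-style caller could follow when sizing a register reply -- refresh
 * the availability flags from process context, then take the counts.
 * The function name is hypothetical.
 */
static inline void example_count_supported(int *nauth, int *nenc)
{
	/* Safe only from process context; see the BUG_ON(in_softirq())
	 * check in xfrm_probe_algs() above. */
	xfrm_probe_algs();

	*nauth = xfrm_count_auth_supported();
	*nenc = xfrm_count_enc_supported();
}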
/* Move to common area: it is shared with AH. */

int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
		 int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int err;
	struct scatterlist sg;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		err = icv_update(desc, &sg, copy);
		if (unlikely(err))
			return err;

		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset - start;
			sg.length = copy;

			err = icv_update(desc, &sg, copy);
			if (unlikely(err))
				return err;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				err = skb_icv_walk(list, desc, offset - start,
						   copy, icv_update);
				if (unlikely(err))
					return err;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
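
/*
 * Illustrative sketch only, not part of the original file: how an
 * AH-style caller might use skb_icv_walk() to digest the whole packet,
 * assuming a hash_desc that has already been set up and keyed (e.g. via
 * crypto_hash_setkey()).  The function name is hypothetical and error
 * handling is minimal.
 */
static inline int example_icv_digest(struct hash_desc *desc,
				     struct sk_buff *skb, u8 *icv)
{
	int err;

	err = crypto_hash_init(desc);
	if (err)
		return err;

	/* Walk linear data, page frags and the frag list alike. */
	err = skb_icv_walk(skb, desc, 0, skb->len, crypto_hash_update);
	if (err)
		return err;

	return crypto_hash_final(desc, icv);
}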
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
/* Looks generic, but it is not used anywhere else. */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset + offset - start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg + elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
/* Check that skb data bits are writable.  If they are not, copy data
 * to a newly created private area.  If "tailbits" is given, make sure that
 * tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements to load for subsequent
 * transformations, and a pointer to the writable trailer skb.
 */

int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate the head,
	 * pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb) - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case.  Most packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames.  OK, on a miss we reallocate and reserve even
		 * more space; 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits - skb_tailroom(skb) + 128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery.  We are in trouble, going to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone; this can
		 * happen on input.  Copy it and everything after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last one, worry about the trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* We are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around.  Are we still alive?
			 * OK, link the new skb, drop the old one. */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
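
/*
 * Illustrative sketch only, not part of the original file: how an ESP
 * output path could tie the helpers above together -- make the packet
 * data writable with room for the ESP trailer, append the trailer bytes,
 * then describe the whole packet as a scatterlist for the cipher.
 * Variable and function names are hypothetical, the caller is assumed to
 * provide an sg array with room for the returned number of fragments,
 * and error handling is trimmed.
 */
static inline int example_esp_map_packet(struct sk_buff *skb,
					 struct scatterlist *sg,
					 int trailer_len)
{
	struct sk_buff *trailer;
	u8 *tail;
	int nfrags;

	/* Ensure private, writable data and trailer_len bytes of tailroom. */
	nfrags = skb_cow_data(skb, trailer_len, &trailer);
	if (nfrags < 0)
		return nfrags;

	/* Reserve and clear the trailer at the very end of the packet. */
	tail = pskb_put(skb, trailer, trailer_len);
	memset(tail, 0, trailer_len);

	/* Build a scatterlist covering every byte of the packet. */
	skb_to_sgvec(skb, sg, 0, skb->len);

	return nfrags;
}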