/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>
/*
 * Algorithms supported by IPsec.  These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
static struct xfrm_algo_desc aalg_list[] = {
{
	.name = "hmac(digest_null)",
	.compat = "digest_null",
	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "hmac(md5)",
	.compat = "md5",
	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "hmac(sha1)",
	.compat = "sha1",
	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	.name = "hmac(sha256)",
	.compat = "sha256",
	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "hmac(ripemd160)",
	.compat = "ripemd160",
	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
};
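/*
 * Note: in the full kernel source each entry above also carries a .uinfo
 * block with the algorithm properties AH/ESP need at transform setup.
 * A sketch of that field for the hmac(sha1) entry, which truncates its
 * 160-bit digest to a 96-bit ICV per RFC 2404:
 *
 *	.uinfo = {
 *		.auth = {
 *			.icv_truncbits = 96,
 *			.icv_fullbits = 160,
 *		}
 *	},
 */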
static struct xfrm_algo_desc ealg_list[] = {
{
	.name = "ecb(cipher_null)",
	.compat = "cipher_null",
	.desc = {
		.sadb_alg_id = SADB_EALG_NULL,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "cbc(des)",
	.compat = "des",
	.desc = {
		.sadb_alg_id = SADB_EALG_DESCBC,
		.sadb_alg_minbits = 64,
		.sadb_alg_maxbits = 64
	}
},
{
	.name = "cbc(des3_ede)",
	.compat = "des3_ede",
	.desc = {
		.sadb_alg_id = SADB_EALG_3DESCBC,
		.sadb_alg_minbits = 192,
		.sadb_alg_maxbits = 192
	}
},
{
	.name = "cbc(cast128)",
	.compat = "cast128",
	.desc = {
		.sadb_alg_id = SADB_X_EALG_CASTCBC,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "cbc(blowfish)",
	.compat = "blowfish",
	.desc = {
		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 448
	}
},
{
	.name = "cbc(aes)",
	.compat = "aes",
	.desc = {
		.sadb_alg_id = SADB_X_EALG_AESCBC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "cbc(serpent)",
	.compat = "serpent",
	.desc = {
		.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "cbc(twofish)",
	.compat = "twofish",
	.desc = {
		.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};
static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}
/* Todo: generic iterators */
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < aalg_entries(); i++) {
		if (aalg_list[i].desc.sadb_alg_id == alg_id) {
			if (aalg_list[i].available)
				return &aalg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < ealg_entries(); i++) {
		if (ealg_list[i].desc.sadb_alg_id == alg_id) {
			if (ealg_list[i].available)
				return &ealg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < calg_entries(); i++) {
		if (calg_list[i].desc.sadb_alg_id == alg_id) {
			if (calg_list[i].available)
				return &calg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
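/*
 * Example (illustrative sketch, not part of the original file): a
 * pfkey-style id lookup.  The function name is hypothetical;
 * SADB_AALG_SHA1HMAC comes from <linux/pfkeyv2.h>.
 */
#if 0
static void example_lookup_byid(void)
{
	struct xfrm_algo_desc *aalg = xfrm_aalg_get_byid(SADB_AALG_SHA1HMAC);

	if (aalg)
		printk(KERN_DEBUG "auth alg %s is available\n", aalg->name);
}
#endif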
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
					      int entries, u32 type, u32 mask,
					      char *name, int probe)
{
	int i, status;

	if (!name)
		return NULL;

	for (i = 0; i < entries; i++) {
		if (strcmp(name, list[i].name) &&
		    (!list[i].compat || strcmp(name, list[i].compat)))
			continue;

		if (list[i].available)
			return &list[i];

		if (!probe)
			break;

		status = crypto_has_alg(name, type, mask | CRYPTO_ALG_ASYNC);
		if (!status)
			break;

		list[i].available = status;
		return &list[i];
	}
	return NULL;
}
struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(),
			       CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(),
			       CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(),
			       CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
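/*
 * Example (illustrative sketch, not part of the original file): a name
 * lookup with probing enabled, as the pfkey and netlink paths use it.
 * With probe != 0, an algorithm not yet marked available is looked up in
 * the crypto layer (which may trigger module autoloading) before the
 * descriptor is returned.  The function name is hypothetical.
 */
#if 0
static void example_lookup_byname(void)
{
	struct xfrm_algo_desc *ealg = xfrm_ealg_get_byname("cbc(aes)", 1);

	if (ealg)
		printk(KERN_DEBUG "enc alg %s, key bits %u-%u\n",
		       ealg->name,
		       ealg->desc.sadb_alg_minbits,
		       ealg->desc.sadb_alg_maxbits);
}
#endif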
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
	if (idx >= aalg_entries())
		return NULL;

	return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
	if (idx >= ealg_entries())
		return NULL;

	return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called
 * by pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
	int i, status;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_has_hash(aalg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_has_blkcipher(ealg_list[i].name, 0,
					      CRYPTO_ALG_ASYNC);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_has_comp(calg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
int xfrm_count_auth_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < aalg_entries(); i++)
		if (aalg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < ealg_entries(); i++)
		if (ealg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
/* To be moved to a common area: it is shared with AH. */

int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
		 int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int err;
	struct scatterlist sg;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		err = icv_update(desc, &sg, copy);
		if (unlikely(err))
			return err;

		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset-start;
			sg.length = copy;

			err = icv_update(desc, &sg, copy);
			if (unlikely(err))
				return err;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				err = skb_icv_walk(list, desc, offset-start,
						   copy, icv_update);
				if (unlikely(err))
					return err;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
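/*
 * Example (illustrative sketch modelled on the AH code, not part of the
 * original file): computing an ICV over a whole skb.  Since
 * crypto_hash_update() matches the icv_update_fn_t signature, it can be
 * passed to skb_icv_walk() directly.  The function name is hypothetical.
 */
#if 0
static int example_icv_digest(struct crypto_hash *tfm, struct sk_buff *skb,
			      u8 *icv)
{
	struct hash_desc desc = { .tfm = tfm, .flags = 0 };
	int err;

	err = crypto_hash_init(&desc);
	if (err)
		return err;
	err = skb_icv_walk(skb, &desc, 0, skb->len, crypto_hash_update);
	if (err)
		return err;
	return crypto_hash_final(&desc, icv);
}
#endif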
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Although this looks generic, it is not used anywhere else. */

int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset+offset-start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
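/*
 * Example (illustrative sketch modelled on esp_output(), not part of the
 * original file): encrypting part of an skb in place.  The scatterlist is
 * sized by the element count returned by skb_cow_data() (defined below);
 * the function name and parameters are hypothetical.
 */
#if 0
static int example_encrypt_payload(struct crypto_blkcipher *tfm,
				   struct sk_buff *skb, int offset,
				   int clen, int nfrags)
{
	struct blkcipher_desc desc = { .tfm = tfm };
	struct scatterlist *sg;
	int err;

	sg = kmalloc(sizeof(*sg) * nfrags, GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;
	skb_to_sgvec(skb, sg, offset, clen);
	err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
	kfree(sg);
	return err;
}
#endif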
/* Check that skb data bits are writable.  If they are not, copy data
 * to a newly created private area.  If "tailbits" is given, make sure
 * that tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements to load for subsequent
 * transformations and a pointer to the writable trailer skb.
 */

int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If the skb is cloned or its head is paged, reallocate the head,
	 * pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case.  Most packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A bit of trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to
		 * generate good frames.  If it does, reallocate and
		 * reserve even more space; 128 extra bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Hard case: walk the fragment list and copy whatever is not
	 * writable. */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* If a fragment was partially pulled by someone (this can
		 * happen on input), copy it and everything after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last one, worry about the trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* No way out: the fragment must be copied. */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Link the new skb in and drop the old one. */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
/* Like skb_put(), but for a paged skb: append len bytes to the tail
 * fragment and account for them on the parent skb as well. */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
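/*
 * Example (illustrative sketch modelled on esp_output(), not part of the
 * original file): making room for a trailer before transforming.
 * skb_cow_data() guarantees writable data and returns the scatterlist
 * element count for skb_to_sgvec(); pskb_put() then appends the trailer
 * bytes to the writable tail.  The function name and trailer_len are
 * hypothetical.
 */
#if 0
static int example_reserve_trailer(struct sk_buff *skb, int trailer_len)
{
	struct sk_buff *trailer;
	int nfrags;

	nfrags = skb_cow_data(skb, trailer_len, &trailer);
	if (nfrags < 0)
		return nfrags;

	memset(pskb_put(skb, trailer, trailer_len), 0, trailer_len);
	return nfrags;
}
#endif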