#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
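/*
 * ESP_SKB_CB() overlays the skb control block so that the pointer to the
 * scratch buffer allocated by esp_alloc_tmp() survives an asynchronous
 * crypto operation and can be freed from the completion callbacks.
 */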
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
{
	unsigned int len;

	len = crypto_aead_ivsize(aead);
	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
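/*
 * The scratch buffer returned above is carved up by the helpers below:
 * an IV aligned for the cipher, then the AEAD request (plus its private
 * context), then the scatterlist array, i.e. roughly:
 *
 *   [ IV ][ aead request + reqsize ][ scatterlist[nfrags] ]
 */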
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
}
static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}
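/*
 * Output path: reserve room in the trailer, append the self-describing
 * ESP padding plus the pad-length and next-header bytes, fill in the ESP
 * header (and optional UDP encapsulation header), then encrypt in place.
 * The generic IV generator (givencrypt) writes the per-packet IV right
 * after the ESP header.
 *
 * On-wire layout produced here (RFC 4303):
 *
 *   [ ESP hdr: SPI, seq ][ IV ][ payload ][ padding ][ pad len ][ next hdr ][ ICV ]
 */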
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct esp_data *esp;
	struct sk_buff *trailer;
	void *tmp;
	u8 *iv;
	u8 *tail;
	int blksize;
	int clen;
	int alen;
	int nfrags;
	int i;

	/* skb is pure payload to encrypt */

	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	esp = x->data;
	aead = esp->aead;
	alen = crypto_aead_authsize(aead);

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->padlen)
		clen = ALIGN(clen, esp->padlen);

	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
		goto error;
	nfrags = err;

	tmp = esp_alloc_tmp(aead, nfrags + 1);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}

	iv = esp_tmp_iv(aead, tmp);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + 1;

	/* Fill padding... */
	tail = skb_tail_pointer(trailer);
	for (i = 0; i < clen - skb->len - 2; i++)
		tail[i] = i + 1;
	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
	tail[clen - skb->len - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;
	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;
		__be16 sport, dport;
		int encap_type;

		spin_lock_bh(&x->lock);
		sport = encap->encap_sport;
		dport = encap->encap_dport;
		encap_type = encap->encap_type;
		spin_unlock_bh(&x->lock);

		uh = (struct udphdr *)esph;
		uh->source = sport;
		uh->dest = dport;
		uh->len = htons(skb->len - skb_transport_offset(skb));
		uh->check = 0;

		switch (encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		*skb_mac_header(skb) = IPPROTO_UDP;
	}
	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);
	sg_init_one(asg, esph, sizeof(*esph));

	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output);

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;

	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);

error:
	return err;
}
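/*
 * Post-decryption processing: free the scratch buffer, validate the pad
 * length, handle NAT-T peer address/port changes, strip the ESP header
 * and trailer, and hand back the next-header value to the xfrm layer.
 */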
static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int ihl;
	u8 nexthdr[2];
	int padlen;

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	skb_set_transport_header(skb, -ihl);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		goto out;

	if (elen <= 0)
		goto out;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags + 1);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	iv = esp_tmp_iv(aead, tmp);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + 1;
	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This can be wrong, check against other implementations. */
	iv = esph->enc_data;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
	sg_init_one(asg, esph, sizeof(*esph));

	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, sizeof(*esph));

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	err = esp_input_done2(skb, err);

out:
	return err;
}
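/*
 * Report the largest payload that still fits in @mtu once the ESP header,
 * IV, trailer and ICV have been accounted for, keeping the encrypted
 * portion aligned to the cipher block size.
 */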
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
	u32 align = max_t(u32, blksize, esp->padlen);
	u32 rem;

	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
	rem = mtu & (align - 1);
	mtu &= ~(align - 1);

	switch (x->props.mode) {
	case XFRM_MODE_TUNNEL:
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		/* The worst case */
		mtu -= blksize - 4;
		mtu += min_t(u32, blksize - 4, rem);
		break;
	case XFRM_MODE_BEET:
		/* The worst case. */
		mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
		break;
	}

	return mtu - 2;
}
static void esp4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
		return;

	x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi,
			      IPPROTO_ESP, AF_INET);
	if (!x)
		return;
	NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
		 ntohl(esph->spi), ntohl(iph->daddr));
	xfrm_state_put(x);
}
static void esp_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_aead(esp->aead);
	kfree(esp);
}
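/*
 * Two ways to instantiate the AEAD transform: esp_init_aead() for true
 * combined-mode algorithms negotiated via x->aead, and esp_init_authenc()
 * which wraps a classic cipher/HMAC pair in the crypto layer's authenc()
 * template, e.g. "authenc(hmac(sha1),cbc(aes))".
 */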
static int esp_init_aead(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);

error:
	return err;
}
static int esp_init_authenc(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;
	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
		     x->aalg ? x->aalg->alg_name : "digest_null",
		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;
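	/*
	 * The authenc() key blob built below is an rtattr carrying the
	 * encryption key length, followed by the raw authentication key and
	 * then the raw encryption key:
	 *
	 *   [ rtattr: CRYPTO_AUTHENC_KEYA_PARAM { enckeylen } ][ auth key ][ enc key ]
	 */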
	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));
	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_aead_authsize(aead),
				 aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
		if (err)
			goto free_key;
	}
	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp;
	struct crypto_aead *aead;
	u32 align;
	int err;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	x->data = esp;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = esp->aead;
	esp->padlen = 0;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}
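	/*
	 * Worst-case trailer: up to (align - 1) bytes of padding plus the
	 * pad-length and next-header bytes, i.e. align + 1 bytes, followed
	 * by the ICV.
	 */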
	align = ALIGN(crypto_aead_blocksize(aead), 4);
	if (esp->padlen)
		align = max_t(u32, align, esp->padlen);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
	return err;
}
static const struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output
};
static struct net_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.err_handler	= esp4_err,
	.no_policy	= 1,
};
static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		printk(KERN_INFO "ip esp init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
		printk(KERN_INFO "ip esp init: can't add protocol\n");
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}
static void __exit esp4_fini(void)
{
	if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
		printk(KERN_INFO "ip esp close: can't remove protocol\n");
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
}
module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);