#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;
        struct ip_esp_hdr *esph;
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct esp_data *esp;
        struct sk_buff *trailer;
        u8 *tail;
        int blksize;
        int clen;
        int alen;
        int nfrags;
        int i;

        /* skb is pure payload to encrypt */

        err = -ENOMEM;

        /* Round to block size */
        clen = skb->len;

        esp = x->data;
        alen = esp->auth.icv_trunc_len;
        tfm = esp->conf.tfm;
        desc.tfm = tfm;
        desc.flags = 0;
        blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
        clen = ALIGN(clen + 2, blksize);
        if (esp->conf.padlen)
                clen = ALIGN(clen, esp->conf.padlen);

        if ((nfrags = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
                goto error;

        /* Fill the padding with the self-describing 1, 2, 3, ... pattern,
         * then the pad-length byte; the next-header byte is written below. */
        tail = skb_tail_pointer(trailer);
        for (i = 0; i < clen - skb->len - 2; i++)
                tail[i] = i + 1;
        tail[clen - skb->len - 2] = (clen - skb->len) - 2;
        pskb_put(skb, trailer, clen - skb->len);
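        /*
         * Worked example for the padding arithmetic (illustrative numbers,
         * not taken from the code above): a 60-byte payload with a 16-byte
         * cipher block gives clen = ALIGN(60 + 2, 16) = 64, i.e. two pad
         * bytes (1, 2), a pad-length byte of 2, and the next-header byte.
         */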
        /* The last trailer byte records the inner protocol taken from the
         * stashed next-header byte, which is then overwritten with ESP. */
        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;
        spin_lock_bh(&x->lock);

        /* this is non-NULL only with UDP Encapsulation */
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
                struct udphdr *uh;
                __be32 *udpdata32;

                uh = (struct udphdr *)esph;
                uh->source = encap->encap_sport;
                uh->dest = encap->encap_dport;
                uh->len = htons(skb->len + alen - skb_transport_offset(skb));
                uh->check = 0;

                switch (encap->encap_type) {
                default:
                case UDP_ENCAP_ESPINUDP:
                        esph = (struct ip_esp_hdr *)(uh + 1);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        udpdata32 = (__be32 *)(uh + 1);
                        udpdata32[0] = udpdata32[1] = 0;
                        esph = (struct ip_esp_hdr *)(udpdata32 + 2);
                        break;
                }

                *skb_mac_header(skb) = IPPROTO_UDP;
        }
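        /*
         * The two encapsulation layouts produced above, for reference
         * (a sketch derived from this code, not quoted from a spec):
         *
         *   UDP_ENCAP_ESPINUDP:          [ UDP hdr | ESP hdr | ... ]
         *   UDP_ENCAP_ESPINUDP_NON_IKE:  [ UDP hdr | 8 zero bytes | ESP hdr | ... ]
         *
         * The two zero 32-bit words are the non-IKE marker used by the
         * older draft-ietf-ipsec-udp-encaps format.
         */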
        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);

        if (esp->conf.ivlen) {
                /* Seed the IV lazily with random bytes on first use */
                if (unlikely(!esp->conf.ivinitted)) {
                        get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
                        esp->conf.ivinitted = 1;
                }
                crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
        }

        do {
                struct scatterlist *sg = &esp->sgbuf[0];

                if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
                        sg = kmalloc(sizeof(struct scatterlist) * nfrags,
                                     GFP_ATOMIC);
                        if (!sg)
                                goto unlock;
                }
                sg_init_table(sg, nfrags);
                skb_to_sgvec(skb, sg,
                             esph->enc_data +
                             esp->conf.ivlen -
                             skb->data, clen);
                err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
                if (unlikely(sg != &esp->sgbuf[0]))
                        kfree(sg);
        } while (0);

        if (unlikely(err))
                goto unlock;
        if (esp->conf.ivlen) {
                /* Prepend the IV, then read back the cipher's IV (the last
                 * ciphertext block for CBC) to chain into the next packet */
                memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
                crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
        }

        if (esp->auth.icv_full_len) {
                err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
                                     sizeof(*esph) + esp->conf.ivlen + clen);
                memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
        }

unlock:
        spin_unlock_bh(&x->lock);

error:
        return err;
}
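/*
 * Summary of the output packet built above (illustrative sketch):
 *
 *   [ IP hdr | optional UDP encap | ESP hdr (spi, seq) | IV |
 *     encrypted: payload + padding + padlen + nexthdr | ICV ]
 */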
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct iphdr *iph;
        struct ip_esp_hdr *esph;
        struct esp_data *esp = x->data;
        struct crypto_blkcipher *tfm = esp->conf.tfm;
        struct blkcipher_desc desc = { .tfm = tfm };
        struct sk_buff *trailer;
        int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
        int alen = esp->auth.icv_trunc_len;
        int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen;
        int nfrags;
        int ihl;
        int err;
        u8 nexthdr[2];
        struct scatterlist *sg;
        int padlen;

        if (!pskb_may_pull(skb, sizeof(*esph)))
                goto out;

        /* The encrypted part must be a positive whole number of blocks */
        if (elen <= 0 || (elen & (blksize - 1)))
                goto out;

        if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
                goto out;
        nfrags = err;

        skb->ip_summed = CHECKSUM_NONE;

        spin_lock(&x->lock);
        /* If integrity check is required, do this. */
        if (esp->auth.icv_full_len) {
                u8 sum[alen];

                err = esp_mac_digest(esp, skb, 0, skb->len - alen);
                if (err)
                        goto unlock;

                if (skb_copy_bits(skb, skb->len - alen, sum, alen))
                        BUG();

                if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
                        xfrm_audit_state_icvfail(x, skb, IPPROTO_ESP);
                        err = -EBADMSG;
                        goto unlock;
                }
        }

        esph = (struct ip_esp_hdr *)skb->data;
        /* Get ivec. This can be wrong; check against other implementations. */
        if (esp->conf.ivlen)
                crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);

        sg = &esp->sgbuf[0];

        if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
                err = -ENOMEM;
                sg = kmalloc(sizeof(struct scatterlist) * nfrags, GFP_ATOMIC);
                if (!sg)
                        goto unlock;
        }
        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg,
                     sizeof(*esph) + esp->conf.ivlen,
                     elen);
        err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
        if (unlikely(sg != &esp->sgbuf[0]))
                kfree(sg);

unlock:
        spin_unlock(&x->lock);

        if (unlikely(err))
                goto out;
        if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
                BUG();

        padlen = nexthdr[0];
        if (padlen + 2 >= elen)
                goto out;

        /* ... check padding bits here. Silly. :-) */

        /* RFC4303: Drop dummy packets without any error */
        if (nexthdr[1] == IPPROTO_NONE)
                goto out;
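        /*
         * The two bytes just parsed are the end of the ESP trailer, which
         * sits immediately before the ICV (layout sketch):
         *
         *   [ payload | padding (padlen bytes) | padlen | nexthdr | ICV (alen) ]
         */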
        iph = ip_hdr(skb);
        ihl = iph->ihl * 4;

        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
                struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

                /*
                 * 1) if the NAT-T peer's IP or port changed then
                 *    advertise the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
                if (iph->saddr != x->props.saddr.a4 ||
                    uh->source != encap->encap_sport) {
                        xfrm_address_t ipaddr;

                        ipaddr.a4 = iph->saddr;
                        km_new_mapping(x, &ipaddr, uh->source);

                        /* XXX: perhaps add an extra
                         * policy check here, to see
                         * if we should allow or
                         * reject a packet from a
                         * different source
                         * address/port.
                         */
                }

                /*
                 * 2) ignore UDP/TCP checksums in case
                 *    of NAT-T in Transport Mode, or
                 *    perform other post-processing fixes
                 *    as per draft-ietf-ipsec-udp-encaps-06,
                 *    section 3.1.2
                 */
                if (x->props.mode == XFRM_MODE_TRANSPORT)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
        /* Strip the trailer, then the ESP header and IV, and hand the
         * inner protocol number back to the xfrm input path */
        pskb_trim(skb, skb->len - alen - padlen - 2);
        __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
        skb_set_transport_header(skb, -ihl);

        return nexthdr[1];

out:
        return -EINVAL;
}
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
        u32 align = max_t(u32, blksize, esp->conf.padlen);
        u32 rem;

        mtu -= x->props.header_len + esp->auth.icv_trunc_len;
        rem = mtu & (align - 1);
        mtu &= ~(align - 1);

        switch (x->props.mode) {
        case XFRM_MODE_TUNNEL:
                break;
        default:
        case XFRM_MODE_TRANSPORT:
                /* The worst case */
                mtu -= blksize - 4;
                mtu += min_t(u32, blksize - 4, rem);
                break;
        case XFRM_MODE_BEET:
                /* The worst case. */
                mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
                break;
        }

        return mtu - 2;
}
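/*
 * Worked example for the arithmetic above (illustrative figures): with a
 * 1500-byte MTU, a 16-byte-block cipher in tunnel mode (header_len =
 * 8 ESP + 16 IV + 20 outer IP = 44) and a 12-byte truncated ICV:
 * 1500 - 44 - 12 = 1444, masked down to a multiple of 16 gives 1440,
 * and the final "- 2" for the padlen/nexthdr bytes leaves 1438 bytes
 * of payload per packet.
 */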
static void esp4_err(struct sk_buff *skb, u32 info)
{
        struct iphdr *iph = (struct iphdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
        struct xfrm_state *x;

        /* Only fragmentation-needed errors (PMTU discovery) matter here */
        if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
            icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                return;

        x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi,
                              IPPROTO_ESP, AF_INET);
        if (!x)
                return;
        NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
                 ntohl(esph->spi), ntohl(iph->daddr));
        xfrm_state_put(x);
}
static void esp_destroy(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;

        if (!esp)
                return;

        crypto_free_blkcipher(esp->conf.tfm);
        esp->conf.tfm = NULL;
        kfree(esp->conf.ivec);
        esp->conf.ivec = NULL;
        crypto_free_hash(esp->auth.tfm);
        esp->auth.tfm = NULL;
        kfree(esp->auth.work_icv);
        esp->auth.work_icv = NULL;
        kfree(esp);
}
static int esp_init_state(struct xfrm_state *x)
{
        struct esp_data *esp = NULL;
        struct crypto_blkcipher *tfm;
        u32 align;

        if (x->ealg == NULL)
                goto error;

        esp = kzalloc(sizeof(*esp), GFP_KERNEL);
        if (esp == NULL)
                return -ENOMEM;

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;
                struct crypto_hash *hash;

                hash = crypto_alloc_hash(x->aalg->alg_name, 0,
                                         CRYPTO_ALG_ASYNC);
                if (IS_ERR(hash))
                        goto error;

                esp->auth.tfm = hash;
                if (crypto_hash_setkey(hash, x->aalg->alg_key,
                                       (x->aalg->alg_key_len + 7) / 8))
                        goto error;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_hash_digestsize(hash)) {
                        NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
                                 x->aalg->alg_name,
                                 crypto_hash_digestsize(hash),
                                 aalg_desc->uinfo.auth.icv_fullbits / 8);
                        goto error;
                }

                esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits / 8;
                esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits / 8;

                esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
                if (!esp->auth.work_icv)
                        goto error;
        }
        tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                goto error;
        esp->conf.tfm = tfm;
        esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
        esp->conf.padlen = 0;
        if (esp->conf.ivlen) {
                esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
                if (unlikely(esp->conf.ivec == NULL))
                        goto error;
                esp->conf.ivinitted = 0;
        }
        if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key,
                                    (x->ealg->alg_key_len + 7) / 8))
                goto error;
        x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
        else if (x->props.mode == XFRM_MODE_BEET)
                x->props.header_len += IPV4_BEET_PHMAXLEN;
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;

                switch (encap->encap_type) {
                default:
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        x->props.header_len += sizeof(struct udphdr) +
                                               2 * sizeof(u32);
                        break;
                }
        }
        x->data = esp;
        align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
        if (esp->conf.padlen)
                align = max_t(u32, align, esp->conf.padlen);
        x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
        return 0;

error:
        x->data = esp;
        esp_destroy(x);
        x->data = NULL;
        return -EINVAL;
}
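/*
 * Worked example for the length bookkeeping above (illustrative): a
 * 16-byte-block cipher in tunnel mode gives header_len = 8 (ESP header)
 * + 16 (IV) + 20 (outer IPv4 header) = 44 bytes, and with a 12-byte
 * truncated ICV the formula reserves trailer_len = 16 (align) + 1 + 12
 * = 29 bytes.
 */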
static struct xfrm_type esp_type =
{
        .description    = "ESP4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp_init_state,
        .destructor     = esp_destroy,
        .get_mtu        = esp4_get_mtu,
        .input          = esp_input,
        .output         = esp_output
};

static struct net_protocol esp4_protocol = {
        .handler        = xfrm4_rcv,
        .err_handler    = esp4_err,
        .no_policy      = 1,
};
static int __init esp4_init(void)
{
        if (xfrm_register_type(&esp_type, AF_INET) < 0) {
                printk(KERN_INFO "ip esp init: can't add xfrm type\n");
                return -EAGAIN;
        }
        if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
                printk(KERN_INFO "ip esp init: can't add protocol\n");
                xfrm_unregister_type(&esp_type, AF_INET);
                return -EAGAIN;
        }
        return 0;
}

static void __exit esp4_fini(void)
{
        if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
                printk(KERN_INFO "ip esp close: can't remove protocol\n");
        if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
                printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);