#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;
        struct iphdr *top_iph;
        struct ip_esp_hdr *esph;
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct esp_data *esp;
        struct sk_buff *trailer;
        int blksize;
        int clen;
        int alen;
        int nfrags;
        /* Strip IP+ESP header. */
        __skb_pull(skb, skb->h.raw - skb->data);
        /* Now skb is pure payload to encrypt */

        err = -ENOMEM;

        /* Round to block size */
        clen = skb->len;

        esp = x->data;
        alen = esp->auth.icv_trunc_len;
        tfm = esp->conf.tfm;
        desc.tfm = tfm;
        desc.flags = 0;
        blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
        clen = ALIGN(clen + 2, blksize);
        if (esp->conf.padlen)
                clen = ALIGN(clen, esp->conf.padlen);
        if ((nfrags = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
                goto error;
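        /*
         * Fill in the self-describing RFC 2406 padding: bytes 1, 2, 3, ...
         * up to the pad length, followed by the pad-length byte itself.
         * The final next-header byte is written once the IP header is
         * restored below.
         */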
        do {
                int i;

                for (i = 0; i < clen - skb->len - 2; i++)
                        *(u8 *)(trailer->tail + i) = i + 1;
        } while (0);
        *(u8 *)(trailer->tail + clen - skb->len - 2) = (clen - skb->len) - 2;
        pskb_put(skb, trailer, clen - skb->len);
        __skb_push(skb, skb->data - skb->nh.raw);
        top_iph = skb->nh.iph;
        esph = (struct ip_esp_hdr *)(skb->nh.raw + top_iph->ihl * 4);
        top_iph->tot_len = htons(skb->len + alen);
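        /*
         * Stash the inner protocol in the last trailer byte: it becomes
         * the ESP "next header" field, since the outer protocol is
         * rewritten to ESP (or UDP for NAT-T) below.
         */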
        *(u8 *)(trailer->tail - 1) = top_iph->protocol;
        /* this is non-NULL only with UDP Encapsulation */
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
                struct udphdr *uh;
                __be32 *udpdata32;

                uh = (struct udphdr *)esph;
                uh->source = encap->encap_sport;
                uh->dest = encap->encap_dport;
                uh->len = htons(skb->len + alen - top_iph->ihl * 4);
                uh->check = 0;
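                /*
                 * Two encapsulation layouts: ESPINUDP puts the ESP header
                 * right after the UDP header, while ESPINUDP_NON_IKE first
                 * inserts an 8-byte all-zero non-IKE marker so receivers
                 * can tell the packet apart from IKE traffic on the same
                 * port (draft-ietf-ipsec-udp-encaps).
                 */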
                switch (encap->encap_type) {
                default:
                case UDP_ENCAP_ESPINUDP:
                        esph = (struct ip_esp_hdr *)(uh + 1);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        udpdata32 = (__be32 *)(uh + 1);
                        udpdata32[0] = udpdata32[1] = 0;
                        esph = (struct ip_esp_hdr *)(udpdata32 + 2);
                        break;
                }

                top_iph->protocol = IPPROTO_UDP;
        } else
                top_iph->protocol = IPPROTO_ESP;
        esph->spi = x->id.spi;
        esph->seq_no = htonl(++x->replay.oseq);
        xfrm_aevent_doreplay(x);
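        /*
         * IV handling: only the very first packet gets a freshly random
         * IV; afterwards the IV is chained from the previous packet's
         * encryption state (see crypto_blkcipher_get_iv() below).
         */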
        if (esp->conf.ivlen) {
                if (unlikely(!esp->conf.ivinitted)) {
                        get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
                        esp->conf.ivinitted = 1;
                }
                crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
        }
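        /*
         * The payload may span several fragments, so map the skb into a
         * scatterlist for the cipher.  Packets with few fragments use the
         * preallocated esp->sgbuf fast path; more than ESP_NUM_FAST_SG
         * entries fall back to an atomic kmalloc().
         */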
        do {
                struct scatterlist *sg = &esp->sgbuf[0];

                if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
                        sg = kmalloc(sizeof(struct scatterlist) * nfrags, GFP_ATOMIC);
                        if (!sg)
                                goto error;
                }
                skb_to_sgvec(skb, sg, esph->enc_data + esp->conf.ivlen - skb->data, clen);
                err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
                if (unlikely(sg != &esp->sgbuf[0]))
                        kfree(sg);
        } while (0);

        if (unlikely(err))
                goto error;
        if (esp->conf.ivlen) {
                memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
                crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
        }
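        /*
         * The ICV is computed over the ESP header, IV and ciphertext,
         * but only the truncated length is appended to the packet
         * (e.g. 96 of HMAC-SHA1's 160 bits, per RFC 2404).
         */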
        if (esp->auth.icv_full_len) {
                err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
                                     sizeof(*esph) + esp->conf.ivlen + clen);
                memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
        }

        ip_send_check(top_iph);

error:
        return err;
}
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct iphdr *iph;
        struct ip_esp_hdr *esph;
        struct esp_data *esp = x->data;
        struct crypto_blkcipher *tfm = esp->conf.tfm;
        struct blkcipher_desc desc = { .tfm = tfm };
        struct sk_buff *trailer;
        int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
        int alen = esp->auth.icv_trunc_len;
        int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
        int nfrags;
        int ihl;
        u8 nexthdr[2];
        struct scatterlist *sg;
        int padlen;
        int err;
        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
                goto out;

        if (elen <= 0 || (elen & (blksize - 1)))
                goto out;
        /* If integrity check is required, do this. */
        if (esp->auth.icv_full_len) {
                u8 sum[alen];

                err = esp_mac_digest(esp, skb, 0, skb->len - alen);
                if (err)
                        goto out;

                if (skb_copy_bits(skb, skb->len - alen, sum, alen))
                        BUG();

                if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
                        x->stats.integrity_failed++;
                        goto out;
                }
        }
        if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
                goto out;

        skb->ip_summed = CHECKSUM_NONE;

        esph = (struct ip_esp_hdr *)skb->data;

        /* Get ivec. This can be wrong; check against other implementations. */
        if (esp->conf.ivlen)
                crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
        sg = &esp->sgbuf[0];

        if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
                sg = kmalloc(sizeof(struct scatterlist) * nfrags, GFP_ATOMIC);
                if (!sg)
                        goto out;
        }
        skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
        err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
        if (unlikely(sg != &esp->sgbuf[0]))
                kfree(sg);

        if (unlikely(err))
                goto out;
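        /*
         * The two bytes just before the ICV are the pad length and the
         * next-header value written by the sender; the pad length must
         * leave at least one byte of real payload.
         */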
        if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
                BUG();

        padlen = nexthdr[0];
        if (padlen + 2 >= elen)
                goto out;

        /* ... check padding bits here. Silly. :-) */
        iph = skb->nh.iph;
        ihl = iph->ihl * 4;

        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
                struct udphdr *uh = (void *)(skb->nh.raw + ihl);

                /*
                 * 1) if the NAT-T peer's IP or port changed then
                 *    advertise the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
                if (iph->saddr != x->props.saddr.a4 ||
                    uh->source != encap->encap_sport) {
                        xfrm_address_t ipaddr;

                        ipaddr.a4 = iph->saddr;
                        km_new_mapping(x, &ipaddr, uh->source);

                        /* XXX: perhaps add an extra
                         * policy check here, to see
                         * if we should allow or
                         * reject a packet from a
                         * different source address/port.
                         */
                }
                /*
                 * 2) ignore UDP/TCP checksums in case
                 *    of NAT-T in Transport Mode, or
                 *    perform other post-processing fixes
                 *    as per draft-ietf-ipsec-udp-encaps-06,
                 *    section 3.1.2
                 */
                if (x->props.mode == XFRM_MODE_TRANSPORT ||
                    x->props.mode == XFRM_MODE_BEET)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
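        /*
         * Restore the inner protocol, then strip the padding, ICV and
         * the ESP header plus IV so the stack sees only the decrypted
         * payload.
         */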
        iph->protocol = nexthdr[1];
        pskb_trim(skb, skb->len - alen - padlen - 2);
        skb->h.raw = __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen) - ihl;

        return 0;

out:
        return -EINVAL;
}
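/*
 * Illustrative example (not from the original source): in tunnel mode
 * with AES-CBC (16-byte blocks, 16-byte IV) and HMAC-SHA1-96, an mtu of
 * 1500 gives ALIGN(1500 + 2, 16) = 1504, plus header_len (20-byte outer
 * IP + 8-byte ESP + 16-byte IV = 44) and the 12-byte truncated ICV,
 * for a maximum size of 1560.
 */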
static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
{
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
        int enclen = 0;

        switch (x->props.mode) {
        case XFRM_MODE_TUNNEL:
                mtu = ALIGN(mtu + 2, blksize);
                break;
        default:
        case XFRM_MODE_TRANSPORT:
                /* The worst case */
                mtu = ALIGN(mtu + 2, 4) + blksize - 4;
                break;
        case XFRM_MODE_BEET:
                /* The worst case. */
                enclen = IPV4_BEET_PHMAXLEN;
                mtu = ALIGN(mtu + enclen + 2, blksize);
                break;
        }

        if (esp->conf.padlen)
                mtu = ALIGN(mtu, esp->conf.padlen);

        return mtu + x->props.header_len + esp->auth.icv_trunc_len - enclen;
}
static void esp4_err(struct sk_buff *skb, u32 info)
{
        struct iphdr *iph = (struct iphdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
        struct xfrm_state *x;

        if (skb->h.icmph->type != ICMP_DEST_UNREACH ||
            skb->h.icmph->code != ICMP_FRAG_NEEDED)
                return;

        x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi,
                              IPPROTO_ESP, AF_INET);
        if (!x)
                return;
        NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
                 ntohl(esph->spi), ntohl(iph->daddr));
        xfrm_state_put(x);
}
static void esp_destroy(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;

        if (!esp)
                return;

        crypto_free_blkcipher(esp->conf.tfm);
        esp->conf.tfm = NULL;
        kfree(esp->conf.ivec);
        esp->conf.ivec = NULL;
        crypto_free_hash(esp->auth.tfm);
        esp->auth.tfm = NULL;
        kfree(esp->auth.work_icv);
        esp->auth.work_icv = NULL;
        kfree(esp);
}
static int esp_init_state(struct xfrm_state *x)
{
        struct esp_data *esp = NULL;
        struct crypto_blkcipher *tfm;

        /* null auth and encryption can have zero length keys */
        if (x->aalg) {
                if (x->aalg->alg_key_len > 512)
                        goto error;
        }
        if (x->ealg == NULL)
                goto error;

        esp = kzalloc(sizeof(*esp), GFP_KERNEL);
        if (esp == NULL)
                return -ENOMEM;
        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;
                struct crypto_hash *hash;

                esp->auth.key = x->aalg->alg_key;
                esp->auth.key_len = (x->aalg->alg_key_len + 7) / 8;
                hash = crypto_alloc_hash(x->aalg->alg_name, 0,
                                         CRYPTO_ALG_ASYNC);
                if (IS_ERR(hash))
                        goto error;

                esp->auth.tfm = hash;
                if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
                        goto error;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_hash_digestsize(hash)) {
                        NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
                                 x->aalg->alg_name,
                                 crypto_hash_digestsize(hash),
                                 aalg_desc->uinfo.auth.icv_fullbits / 8);
                        goto error;
                }

                esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits / 8;
                esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits / 8;

                esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
                if (!esp->auth.work_icv)
                        goto error;
        }
        esp->conf.key = x->ealg->alg_key;
        esp->conf.key_len = (x->ealg->alg_key_len + 7) / 8;
        tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                goto error;
        esp->conf.tfm = tfm;
        esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
        esp->conf.padlen = 0;
        if (esp->conf.ivlen) {
                esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
                if (unlikely(esp->conf.ivec == NULL))
                        goto error;
                esp->conf.ivinitted = 0;
        }
        if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
                goto error;
        x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;

                switch (encap->encap_type) {
                default:
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
                        break;
                }
        }
        x->data = esp;
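        /*
         * trailer_len is the worst-case per-packet growth behind the
         * payload (padding, pad length, next header and ICV): ask
         * esp4_get_max_size() how large a zero-length payload could
         * get and subtract the header overhead.
         */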
        x->props.trailer_len = esp4_get_max_size(x, 0) - x->props.header_len;
        return 0;

error:
        x->data = esp;
        esp_destroy(x);
        x->data = NULL;
        return -EINVAL;
}
static struct xfrm_type esp_type =
{
        .description    = "ESP4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .init_state     = esp_init_state,
        .destructor     = esp_destroy,
        .get_max_size   = esp4_get_max_size,
        .input          = esp_input,
        .output         = esp_output
};

static struct net_protocol esp4_protocol = {
        .handler        = xfrm4_rcv,
        .err_handler    = esp4_err,
        .no_policy      = 1,
};
static int __init esp4_init(void)
{
        if (xfrm_register_type(&esp_type, AF_INET) < 0) {
                printk(KERN_INFO "ip esp init: can't add xfrm type\n");
                return -EAGAIN;
        }

        if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
                printk(KERN_INFO "ip esp init: can't add protocol\n");
                xfrm_unregister_type(&esp_type, AF_INET);
                return -EAGAIN;
        }
        return 0;
}
static void __exit esp4_fini(void)
{
        if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
                printk(KERN_INFO "ip esp close: can't remove protocol\n");
        if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
                printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
}
module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");