/* xfrm_user.c: User interface to configure xfrm engine.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/init.h>
#include <linux/security.h>
#include <asm/uaccess.h>

static struct sock *xfrm_nl;
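/* Validate one algorithm attribute (XFRMA_ALG_AUTH/CRYPT/COMP): the
 * payload must be large enough for struct xfrm_algo plus the key bits,
 * auth/crypt algorithms must carry a key unless they are the null
 * transforms, and the algorithm name is forcibly NUL-terminated. */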
static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
	struct rtattr *rt = xfrma[type - 1];
	struct xfrm_algo *algp;

	len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp);

	len -= (algp->alg_key_len + 7U) / 8;

	if (!algp->alg_key_len &&
	    strcmp(algp->alg_name, "digest_null") != 0)

	if (!algp->alg_key_len &&
	    strcmp(algp->alg_name, "cipher_null") != 0)

	/* Zero length keys are legal. */

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
static int verify_encap_tmpl(struct rtattr **xfrma)
	struct rtattr *rt = xfrma[XFRMA_ENCAP - 1];
	struct xfrm_encap_tmpl *encap;

	if ((rt->rta_len - sizeof(*rt)) < sizeof(*encap))
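/* Check an add/update SA request: the SA protocol dictates which
 * algorithm attributes must (and must not) be present - AH wants only an
 * auth algorithm, ESP wants auth and/or crypt, IPCOMP wants only a
 * compression algorithm - then each supplied attribute is validated. */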
static int verify_newsa_info(struct xfrm_usersa_info *p,
			     struct rtattr **xfrma)

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)

	switch (p->id.proto) {

		if (!xfrma[XFRMA_ALG_AUTH-1] ||
		    xfrma[XFRMA_ALG_CRYPT-1] ||
		    xfrma[XFRMA_ALG_COMP-1])

		if ((!xfrma[XFRMA_ALG_AUTH-1] &&
		     !xfrma[XFRMA_ALG_CRYPT-1]) ||
		    xfrma[XFRMA_ALG_COMP-1])

		if (!xfrma[XFRMA_ALG_COMP-1] ||
		    xfrma[XFRMA_ALG_AUTH-1] ||
		    xfrma[XFRMA_ALG_CRYPT-1])

	if ((err = verify_one_alg(xfrma, XFRMA_ALG_AUTH)))
	if ((err = verify_one_alg(xfrma, XFRMA_ALG_CRYPT)))
	if ((err = verify_one_alg(xfrma, XFRMA_ALG_COMP)))
	if ((err = verify_encap_tmpl(xfrma)))
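/* Copy one user-supplied algorithm into freshly allocated kernel memory:
 * look the algorithm up by name, record its pfkey algorithm id in *props,
 * and duplicate the xfrm_algo header together with the key material. */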
static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
			   struct xfrm_algo_desc *(*get_byname)(char *, int),
			   struct rtattr *u_arg)
	struct rtattr *rta = u_arg;
	struct xfrm_algo *p, *ualg;
	struct xfrm_algo_desc *algo;

	ualg = RTA_DATA(rta);

	algo = get_byname(ualg->alg_name, 1);

	*props = algo->desc.sadb_alg_id;

	len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8;
	p = kmalloc(len, GFP_KERNEL);

	memcpy(p, ualg, len);
static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_arg)
	struct rtattr *rta = u_arg;
	struct xfrm_encap_tmpl *p, *uencap;

	uencap = RTA_DATA(rta);
	p = kmalloc(sizeof(*p), GFP_KERNEL);

	memcpy(p, uencap, sizeof(*p));
static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
	memcpy(&x->id, &p->id, sizeof(x->id));
	memcpy(&x->sel, &p->sel, sizeof(x->sel));
	memcpy(&x->lft, &p->lft, sizeof(x->lft));
	x->props.mode = p->mode;
	x->props.replay_window = p->replay_window;
	x->props.reqid = p->reqid;
	x->props.family = p->family;
	x->props.saddr = p->saddr;
	x->props.flags = p->flags;
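/* Build a kernel xfrm_state from an xfrm_usersa_info message plus its
 * algorithm and encapsulation attributes, then run xfrm_init_state() on
 * it.  On failure the partially built state is marked XFRM_STATE_DEAD. */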
static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
					       struct rtattr **xfrma,
					       int *errp)
	struct xfrm_state *x = xfrm_state_alloc();

	copy_from_user_state(x, p);

	if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
				   xfrm_aalg_get_byname,
				   xfrma[XFRMA_ALG_AUTH-1])))
	if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
				   xfrm_ealg_get_byname,
				   xfrma[XFRMA_ALG_CRYPT-1])))
	if ((err = attach_one_algo(&x->calg, &x->props.calgo,
				   xfrm_calg_get_byname,
				   xfrma[XFRMA_ALG_COMP-1])))
	if ((err = attach_encap_tmpl(&x->encap, xfrma[XFRMA_ENCAP-1])))

	err = xfrm_init_state(x);

	x->km.state = XFRM_STATE_DEAD;
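/* XFRM_MSG_NEWSA/XFRM_MSG_UPDSA handler: validate the request, construct
 * the state, add it to (or update it in) the SA database, and notify any
 * registered key managers of the change via km_state_notify(). */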
static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
	struct xfrm_usersa_info *p = NLMSG_DATA(nlh);
	struct xfrm_state *x;

	err = verify_newsa_info(p, (struct rtattr **) xfrma);

	x = xfrm_state_construct(p, (struct rtattr **) xfrma, &err);

	if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
		err = xfrm_state_add(x);
	else
		err = xfrm_state_update(x);

	x->km.state = XFRM_STATE_DEAD;

	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;

	km_state_notify(x, &c);
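/* XFRM_MSG_DELSA handler: look the SA up by (daddr, spi, proto, family),
 * refuse to delete states owned by the kernel, delete it, and broadcast
 * the deletion to key managers. */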
static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
	struct xfrm_state *x;

	struct xfrm_usersa_id *p = NLMSG_DATA(nlh);

	x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);

	if (xfrm_state_kern(x)) {

	err = xfrm_state_delete(x);

	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;
	km_state_notify(x, &c);
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
	memcpy(&p->id, &x->id, sizeof(p->id));
	memcpy(&p->sel, &x->sel, sizeof(p->sel));
	memcpy(&p->lft, &x->lft, sizeof(p->lft));
	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
	memcpy(&p->stats, &x->stats, sizeof(p->stats));
	p->saddr = x->props.saddr;
	p->mode = x->props.mode;
	p->replay_window = x->props.replay_window;
	p->reqid = x->props.reqid;
	p->family = x->props.family;
	p->flags = x->props.flags;
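/* State shared by the netlink dump callbacks: the request skb, the skb
 * being filled, and the sequence number, flags and index bookkeeping used
 * while walking the state or policy databases. */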
struct xfrm_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
	struct xfrm_dump_info *sp = ptr;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_usersa_info *p;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;

	if (sp->this_idx < sp->start_idx)

	nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
			sp->nlmsg_seq,
			XFRM_MSG_NEWSA, sizeof(*p));
	nlh->nlmsg_flags = sp->nlmsg_flags;

	copy_to_user_state(x, p);

	RTA_PUT(skb, XFRMA_ALG_AUTH,
		sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg);

	RTA_PUT(skb, XFRMA_ALG_CRYPT,
		sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg);

	RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);

	RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);

	nlh->nlmsg_len = skb->tail - b;

	skb_trim(skb, b - skb->data);
static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
	struct xfrm_dump_info info;

	info.in_skb = cb->skb;

	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	info.start_idx = cb->args[0];
	(void) xfrm_state_walk(IPSEC_PROTO_ANY, dump_one_state, &info);
	cb->args[0] = info.this_idx;
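/* Render a single xfrm_state into a freshly allocated skb, addressed to
 * the requester, by reusing dump_one_state() with dumping disabled
 * (start and current index both zero). */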
static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
					  struct xfrm_state *x, u32 seq)
	struct xfrm_dump_info info;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
	info.in_skb = in_skb;

	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;
	info.this_idx = info.start_idx = 0;

	if (dump_one_state(x, 0, &info)) {
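/* XFRM_MSG_GETSA handler: look up one SA by id and unicast the encoded
 * reply back to the requesting socket. */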
static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
	struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
	struct xfrm_state *x;
	struct sk_buff *resp_skb;

	x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);

	err = netlink_unicast(xfrm_nl, resp_skb,
			      NETLINK_CB(skb).pid, MSG_DONTWAIT);
static int verify_userspi_info(struct xfrm_userspi_info *p)
	switch (p->info.id.proto) {

		/* IPCOMP spi is 16-bits. */
		if (p->max >= 0x10000)
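/* XFRM_MSG_ALLOCSPI handler: locate the matching ACQUIRE state (by
 * sequence number, or via xfrm_find_acq()), allocate an SPI for it within
 * the user-supplied [min, max] range, and unicast the resulting SA back
 * to the requester. */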
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
	struct xfrm_state *x;
	struct xfrm_userspi_info *p;
	struct sk_buff *resp_skb;
	xfrm_address_t *daddr;

	err = verify_userspi_info(p);

	family = p->info.family;
	daddr = &p->info.id.daddr;

	x = xfrm_find_acq_byseq(p->info.seq);
	if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {

	x = xfrm_find_acq(p->info.mode, p->info.reqid,
			  p->info.id.proto, daddr,

		resp_skb = ERR_PTR(-ENOENT);

	spin_lock_bh(&x->lock);
	if (x->km.state != XFRM_STATE_DEAD) {
		xfrm_alloc_spi(x, htonl(p->min), htonl(p->max));

		resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);

	spin_unlock_bh(&x->lock);

	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);

	err = netlink_unicast(xfrm_nl, resp_skb,
			      NETLINK_CB(skb).pid, MSG_DONTWAIT);
static int verify_policy_dir(__u8 dir)
	case XFRM_POLICY_OUT:
	case XFRM_POLICY_FWD:
static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
	case XFRM_SHARE_SESSION:
	case XFRM_SHARE_USER:
	case XFRM_SHARE_UNIQUE:

	case XFRM_POLICY_ALLOW:
	case XFRM_POLICY_BLOCK:

	switch (p->sel.family) {

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)

		return -EAFNOSUPPORT;

	return verify_policy_dir(p->dir);
static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
			   int nr)
	for (i = 0; i < nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];

		memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
		memcpy(&t->saddr, &ut->saddr,
		       sizeof(xfrm_address_t));
		t->reqid = ut->reqid;

		t->share = ut->share;
		t->optional = ut->optional;
		t->aalgos = ut->aalgos;
		t->ealgos = ut->ealgos;
		t->calgos = ut->calgos;
static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **xfrma)
	struct rtattr *rt = xfrma[XFRMA_TMPL-1];
	struct xfrm_user_tmpl *utmpl;

	nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl);

	if (nr > XFRM_MAX_DEPTH)

	copy_templates(pol, RTA_DATA(rt), nr);
static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
	xp->priority = p->priority;
	xp->index = p->index;
	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
	xp->action = p->action;
	xp->flags = p->flags;
	xp->family = p->sel.family;
	/* XXX xp->share = p->share; */
static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
	p->priority = xp->priority;
	p->index = xp->index;
	p->sel.family = xp->family;

	p->action = xp->action;
	p->flags = xp->flags;
	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct rtattr **xfrma, int *errp)
	struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);

	copy_from_user_policy(xp, p);
	err = copy_from_user_tmpl(xp, xfrma);
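/* XFRM_MSG_NEWPOLICY/XFRM_MSG_UPDPOLICY handler: validate and build the
 * policy, insert it into the policy database (exclusively for NEWPOLICY),
 * and notify key managers of the change. */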
static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
	struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh);
	struct xfrm_policy *xp;

	err = verify_newpolicy_info(p);

	xp = xfrm_policy_construct(p, (struct rtattr **) xfrma, &err);
	/* Shouldn't excl be based on nlh flags? This is really anti-netlink,
	 * i.e. more pfkey-derived: in netlink, excl is a flag and you
	 * wouldn't need a separate XFRM_MSG_UPDPOLICY type. - JHS */
	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
	err = xfrm_policy_insert(p->dir, xp, excl);

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	km_policy_notify(xp, p->dir, &c);
static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];

	if (xp->xfrm_nr == 0)

	for (i = 0; i < xp->xfrm_nr; i++) {
		struct xfrm_user_tmpl *up = &vec[i];
		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];

		memcpy(&up->id, &kp->id, sizeof(up->id));
		up->family = xp->family;
		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
		up->reqid = kp->reqid;

		up->share = kp->share;
		up->optional = kp->optional;
		up->aalgos = kp->aalgos;
		up->ealgos = kp->ealgos;
		up->calgos = kp->calgos;

	RTA_PUT(skb, XFRMA_TMPL,
		(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr),
static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;

	if (sp->this_idx < sp->start_idx)

	nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
			sp->nlmsg_seq,
			XFRM_MSG_NEWPOLICY, sizeof(*p));

	nlh->nlmsg_flags = sp->nlmsg_flags;

	copy_to_user_policy(xp, p, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)

	nlh->nlmsg_len = skb->tail - b;

	skb_trim(skb, b - skb->data);
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
	struct xfrm_dump_info info;

	info.in_skb = cb->skb;

	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	info.start_idx = cb->args[0];
	(void) xfrm_policy_walk(dump_one_policy, &info);
	cb->args[0] = info.this_idx;
static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
					   struct xfrm_policy *xp,
					   int dir, u32 seq)
	struct xfrm_dump_info info;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
	info.in_skb = in_skb;

	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;
	info.this_idx = info.start_idx = 0;

	if (dump_one_policy(xp, dir, 0, &info) < 0) {
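/* XFRM_MSG_GETPOLICY/XFRM_MSG_DELPOLICY handler: look the policy up by
 * index or selector, deleting it when the message type asks for that; a
 * GET is answered with a unicast copy of the policy, while a DELETE also
 * generates a km_policy_notify() event. */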
static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
	struct xfrm_policy *xp;
	struct xfrm_userpolicy_id *p;

	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

	err = verify_policy_dir(p->dir);

	xp = xfrm_policy_byid(p->dir, p->index, delete);

	xp = xfrm_policy_bysel(p->dir, &p->sel, delete);

		struct sk_buff *resp_skb;

		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
		if (IS_ERR(resp_skb)) {
			err = PTR_ERR(resp_skb);

		err = netlink_unicast(xfrm_nl, resp_skb,

		c.data.byid = p->index;
		c.event = nlh->nlmsg_type;
		c.seq = nlh->nlmsg_seq;
		c.pid = nlh->nlmsg_pid;
		km_policy_notify(xp, p->dir, &c);
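/* XFRM_MSG_FLUSHSA / XFRM_MSG_FLUSHPOLICY handlers: flush the SA database
 * (optionally restricted to one protocol) or the policy database, then
 * broadcast the flush event to key managers. */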
static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
	struct xfrm_usersa_flush *p = NLMSG_DATA(nlh);

	xfrm_state_flush(p->proto);
	c.data.proto = p->proto;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	km_state_notify(NULL, &c);
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	km_policy_notify(NULL, 0, &c);
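/* Minimum message length for each xfrm netlink message type, used to
 * reject truncated requests before the handlers touch the payload. */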
#define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type))

static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = NLMSG_LENGTH(0),
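/* Dispatch table mapping each user request type to its .doit handler and,
 * for GETSA/GETPOLICY, the .dump callback used with NLM_F_DUMP. */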
static struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, void **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = { .doit = xfrm_del_sa        },
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
						   .dump = xfrm_dump_sa      },
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						   .dump = xfrm_dump_policy  },
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa      },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy  },
static int xfrm_done(struct netlink_callback *cb)
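/* Parse and dispatch one request: only NLM_F_REQUEST messages from
 * sufficiently privileged senders are accepted; GETSA/GETPOLICY with
 * NLM_F_DUMP start a netlink dump, everything else has its rtattrs
 * collected into xfrma[] (indexed by attribute type - 1) and is handed to
 * the .doit handler from xfrm_dispatch.  A userspace request is simply a
 * struct nlmsghdr followed by the fixed struct from xfrm_msg_min[] and
 * optional rtattr-encoded attributes. */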
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
	struct rtattr *xfrma[XFRMA_MAX];
	struct xfrm_link *link;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST))

	type = nlh->nlmsg_type;

	/* Control messages: ignore them. */
	if (type < XFRM_MSG_BASE)

	/* Unknown message: reply with EINVAL. */
	if (type > XFRM_MSG_MAX)

	type -= XFRM_MSG_BASE;
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET. */
	if (security_netlink_recv(skb)) {

	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {

		if (link->dump == NULL)

		if ((*errp = netlink_dump_start(xfrm_nl, skb, nlh,

		rlen = NLMSG_ALIGN(nlh->nlmsg_len);

		skb_pull(skb, rlen);

	memset(xfrma, 0, sizeof(xfrma));

	if (nlh->nlmsg_len < (min_len = xfrm_msg_min[type]))

	if (nlh->nlmsg_len > min_len) {
		int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
		struct rtattr *attr = (void *) nlh + NLMSG_ALIGN(min_len);

		while (RTA_OK(attr, attrlen)) {
			unsigned short flavor = attr->rta_type;

			if (flavor > XFRMA_MAX)

			xfrma[flavor - 1] = attr;

			attr = RTA_NEXT(attr, attrlen);

	if (link->doit == NULL)

	*errp = link->doit(skb, nlh, (void **) &xfrma);
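/* Walk all netlink messages contained in one skb, acking each one as
 * requested and reporting per-message errors back to the sender. */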
static int xfrm_user_rcv_skb(struct sk_buff *skb)
	struct nlmsghdr *nlh;

	while (skb->len >= NLMSG_SPACE(0)) {

		nlh = (struct nlmsghdr *) skb->data;
		if (nlh->nlmsg_len < sizeof(*nlh) ||
		    skb->len < nlh->nlmsg_len)

		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (rlen > skb->len)

		if (xfrm_user_rcv_msg(skb, nlh, &err) < 0) {

			netlink_ack(skb, nlh, err);
		} else if (nlh->nlmsg_flags & NLM_F_ACK)
			netlink_ack(skb, nlh, 0);
		skb_pull(skb, rlen);
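/* Netlink receive callback: drain the socket's receive queue under
 * xfrm_cfg_sem, requeueing an skb at the head of the queue when
 * xfrm_user_rcv_skb() reports it could not be fully processed. */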
static void xfrm_netlink_rcv(struct sock *sk, int len)
	unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);

	struct sk_buff *skb;

	down(&xfrm_cfg_sem);

	if (qlen > skb_queue_len(&sk->sk_receive_queue))
		qlen = skb_queue_len(&sk->sk_receive_queue);

	for (; qlen; qlen--) {
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (xfrm_user_rcv_skb(skb)) {

			skb_queue_head(&sk->sk_receive_queue,
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, int hard)
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;

	nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_EXPIRE,
			sizeof(*ue));
	ue = NLMSG_DATA(nlh);
	nlh->nlmsg_flags = 0;

	copy_to_user_state(x, &ue->state);
	ue->hard = (hard != 0) ? 1 : 0;

	nlh->nlmsg_len = skb->tail - b;

	skb_trim(skb, b - skb->data);
static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
	struct sk_buff *skb;
	int len = NLMSG_LENGTH(sizeof(struct xfrm_user_expire));

	skb = alloc_skb(len, GFP_ATOMIC);

	if (build_expire(skb, x, c->data.hard) < 0)

	NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE;

	return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
static int xfrm_notify_sa_flush(struct km_event *c)
	struct xfrm_usersa_flush *p;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	int len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));

	skb = alloc_skb(len, GFP_ATOMIC);

	nlh = NLMSG_PUT(skb, c->pid, c->seq,
			XFRM_MSG_FLUSHSA, sizeof(*p));
	nlh->nlmsg_flags = 0;

	p = NLMSG_DATA(nlh);
	p->proto = c->data.proto;

	nlh->nlmsg_len = skb->tail - b;

	return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_SA, GFP_ATOMIC);
static inline int xfrm_sa_len(struct xfrm_state *x)

	l += RTA_SPACE(sizeof(*x->aalg) + (x->aalg->alg_key_len+7)/8);

	l += RTA_SPACE(sizeof(*x->ealg) + (x->ealg->alg_key_len+7)/8);

	l += RTA_SPACE(sizeof(*x->calg));

	l += RTA_SPACE(sizeof(*x->encap));
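/* Broadcast an SA change to the XFRMGRP_SA multicast group.  For
 * XFRM_MSG_DELSA the message header is an xfrm_usersa_id and the full SA
 * is nested in an XFRMA_SA attribute; otherwise the header itself is the
 * xfrm_usersa_info, followed by the algorithm/encap attributes. */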
static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
	struct xfrm_usersa_info *p;
	struct xfrm_usersa_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	int len = xfrm_sa_len(x);

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELSA) {
		len += RTA_SPACE(headlen);
		headlen = sizeof(*id);

	len += NLMSG_SPACE(headlen);

	skb = alloc_skb(len, GFP_ATOMIC);

	nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen);
	nlh->nlmsg_flags = 0;

	p = NLMSG_DATA(nlh);
	if (c->event == XFRM_MSG_DELSA) {
		id = NLMSG_DATA(nlh);
		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
		id->spi = x->id.spi;
		id->family = x->props.family;
		id->proto = x->id.proto;

		p = RTA_DATA(__RTA_PUT(skb, XFRMA_SA, sizeof(*p)));

	copy_to_user_state(x, p);

	RTA_PUT(skb, XFRMA_ALG_AUTH,
		sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg);

	RTA_PUT(skb, XFRMA_ALG_CRYPT,
		sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg);

	RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);

	RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);

	nlh->nlmsg_len = skb->tail - b;

	return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_SA, GFP_ATOMIC);
static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)

	case XFRM_MSG_EXPIRE:
		return xfrm_exp_state_notify(x, c);
	case XFRM_MSG_DELSA:
	case XFRM_MSG_UPDSA:
	case XFRM_MSG_NEWSA:
		return xfrm_notify_sa(x, c);
	case XFRM_MSG_FLUSHSA:
		return xfrm_notify_sa_flush(c);

	printk("xfrm_user: Unknown SA event %d\n", c->event);
static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
			 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
			 int dir)
	struct xfrm_user_acquire *ua;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;
	__u32 seq = xfrm_get_acqseq();

	nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_ACQUIRE,
			sizeof(*ua));
	ua = NLMSG_DATA(nlh);
	nlh->nlmsg_flags = 0;

	memcpy(&ua->id, &x->id, sizeof(ua->id));
	memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
	memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
	copy_to_user_policy(xp, &ua->policy, dir);
	ua->aalgos = xt->aalgos;
	ua->ealgos = xt->ealgos;
	ua->calgos = xt->calgos;
	ua->seq = x->km.seq = seq;

	if (copy_to_user_tmpl(xp, skb) < 0)

	nlh->nlmsg_len = skb->tail - b;

	skb_trim(skb, b - skb->data);
static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
			     struct xfrm_policy *xp, int dir)
	struct sk_buff *skb;

	len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	len += NLMSG_SPACE(sizeof(struct xfrm_user_acquire));
	skb = alloc_skb(len, GFP_ATOMIC);

	if (build_acquire(skb, x, xt, xp, dir) < 0)

	NETLINK_CB(skb).dst_groups = XFRMGRP_ACQUIRE;

	return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_ACQUIRE, GFP_ATOMIC);
/* User gives us an xfrm_userpolicy_info followed by an array of 0
 * or more templates. */
static struct xfrm_policy *xfrm_compile_policy(u16 family, int opt,
					       u8 *data, int len, int *dir)
	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
	struct xfrm_policy *xp;

	if (opt != IP_XFRM_POLICY) {

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)

	if (opt != IPV6_XFRM_POLICY) {

	if (len < sizeof(*p) ||
	    verify_newpolicy_info(p))

	nr = ((len - sizeof(*p)) / sizeof(*ut));
	if (nr > XFRM_MAX_DEPTH)

	xp = xfrm_policy_alloc(GFP_KERNEL);

	copy_from_user_policy(xp, p);
	copy_templates(xp, ut, nr);
static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
			   int dir, int hard)
	struct xfrm_user_polexpire *upe;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;

	nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe));
	upe = NLMSG_DATA(nlh);
	nlh->nlmsg_flags = 0;

	copy_to_user_policy(xp, &upe->pol, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)

	nlh->nlmsg_len = skb->tail - b;

	skb_trim(skb, b - skb->data);
static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
	struct sk_buff *skb;

	len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	len += NLMSG_SPACE(sizeof(struct xfrm_user_polexpire));
	skb = alloc_skb(len, GFP_ATOMIC);

	if (build_polexpire(skb, xp, dir, c->data.hard) < 0)

	NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE;

	return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
	struct xfrm_userpolicy_info *p;
	struct xfrm_userpolicy_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	int len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELPOLICY) {
		len += RTA_SPACE(headlen);
		headlen = sizeof(*id);

	len += NLMSG_SPACE(headlen);

	skb = alloc_skb(len, GFP_ATOMIC);

	nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen);

	p = NLMSG_DATA(nlh);
	if (c->event == XFRM_MSG_DELPOLICY) {
		id = NLMSG_DATA(nlh);
		memset(id, 0, sizeof(*id));

		id->index = xp->index;

		memcpy(&id->sel, &xp->selector, sizeof(id->sel));

		p = RTA_DATA(__RTA_PUT(skb, XFRMA_POLICY, sizeof(*p)));

	nlh->nlmsg_flags = 0;

	copy_to_user_policy(xp, p, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)

	nlh->nlmsg_len = skb->tail - b;

	return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_POLICY, GFP_ATOMIC);
static int xfrm_notify_policy_flush(struct km_event *c)
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	int len = NLMSG_LENGTH(0);

	skb = alloc_skb(len, GFP_ATOMIC);

	nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0);

	nlh->nlmsg_len = skb->tail - b;

	return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_POLICY, GFP_ATOMIC);
static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)

	case XFRM_MSG_NEWPOLICY:
	case XFRM_MSG_UPDPOLICY:
	case XFRM_MSG_DELPOLICY:
		return xfrm_notify_policy(xp, dir, c);
	case XFRM_MSG_FLUSHPOLICY:
		return xfrm_notify_policy_flush(c);
	case XFRM_MSG_POLEXPIRE:
		return xfrm_exp_policy_notify(xp, dir, c);

	printk("xfrm_user: Unknown Policy event %d\n", c->event);
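/* Key-manager registration: these callbacks let the xfrm core deliver
 * state/policy notifications and ACQUIRE requests over netlink, and
 * compile per-socket policies passed in via setsockopt(). */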
static struct xfrm_mgr netlink_mgr = {

	.notify		= xfrm_send_state_notify,
	.acquire	= xfrm_send_acquire,
	.compile_policy	= xfrm_compile_policy,
	.notify_policy	= xfrm_send_policy_notify,
static int __init xfrm_user_init(void)
	printk(KERN_INFO "Initializing IPsec netlink socket\n");

	xfrm_nl = netlink_kernel_create(NETLINK_XFRM, xfrm_netlink_rcv);
	if (xfrm_nl == NULL)

	xfrm_register_km(&netlink_mgr);

static void __exit xfrm_user_exit(void)
	xfrm_unregister_km(&netlink_mgr);
	sock_release(xfrm_nl->sk_socket);

module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");