[NETFILTER]: x_tables: pass registered match/target data to match/target functions
[linux-2.6] / net / ipv4 / netfilter / ip_tables.c
1 /*
2  * Packet matching code.
3  *
4  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5  * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12  *      - increase module usage count as soon as we have rules inside
13  *        a table
14  * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15  *      - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
16  */
17 #include <linux/config.h>
18 #include <linux/cache.h>
19 #include <linux/capability.h>
20 #include <linux/skbuff.h>
21 #include <linux/kmod.h>
22 #include <linux/vmalloc.h>
23 #include <linux/netdevice.h>
24 #include <linux/module.h>
25 #include <linux/icmp.h>
26 #include <net/ip.h>
27 #include <asm/uaccess.h>
28 #include <asm/semaphore.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter/x_tables.h>
34 #include <linux/netfilter_ipv4/ip_tables.h>
35
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38 MODULE_DESCRIPTION("IPv4 packet filter");
39
40 /*#define DEBUG_IP_FIREWALL*/
41 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
42 /*#define DEBUG_IP_FIREWALL_USER*/
43
/* Per-packet debug output; compiled away unless DEBUG_IP_FIREWALL. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* Debug output for the userspace configuration path (table loading);
 * compiled away unless DEBUG_IP_FIREWALL_USER. */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* Soft assertion: logs the location on failure but does not panic. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)                                         \
do {                                                            \
        if (!(x))                                               \
                printk("IP_NF_ASSERT: %s:%s:%u\n",              \
                       __FUNCTION__, __FILE__, __LINE__);       \
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with...
 * (defining away "static" and "inline" exposes every function
 * to the debugger; never enabled in normal builds) */
#define static
#define inline
#endif
72
73 /*
74    We keep a set of rules for each CPU, so we can avoid write-locking
75    them in the softirq when updating the counters and therefore
76    only need to read-lock in the softirq; doing a write_lock_bh() in user
77    context stops packets coming through and allows user context to read
78    the counters or update the rules.
79
80    Hence the start of any table is given by get_table() below.  */
81
/* Returns whether the packet matches the IP-header part of a rule
 * (addresses, interfaces, protocol, fragment flag) or not.
 * @ip:     IP header of the packet being checked
 * @indev:  input device name (IFNAMSIZ, long-aligned, zero padded)
 * @outdev: output device name (same layout)
 * @ipinfo: the rule's IP match data, including inversion flags
 * @isfrag: non-zero for non-first fragments (frag offset != 0)
 */
static inline int
ip_packet_match(const struct iphdr *ip,
                const char *indev,
                const char *outdev,
                const struct ipt_ip *ipinfo,
                int isfrag)
{
        size_t i;
        unsigned long ret;

/* Evaluate a condition and flip the result when the rule carries the
 * corresponding inversion flag (iptables "!" syntax). */
#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))

        /* Masked source/destination address comparison. */
        if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
                  IPT_INV_SRCIP)
            || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
                     IPT_INV_DSTIP)) {
                dprintf("Source or dest mismatch.\n");

                dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
                        NIPQUAD(ip->saddr),
                        NIPQUAD(ipinfo->smsk.s_addr),
                        NIPQUAD(ipinfo->src.s_addr),
                        ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
                dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
                        NIPQUAD(ip->daddr),
                        NIPQUAD(ipinfo->dmsk.s_addr),
                        NIPQUAD(ipinfo->dst.s_addr),
                        ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
                return 0;
        }

        /* Look for ifname matches; this should unroll nicely.
         * Compares device names word-by-word under the interface mask,
         * accumulating any differing masked bits into ret. */
        for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
                ret |= (((const unsigned long *)indev)[i]
                        ^ ((const unsigned long *)ipinfo->iniface)[i])
                        & ((const unsigned long *)ipinfo->iniface_mask)[i];
        }

        if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
                dprintf("VIA in mismatch (%s vs %s).%s\n",
                        indev, ipinfo->iniface,
                        ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
                return 0;
        }

        /* Same word-wise masked comparison for the output device. */
        for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
                ret |= (((const unsigned long *)outdev)[i]
                        ^ ((const unsigned long *)ipinfo->outiface)[i])
                        & ((const unsigned long *)ipinfo->outiface_mask)[i];
        }

        if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
                dprintf("VIA out mismatch (%s vs %s).%s\n",
                        outdev, ipinfo->outiface,
                        ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
                return 0;
        }

        /* Check specific protocol (proto == 0 means "any"). */
        if (ipinfo->proto
            && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
                dprintf("Packet protocol %hi does not match %hi.%s\n",
                        ip->protocol, ipinfo->proto,
                        ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
                return 0;
        }

        /* If we have a fragment rule but the packet is not a fragment
         * then we return zero */
        if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
                dprintf("Fragment rule but not fragment.%s\n",
                        ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
                return 0;
        }

        return 1;
}
160
161 static inline int
162 ip_checkentry(const struct ipt_ip *ip)
163 {
164         if (ip->flags & ~IPT_F_MASK) {
165                 duprintf("Unknown flag bits set: %08X\n",
166                          ip->flags & ~IPT_F_MASK);
167                 return 0;
168         }
169         if (ip->invflags & ~IPT_INV_MASK) {
170                 duprintf("Unknown invflag bits set: %08X\n",
171                          ip->invflags & ~IPT_INV_MASK);
172                 return 0;
173         }
174         return 1;
175 }
176
/* Fallback target handler: log (rate-limited) the error string carried
 * in targinfo and drop the packet. Reaching this at runtime indicates
 * a ruleset problem. */
static unsigned int
ipt_error(struct sk_buff **pskb,
          const struct net_device *in,
          const struct net_device *out,
          unsigned int hooknum,
          const void *targinfo,
          void *userinfo)
{
        if (net_ratelimit())
                printk("ip_tables: error: `%s'\n", (char *)targinfo);

        return NF_DROP;
}
190
191 static inline
192 int do_match(struct ipt_entry_match *m,
193              const struct sk_buff *skb,
194              const struct net_device *in,
195              const struct net_device *out,
196              int offset,
197              int *hotdrop)
198 {
199         /* Stop iteration if it doesn't match */
200         if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
201                                       offset, skb->nh.iph->ihl*4, hotdrop))
202                 return 1;
203         else
204                 return 0;
205 }
206
/* Translate a byte offset within a rule blob into an entry pointer. */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
        return (struct ipt_entry *)((char *)base + offset);
}
212
/* Returns one of the generic firewall policies, like NF_ACCEPT.
 * Walks the per-CPU copy of the rule blob for the given hook,
 * evaluating matches and targets until a final verdict is reached.
 * Jumps/returns between chains are handled with a back-pointer
 * "stack" saved in each entry's comefrom field. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
             unsigned int hook,
             const struct net_device *in,
             const struct net_device *out,
             struct ipt_table *table,
             void *userdata)
{
        static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
        u_int16_t offset;
        struct iphdr *ip;
        u_int16_t datalen;
        int hotdrop = 0;
        /* Initializing verdict to NF_DROP keeps gcc happy. */
        unsigned int verdict = NF_DROP;
        const char *indev, *outdev;
        void *table_base;
        struct ipt_entry *e, *back;
        struct xt_table_info *private = table->private;

        /* Initialization */
        ip = (*pskb)->nh.iph;
        datalen = (*pskb)->len - ip->ihl * 4;
        indev = in ? in->name : nulldevname;
        outdev = out ? out->name : nulldevname;
        /* We handle fragments by dealing with the first fragment as
         * if it was a normal packet.  All other fragments are treated
         * normally, except that they will NEVER match rules that ask
         * things we don't know, ie. tcp syn flag or ports).  If the
         * rule is also a fragment-specific rule, non-fragments won't
         * match it. */
        offset = ntohs(ip->frag_off) & IP_OFFSET;

        read_lock_bh(&table->lock);
        IP_NF_ASSERT(table->valid_hooks & (1 << hook));
        /* Use this CPU's private copy of the entries. */
        table_base = (void *)private->entries[smp_processor_id()];
        e = get_entry(table_base, private->hook_entry[hook]);

        /* For return from builtin chain */
        back = get_entry(table_base, private->underflow[hook]);

        do {
                IP_NF_ASSERT(e);
                IP_NF_ASSERT(back);
                if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
                        struct ipt_entry_target *t;

                        /* Any failing match skips to the next rule. */
                        if (IPT_MATCH_ITERATE(e, do_match,
                                              *pskb, in, out,
                                              offset, &hotdrop) != 0)
                                goto no_match;

                        ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

                        t = ipt_get_target(e);
                        IP_NF_ASSERT(t->u.kernel.target);
                        /* Standard target? (NULL target fn means a plain
                         * verdict or jump encoded in the verdict field) */
                        if (!t->u.kernel.target->target) {
                                int v;

                                v = ((struct ipt_standard_target *)t)->verdict;
                                if (v < 0) {
                                        /* Pop from stack? */
                                        if (v != IPT_RETURN) {
                                                /* Final verdict, encoded as -(verdict+1). */
                                                verdict = (unsigned)(-v) - 1;
                                                break;
                                        }
                                        /* RETURN: resume at the saved back
                                         * pointer and pop one stack level. */
                                        e = back;
                                        back = get_entry(table_base,
                                                         back->comefrom);
                                        continue;
                                }
                                if (table_base + v != (void *)e + e->next_offset
                                    && !(e->ip.flags & IPT_F_GOTO)) {
                                        /* Save old back ptr in next entry */
                                        struct ipt_entry *next
                                                = (void *)e + e->next_offset;
                                        next->comefrom
                                                = (void *)back - table_base;
                                        /* set back pointer to next entry */
                                        back = next;
                                }

                                e = get_entry(table_base, v);
                        } else {
                                /* Targets which reenter must return
                                   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
                                /* Plant a cookie to detect re-entry of
                                 * ipt_do_table from within a target. */
                                ((struct ipt_entry *)table_base)->comefrom
                                        = 0xeeeeeeec;
#endif
                                verdict = t->u.kernel.target->target(pskb,
                                                                     in, out,
                                                                     hook,
                                                                     t->u.kernel.target,
                                                                     t->data,
                                                                     userdata);

#ifdef CONFIG_NETFILTER_DEBUG
                                if (((struct ipt_entry *)table_base)->comefrom
                                    != 0xeeeeeeec
                                    && verdict == IPT_CONTINUE) {
                                        printk("Target %s reentered!\n",
                                               t->u.kernel.target->name);
                                        verdict = NF_DROP;
                                }
                                ((struct ipt_entry *)table_base)->comefrom
                                        = 0x57acc001;
#endif
                                /* Target might have changed stuff. */
                                ip = (*pskb)->nh.iph;
                                datalen = (*pskb)->len - ip->ihl * 4;

                                if (verdict == IPT_CONTINUE)
                                        e = (void *)e + e->next_offset;
                                else
                                        /* Verdict */
                                        break;
                        }
                } else {

                no_match:
                        e = (void *)e + e->next_offset;
                }
        } while (!hotdrop);

        read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
        return NF_ACCEPT;
#else
        if (hotdrop)
                return NF_DROP;
        else return verdict;
#endif
}
350
351 /* All zeroes == unconditional rule. */
352 static inline int
353 unconditional(const struct ipt_ip *ip)
354 {
355         unsigned int i;
356
357         for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
358                 if (((__u32 *)ip)[i])
359                         return 0;
360
361         return 1;
362 }
363
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom.
   Performs an iterative depth-first walk over every chain reachable
   from each valid hook entry point, chasing jumps and backtracking at
   unconditional returns. */
static int
mark_source_chains(struct xt_table_info *newinfo,
                   unsigned int valid_hooks, void *entry0)
{
        unsigned int hook;

        /* No recursion; use packet counter to save back ptrs (reset
           to 0 as we leave), and comefrom to save source hook bitmask */
        for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
                unsigned int pos = newinfo->hook_entry[hook];
                struct ipt_entry *e
                        = (struct ipt_entry *)(entry0 + pos);

                if (!(valid_hooks & (1 << hook)))
                        continue;

                /* Set initial back pointer. */
                e->counters.pcnt = pos;

                for (;;) {
                        struct ipt_standard_target *t
                                = (void *)ipt_get_target(e);

                        /* Bit NF_IP_NUMHOOKS marks "currently on the
                         * walk path" — seeing it again means a loop. */
                        if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
                                printk("iptables: loop hook %u pos %u %08X.\n",
                                       hook, pos, e->comefrom);
                                return 0;
                        }
                        e->comefrom
                                |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));

                        /* Unconditional return/END. */
                        if (e->target_offset == sizeof(struct ipt_entry)
                            && (strcmp(t->target.u.user.name,
                                       IPT_STANDARD_TARGET) == 0)
                            && t->verdict < 0
                            && unconditional(&e->ip)) {
                                unsigned int oldpos, size;

                                /* Return: backtrack through the last
                                   big jump. */
                                do {
                                        /* Clear the on-path bit as we leave. */
                                        e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
                                        if (e->comefrom
                                            & (1 << NF_IP_NUMHOOKS)) {
                                                duprintf("Back unset "
                                                         "on hook %u "
                                                         "rule %u\n",
                                                         hook, pos);
                                        }
#endif
                                        oldpos = pos;
                                        /* Pop the back pointer saved in pcnt. */
                                        pos = e->counters.pcnt;
                                        e->counters.pcnt = 0;

                                        /* We're at the start. */
                                        if (pos == oldpos)
                                                goto next;

                                        e = (struct ipt_entry *)
                                                (entry0 + pos);
                                } while (oldpos == pos + e->next_offset);

                                /* Move along one */
                                size = e->next_offset;
                                e = (struct ipt_entry *)
                                        (entry0 + pos + size);
                                e->counters.pcnt = pos;
                                pos += size;
                        } else {
                                int newpos = t->verdict;

                                if (strcmp(t->target.u.user.name,
                                           IPT_STANDARD_TARGET) == 0
                                    && newpos >= 0) {
                                        /* This a jump; chase it. */
                                        duprintf("Jump rule %u -> %u\n",
                                                 pos, newpos);
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
                                }
                                e = (struct ipt_entry *)
                                        (entry0 + newpos);
                                /* Remember where we came from in pcnt. */
                                e->counters.pcnt = pos;
                                pos = newpos;
                        }
                }
                next:
                duprintf("Finished chain %u\n", hook);
        }
        return 1;
}
460
461 static inline int
462 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
463 {
464         if (i && (*i)-- == 0)
465                 return 1;
466
467         if (m->u.kernel.match->destroy)
468                 m->u.kernel.match->destroy(m->u.kernel.match, m->data,
469                                            m->u.match_size - sizeof(*m));
470         module_put(m->u.kernel.match->me);
471         return 0;
472 }
473
474 static inline int
475 standard_check(const struct ipt_entry_target *t,
476                unsigned int max_offset)
477 {
478         struct ipt_standard_target *targ = (void *)t;
479
480         /* Check standard info. */
481         if (targ->verdict >= 0
482             && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
483                 duprintf("ipt_standard_check: bad verdict (%i)\n",
484                          targ->verdict);
485                 return 0;
486         }
487         if (targ->verdict < -NF_MAX_VERDICT - 1) {
488                 duprintf("ipt_standard_check: bad negative verdict (%i)\n",
489                          targ->verdict);
490                 return 0;
491         }
492         return 1;
493 }
494
/* Resolve and validate one match extension of an entry.
 * On success, stores the resolved match (holding a module reference)
 * in m->u.kernel.match and increments *i; on failure the reference is
 * dropped and a negative errno is returned. */
static inline int
check_match(struct ipt_entry_match *m,
            const char *name,
            const struct ipt_ip *ip,
            unsigned int hookmask,
            unsigned int *i)
{
        struct ipt_match *match;
        int ret;

        /* Look the match up by name/revision, autoloading the
         * "ipt_<name>" module if it is not registered yet. */
        match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
                                                   m->u.user.revision),
                                        "ipt_%s", m->u.user.name);
        if (IS_ERR(match) || !match) {
                duprintf("check_match: `%s' not found\n", m->u.user.name);
                return match ? PTR_ERR(match) : -ENOENT;
        }
        m->u.kernel.match = match;

        /* Generic x_tables checks (data size, protocol, hook mask). */
        ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
                             name, hookmask, ip->proto,
                             ip->invflags & IPT_INV_PROTO);
        if (ret)
                goto err;

        /* Match-specific validation of the user-supplied data. */
        if (m->u.kernel.match->checkentry
            && !m->u.kernel.match->checkentry(name, ip, match, m->data,
                                              m->u.match_size - sizeof(*m),
                                              hookmask)) {
                duprintf("ip_tables: check failed for `%s'.\n",
                         m->u.kernel.match->name);
                ret = -EINVAL;
                goto err;
        }

        /* Count successfully checked matches for precise unwinding. */
        (*i)++;
        return 0;
err:
        /* Drop the module reference taken by the lookup above. */
        module_put(m->u.kernel.match->me);
        return ret;
}
536
537 static struct ipt_target ipt_standard_target;
538
539 static inline int
540 check_entry(struct ipt_entry *e, const char *name, unsigned int size,
541             unsigned int *i)
542 {
543         struct ipt_entry_target *t;
544         struct ipt_target *target;
545         int ret;
546         unsigned int j;
547
548         if (!ip_checkentry(&e->ip)) {
549                 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
550                 return -EINVAL;
551         }
552
553         j = 0;
554         ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
555         if (ret != 0)
556                 goto cleanup_matches;
557
558         t = ipt_get_target(e);
559         target = try_then_request_module(xt_find_target(AF_INET,
560                                                      t->u.user.name,
561                                                      t->u.user.revision),
562                                          "ipt_%s", t->u.user.name);
563         if (IS_ERR(target) || !target) {
564                 duprintf("check_entry: `%s' not found\n", t->u.user.name);
565                 ret = target ? PTR_ERR(target) : -ENOENT;
566                 goto cleanup_matches;
567         }
568         t->u.kernel.target = target;
569
570         ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
571                               name, e->comefrom, e->ip.proto,
572                               e->ip.invflags & IPT_INV_PROTO);
573         if (ret)
574                 goto err;
575
576         if (t->u.kernel.target == &ipt_standard_target) {
577                 if (!standard_check(t, size)) {
578                         ret = -EINVAL;
579                         goto cleanup_matches;
580                 }
581         } else if (t->u.kernel.target->checkentry
582                    && !t->u.kernel.target->checkentry(name, e, target, t->data,
583                                                       t->u.target_size
584                                                       - sizeof(*t),
585                                                       e->comefrom)) {
586                 duprintf("ip_tables: check failed for `%s'.\n",
587                          t->u.kernel.target->name);
588                 ret = -EINVAL;
589                 goto err;
590         }
591
592         (*i)++;
593         return 0;
594  err:
595         module_put(t->u.kernel.target->me);
596  cleanup_matches:
597         IPT_MATCH_ITERATE(e, cleanup_match, &j);
598         return ret;
599 }
600
601 static inline int
602 check_entry_size_and_hooks(struct ipt_entry *e,
603                            struct xt_table_info *newinfo,
604                            unsigned char *base,
605                            unsigned char *limit,
606                            const unsigned int *hook_entries,
607                            const unsigned int *underflows,
608                            unsigned int *i)
609 {
610         unsigned int h;
611
612         if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
613             || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
614                 duprintf("Bad offset %p\n", e);
615                 return -EINVAL;
616         }
617
618         if (e->next_offset
619             < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
620                 duprintf("checking: element %p size %u\n",
621                          e, e->next_offset);
622                 return -EINVAL;
623         }
624
625         /* Check hooks & underflows */
626         for (h = 0; h < NF_IP_NUMHOOKS; h++) {
627                 if ((unsigned char *)e - base == hook_entries[h])
628                         newinfo->hook_entry[h] = hook_entries[h];
629                 if ((unsigned char *)e - base == underflows[h])
630                         newinfo->underflow[h] = underflows[h];
631         }
632
633         /* FIXME: underflows must be unconditional, standard verdicts
634            < 0 (not IPT_RETURN). --RR */
635
636         /* Clear counters and comefrom */
637         e->counters = ((struct xt_counters) { 0, 0 });
638         e->comefrom = 0;
639
640         (*i)++;
641         return 0;
642 }
643
644 static inline int
645 cleanup_entry(struct ipt_entry *e, unsigned int *i)
646 {
647         struct ipt_entry_target *t;
648
649         if (i && (*i)-- == 0)
650                 return 1;
651
652         /* Cleanup all matches */
653         IPT_MATCH_ITERATE(e, cleanup_match, NULL);
654         t = ipt_get_target(e);
655         if (t->u.kernel.target->destroy)
656                 t->u.kernel.target->destroy(t->u.kernel.target, t->data,
657                                             t->u.target_size - sizeof(*t));
658         module_put(t->u.kernel.target->me);
659         return 0;
660 }
661
/* Checks and translates the user-supplied table segment (held in
   newinfo): validates sizes/offsets, verifies hook entry points,
   rejects rule loops, runs each entry's match/target checks, and
   finally replicates the blob to every other CPU's copy.
   Returns 0 on success or a negative errno. */
static int
translate_table(const char *name,
                unsigned int valid_hooks,
                struct xt_table_info *newinfo,
                void *entry0,
                unsigned int size,
                unsigned int number,
                const unsigned int *hook_entries,
                const unsigned int *underflows)
{
        unsigned int i;
        int ret;

        newinfo->size = size;
        newinfo->number = number;

        /* Init all hooks to impossible value. */
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = 0xFFFFFFFF;
                newinfo->underflow[i] = 0xFFFFFFFF;
        }

        duprintf("translate_table: size %u\n", newinfo->size);
        i = 0;
        /* Walk through entries, checking offsets. */
        ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                check_entry_size_and_hooks,
                                newinfo,
                                entry0,
                                entry0 + size,
                                hook_entries, underflows, &i);
        if (ret != 0)
                return ret;

        /* Userspace must have declared exactly as many entries as found. */
        if (i != number) {
                duprintf("translate_table: %u not %u entries\n",
                         i, number);
                return -EINVAL;
        }

        /* Check hooks all assigned */
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                /* Only hooks which are valid */
                if (!(valid_hooks & (1 << i)))
                        continue;
                if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
                        duprintf("Invalid hook entry %u %u\n",
                                 i, hook_entries[i]);
                        return -EINVAL;
                }
                if (newinfo->underflow[i] == 0xFFFFFFFF) {
                        duprintf("Invalid underflow %u %u\n",
                                 i, underflows[i]);
                        return -EINVAL;
                }
        }

        /* Reject rulesets containing chain loops. */
        if (!mark_source_chains(newinfo, valid_hooks, entry0))
                return -ELOOP;

        /* Finally, each sanity check must pass */
        i = 0;
        ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                check_entry, name, size, &i);

        if (ret != 0) {
                /* Unwind exactly the i entries that passed check_entry. */
                IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                  cleanup_entry, &i);
                return ret;
        }

        /* And one copy for every other CPU */
        for_each_cpu(i) {
                if (newinfo->entries[i] && newinfo->entries[i] != entry0)
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }

        return ret;
}
743
/* Gets counters. */
/* Accumulate one entry's byte/packet counters into total[*i] and
 * advance the index; IPT_ENTRY_ITERATE callback used by get_counters()
 * for every CPU after the first. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
                     struct xt_counters total[],
                     unsigned int *i)
{
        ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

        (*i)++;
        return 0;
}
755
756 static inline int
757 set_entry_to_counter(const struct ipt_entry *e,
758                      struct ipt_counters total[],
759                      unsigned int *i)
760 {
761         SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
762
763         (*i)++;
764         return 0;
765 }
766
/* Sum the per-CPU rule counters of table t into the counters[] array
 * (one slot per rule). Callers serialize against counter updates via
 * the table lock — see the write_lock_bh() in copy_entries_to_user(). */
static void
get_counters(const struct xt_table_info *t,
             struct xt_counters counters[])
{
        unsigned int cpu;
        unsigned int i;
        unsigned int curcpu;

        /* Instead of clearing (by a previous call to memset())
         * the counters and using adds, we set the counters
         * with data used by 'current' CPU
         * We dont care about preemption here.
         */
        curcpu = raw_smp_processor_id();

        /* First pass: SET from the current CPU's copy... */
        i = 0;
        IPT_ENTRY_ITERATE(t->entries[curcpu],
                          t->size,
                          set_entry_to_counter,
                          counters,
                          &i);

        /* ...then ADD every other CPU's copy on top. */
        for_each_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                i = 0;
                IPT_ENTRY_ITERATE(t->entries[cpu],
                                  t->size,
                                  add_entry_to_counter,
                                  counters,
                                  &i);
        }
}
800
/* Copy 'total_size' bytes of rules plus an atomic counter snapshot to
 * userspace.  The kernel-internal fields (counter pointers, kernel
 * match/target pointers) are rewritten in place in the user buffer to
 * their user-visible forms.  Returns 0, -ENOMEM or -EFAULT. */
static int
copy_entries_to_user(unsigned int total_size,
		     struct ipt_table *table,
		     void __user *userptr)
{
	unsigned int off, num, countersize;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return -ENOMEM;

	/* First, sum counters... under the table lock so packet
	 * processing cannot race the snapshot. */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		/* Patch in the freshly summed counters for rule 'num';
		 * the bulk copy above carried raw kernel values. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Replace each match's kernel pointer (u.kernel.match)
		 * with the user-visible name string in the user copy. */
		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same fixup for the rule's target name. */
		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
885
886 static int
887 get_entries(const struct ipt_get_entries *entries,
888             struct ipt_get_entries __user *uptr)
889 {
890         int ret;
891         struct ipt_table *t;
892
893         t = xt_find_table_lock(AF_INET, entries->name);
894         if (t && !IS_ERR(t)) {
895                 struct xt_table_info *private = t->private;
896                 duprintf("t->private->number = %u\n",
897                          private->number);
898                 if (entries->size == private->size)
899                         ret = copy_entries_to_user(private->size,
900                                                    t, uptr->entrytable);
901                 else {
902                         duprintf("get_entries: I've got %u not %u!\n",
903                                  private->size,
904                                  entries->size);
905                         ret = -EINVAL;
906                 }
907                 module_put(t->me);
908                 xt_table_unlock(t);
909         } else
910                 ret = t ? PTR_ERR(t) : -ENOENT;
911
912         return ret;
913 }
914
/* IPT_SO_SET_REPLACE: swap in a complete new ruleset supplied by
 * userspace and return the old ruleset's counters through
 * tmp.counters.  Error paths unwind via the goto chain at the end. */
static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct ipt_table *t;
	struct xt_table_info *newinfo, *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_entry, *loc_cpu_old_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check: per-CPU allocation below multiplies by
	 * NR_CPUS and adds cache-aligned headers */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto free_newinfo;
	}

	/* Validate the blob and resolve match/target modules; on
	 * success this has taken module references for every rule. */
	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo_counters;

	duprintf("ip_tables: Translated table\n");

	/* Autoload "iptable_<name>" if the table is not registered yet. */
	t = try_then_request_module(xt_find_table_lock(AF_INET, tmp.name),
				    "iptable_%s", tmp.name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (tmp.valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 tmp.valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* NOTE(review): up to two puts happen here — one balancing the
	 * reference from the table lookup above, the other dropping the
	 * "table has user rules" reference when the rule count returns
	 * to the initial built-in set.  Confirm against the refcount
	 * convention in x_tables before changing. */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
	xt_free_table_info(oldinfo);
	/* Replacement already succeeded; a failed copy-out only loses
	 * the old counters, the new table stays installed. */
	if (copy_to_user(tmp.counters, counters,
			 sizeof(struct xt_counters) * tmp.num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	/* drop the per-rule module references translate_table took */
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
 free_newinfo_counters:
	vfree(counters);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1018
1019 /* We're lazy, and add to the first CPU; overflow works its fey magic
1020  * and everything is OK. */
1021 static inline int
1022 add_counter_to_entry(struct ipt_entry *e,
1023                      const struct xt_counters addme[],
1024                      unsigned int *i)
1025 {
1026 #if 0
1027         duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1028                  *i,
1029                  (long unsigned int)e->counters.pcnt,
1030                  (long unsigned int)e->counters.bcnt,
1031                  (long unsigned int)addme[*i].pcnt,
1032                  (long unsigned int)addme[*i].bcnt);
1033 #endif
1034
1035         ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1036
1037         (*i)++;
1038         return 0;
1039 }
1040
/* IPT_SO_SET_ADD_COUNTERS: add user-supplied per-rule byte/packet
 * deltas onto the live table's counters. */
static int
do_add_counters(void __user *user, unsigned int len)
{
	unsigned int i;
	struct xt_counters_info tmp, *paddc;
	struct ipt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Total length must be exactly the header plus one xt_counters
	 * per rule claimed in the header. */
	if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	/* Re-read the whole blob (header + counter array) in one go. */
	if (copy_from_user(paddc, user, len) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(AF_INET, tmp.name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	/* Exclude packet processing while counters are updated. */
	write_lock_bh(&t->lock);
	private = t->private;
	/* Counter array must match the installed rule count exactly. */
	if (private->number != paddc->num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc->counters,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
1096
1097 static int
1098 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1099 {
1100         int ret;
1101
1102         if (!capable(CAP_NET_ADMIN))
1103                 return -EPERM;
1104
1105         switch (cmd) {
1106         case IPT_SO_SET_REPLACE:
1107                 ret = do_replace(user, len);
1108                 break;
1109
1110         case IPT_SO_SET_ADD_COUNTERS:
1111                 ret = do_add_counters(user, len);
1112                 break;
1113
1114         default:
1115                 duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
1116                 ret = -EINVAL;
1117         }
1118
1119         return ret;
1120 }
1121
/* getsockopt() dispatcher for the IPT_SO_GET_* options: table info,
 * rule dumps, and match/target revision queries.  Requires
 * CAP_NET_ADMIN. */
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO: {
		char name[IPT_TABLE_MAXNAMELEN];
		struct ipt_table *t;

		if (*len != sizeof(struct ipt_getinfo)) {
			duprintf("length %u != %u\n", *len,
				 sizeof(struct ipt_getinfo));
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(name, user, sizeof(name)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* force NUL termination of the user-supplied name */
		name[IPT_TABLE_MAXNAMELEN-1] = '\0';

		/* autoload "iptable_<name>" if not registered yet */
		t = try_then_request_module(xt_find_table_lock(AF_INET, name),
					    "iptable_%s", name);
		if (t && !IS_ERR(t)) {
			struct ipt_getinfo info;
			struct xt_table_info *private = t->private;

			info.valid_hooks = t->valid_hooks;
			memcpy(info.hook_entry, private->hook_entry,
			       sizeof(info.hook_entry));
			memcpy(info.underflow, private->underflow,
			       sizeof(info.underflow));
			info.num_entries = private->number;
			info.size = private->size;
			memcpy(info.name, name, sizeof(info.name));

			if (copy_to_user(user, &info, *len) != 0)
				ret = -EFAULT;
			else
				ret = 0;
			xt_table_unlock(t);
			module_put(t->me);
		} else
			ret = t ? PTR_ERR(t) : -ENOENT;
	}
	break;

	case IPT_SO_GET_ENTRIES: {
		struct ipt_get_entries get;

		/* three-stage validation: header fits, header copies,
		 * total length matches the header's size claim */
		if (*len < sizeof(get)) {
			duprintf("get_entries: %u < %u\n", *len, sizeof(get));
			ret = -EINVAL;
		} else if (copy_from_user(&get, user, sizeof(get)) != 0) {
			ret = -EFAULT;
		} else if (*len != sizeof(struct ipt_get_entries) + get.size) {
			duprintf("get_entries: %u != %u\n", *len,
				 sizeof(struct ipt_get_entries) + get.size);
			ret = -EINVAL;
		} else
			ret = get_entries(&get, user);
		break;
	}

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		/* one helper serves both options; the flag selects
		 * target vs. match lookup */
		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* autoload "ipt_<name>" if the extension is absent;
		 * ret is filled in by xt_find_revision() */
		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
1224
1225 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
1226 {
1227         int ret;
1228         struct xt_table_info *newinfo;
1229         static struct xt_table_info bootstrap
1230                 = { 0, 0, 0, { 0 }, { 0 }, { } };
1231         void *loc_cpu_entry;
1232
1233         newinfo = xt_alloc_table_info(repl->size);
1234         if (!newinfo)
1235                 return -ENOMEM;
1236
1237         /* choose the copy on our node/cpu
1238          * but dont care of preemption
1239          */
1240         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1241         memcpy(loc_cpu_entry, repl->entries, repl->size);
1242
1243         ret = translate_table(table->name, table->valid_hooks,
1244                               newinfo, loc_cpu_entry, repl->size,
1245                               repl->num_entries,
1246                               repl->hook_entry,
1247                               repl->underflow);
1248         if (ret != 0) {
1249                 xt_free_table_info(newinfo);
1250                 return ret;
1251         }
1252
1253         if (xt_register_table(table, &bootstrap, newinfo) != 0) {
1254                 xt_free_table_info(newinfo);
1255                 return ret;
1256         }
1257
1258         return 0;
1259 }
1260
1261 void ipt_unregister_table(struct ipt_table *table)
1262 {
1263         struct xt_table_info *private;
1264         void *loc_cpu_entry;
1265
1266         private = xt_unregister_table(table);
1267
1268         /* Decrease module usage counts and free resources */
1269         loc_cpu_entry = private->entries[raw_smp_processor_id()];
1270         IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
1271         xt_free_table_info(private);
1272 }
1273
1274 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
1275 static inline int
1276 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
1277                      u_int8_t type, u_int8_t code,
1278                      int invert)
1279 {
1280         return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
1281                 ^ invert;
1282 }
1283
1284 static int
1285 icmp_match(const struct sk_buff *skb,
1286            const struct net_device *in,
1287            const struct net_device *out,
1288            const void *matchinfo,
1289            int offset,
1290            unsigned int protoff,
1291            int *hotdrop)
1292 {
1293         struct icmphdr _icmph, *ic;
1294         const struct ipt_icmp *icmpinfo = matchinfo;
1295
1296         /* Must not be a fragment. */
1297         if (offset)
1298                 return 0;
1299
1300         ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
1301         if (ic == NULL) {
1302                 /* We've been asked to examine this packet, and we
1303                  * can't.  Hence, no choice but to drop.
1304                  */
1305                 duprintf("Dropping evil ICMP tinygram.\n");
1306                 *hotdrop = 1;
1307                 return 0;
1308         }
1309
1310         return icmp_type_code_match(icmpinfo->type,
1311                                     icmpinfo->code[0],
1312                                     icmpinfo->code[1],
1313                                     ic->type, ic->code,
1314                                     !!(icmpinfo->invflags&IPT_ICMP_INV));
1315 }
1316
1317 /* Called when user tries to insert an entry of this type. */
1318 static int
1319 icmp_checkentry(const char *tablename,
1320            const void *info,
1321            void *matchinfo,
1322            unsigned int matchsize,
1323            unsigned int hook_mask)
1324 {
1325         const struct ipt_icmp *icmpinfo = matchinfo;
1326
1327         /* Must specify no unknown invflags */
1328         return !(icmpinfo->invflags & ~IPT_ICMP_INV);
1329 }
1330
/* The built-in targets: standard (NULL) and error. */
static struct ipt_target ipt_standard_target = {
	.name		= IPT_STANDARD_TARGET,
	/* NOTE(review): no .target function — payload is a plain int,
	 * presumably the verdict handled specially by the table
	 * traverser; confirm against ipt_do_table. */
	.targetsize	= sizeof(int),
};

static struct ipt_target ipt_error_target = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	/* payload is the error target's name string */
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
};
1342
/* Wire the IPT_SO_SET_*/IPT_SO_GET_* option ranges on PF_INET sockets
 * to the dispatchers above. */
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,	/* optmax is exclusive */
	.set		= do_ipt_set_ctl,
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
};
1352
/* Built-in "icmp" match registration (see icmp_match and
 * icmp_checkentry above). */
static struct ipt_match icmp_matchstruct = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.checkentry	= icmp_checkentry,
};
1360
1361 static int __init init(void)
1362 {
1363         int ret;
1364
1365         xt_proto_init(AF_INET);
1366
1367         /* Noone else will be downing sem now, so we won't sleep */
1368         xt_register_target(AF_INET, &ipt_standard_target);
1369         xt_register_target(AF_INET, &ipt_error_target);
1370         xt_register_match(AF_INET, &icmp_matchstruct);
1371
1372         /* Register setsockopt */
1373         ret = nf_register_sockopt(&ipt_sockopts);
1374         if (ret < 0) {
1375                 duprintf("Unable to register sockopts.\n");
1376                 return ret;
1377         }
1378
1379         printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
1380         return 0;
1381 }
1382
/* Module unload: tear everything down in strict reverse order of
 * init(). */
static void __exit fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(AF_INET, &icmp_matchstruct);
	xt_unregister_target(AF_INET, &ipt_error_target);
	xt_unregister_target(AF_INET, &ipt_standard_target);

	xt_proto_fini(AF_INET);
}
1393
/* Public entry points used by other modules (per-table and extension
 * code), plus the module init/exit hooks. */
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(init);
module_exit(fini);