/*
 * IPVS         An implementation of the IP virtual server support for the
 *              LINUX operating system.  IPVS is now implemented as a module
 *              over the NetFilter framework. IPVS can be used to build a
 *              high-performance and highly available server based on a
 *              cluster of servers.
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Peter Kese <peter.kese@ijs.si>
 *              Julian Anastasov <ja@ssi.bg>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/swap.h>
#include <linux/seq_file.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/mutex.h>

#include <net/net_namespace.h>
#include <net/route.h>

#include <asm/uaccess.h>

#include <net/ip_vs.h>
/* lock for IPVS sockopts -- [gs]etsockopt may sleep, hence a mutex */
static DEFINE_MUTEX(__ip_vs_mutex);

/* lock for service table */
static DEFINE_RWLOCK(__ip_vs_svc_lock);

/* lock for table with the real services */
static DEFINE_RWLOCK(__ip_vs_rs_lock);

/* lock for state and timeout tables */
static DEFINE_RWLOCK(__ip_vs_securetcp_lock);

/* lock for drop entry handling */
static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);

/* lock for drop packet handling */
static DEFINE_SPINLOCK(__ip_vs_droppacket_lock);

/* 1/rate drop and drop-entry variables */
int ip_vs_drop_rate = 0;
int ip_vs_drop_counter = 0;
static atomic_t ip_vs_dropentry = ATOMIC_INIT(0);
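/*
 * A drop rate of r means roughly "drop one of every r packets":
 * ip_vs_drop_counter is counted down per packet and reloaded from
 * ip_vs_drop_rate when it reaches zero (see ip_vs_todrop() in ip_vs.h).
 */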
/* number of virtual services */
static int ip_vs_num_services = 0;

/* sysctl variables */
static int sysctl_ip_vs_drop_entry = 0;
static int sysctl_ip_vs_drop_packet = 0;
static int sysctl_ip_vs_secure_tcp = 0;
static int sysctl_ip_vs_amemthresh = 1024;
static int sysctl_ip_vs_am_droprate = 10;
int sysctl_ip_vs_cache_bypass = 0;
int sysctl_ip_vs_expire_nodest_conn = 0;
int sysctl_ip_vs_expire_quiescent_template = 0;
int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
int sysctl_ip_vs_nat_icmp_send = 0;
#ifdef CONFIG_IP_VS_DEBUG
static int sysctl_ip_vs_debug_level = 0;

int ip_vs_get_debug_level(void)
{
        return sysctl_ip_vs_debug_level;
}
#endif
/*
 *      update_defense_level is called from keventd and from sysctl,
 *      so it needs to protect itself from softirqs
 */
static void update_defense_level(void)
{
        struct sysinfo i;
        static int old_secure_tcp = 0;
        int availmem;
        int nomem;
        int to_change = -1;

        /* we only count free and buffered memory (in pages) */
        si_meminfo(&i);
        availmem = i.freeram + i.bufferram;
        /* however in linux 2.5 the i.bufferram is total page cache size,
           we need adjust it */
        /* si_swapinfo(&i); */
        /* availmem = availmem - (i.totalswap - i.freeswap); */

        nomem = (availmem < sysctl_ip_vs_amemthresh);

        local_bh_disable();

        /* drop_entry */
        spin_lock(&__ip_vs_dropentry_lock);
        switch (sysctl_ip_vs_drop_entry) {
        case 0:
                atomic_set(&ip_vs_dropentry, 0);
                break;
        case 1:
                if (nomem) {
                        atomic_set(&ip_vs_dropentry, 1);
                        sysctl_ip_vs_drop_entry = 2;
                } else {
                        atomic_set(&ip_vs_dropentry, 0);
                }
                break;
        case 2:
                if (nomem) {
                        atomic_set(&ip_vs_dropentry, 1);
                } else {
                        atomic_set(&ip_vs_dropentry, 0);
                        sysctl_ip_vs_drop_entry = 1;
                }
                break;
        case 3:
                atomic_set(&ip_vs_dropentry, 1);
                break;
        }
        spin_unlock(&__ip_vs_dropentry_lock);

        /* drop_packet */
        spin_lock(&__ip_vs_droppacket_lock);
        switch (sysctl_ip_vs_drop_packet) {
        case 0:
                ip_vs_drop_rate = 0;
                break;
        case 1:
                if (nomem) {
                        ip_vs_drop_rate = ip_vs_drop_counter
                                = sysctl_ip_vs_amemthresh /
                                (sysctl_ip_vs_amemthresh-availmem);
                        sysctl_ip_vs_drop_packet = 2;
                } else {
                        ip_vs_drop_rate = 0;
                }
                break;
        case 2:
                if (nomem) {
                        ip_vs_drop_rate = ip_vs_drop_counter
                                = sysctl_ip_vs_amemthresh /
                                (sysctl_ip_vs_amemthresh-availmem);
                } else {
                        ip_vs_drop_rate = 0;
                        sysctl_ip_vs_drop_packet = 1;
                }
                break;
        case 3:
                ip_vs_drop_rate = sysctl_ip_vs_am_droprate;
                break;
        }
        spin_unlock(&__ip_vs_droppacket_lock);

        /* secure_tcp */
        write_lock(&__ip_vs_securetcp_lock);
        switch (sysctl_ip_vs_secure_tcp) {
        case 0:
                if (old_secure_tcp >= 2)
                        to_change = 0;
                break;
        case 1:
                if (nomem) {
                        if (old_secure_tcp < 2)
                                to_change = 1;
                        sysctl_ip_vs_secure_tcp = 2;
                } else {
                        if (old_secure_tcp >= 2)
                                to_change = 0;
                }
                break;
        case 2:
                if (nomem) {
                        if (old_secure_tcp < 2)
                                to_change = 1;
                } else {
                        if (old_secure_tcp >= 2)
                                to_change = 0;
                        sysctl_ip_vs_secure_tcp = 1;
                }
                break;
        case 3:
                if (old_secure_tcp < 2)
                        to_change = 1;
                break;
        }
        old_secure_tcp = sysctl_ip_vs_secure_tcp;
        if (to_change >= 0)
                ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
        write_unlock(&__ip_vs_securetcp_lock);

        local_bh_enable();
}
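/*
 * The three defense sysctls handled above share mode semantics:
 *   0 - strategy disabled
 *   1 - arm automatically when memory is low
 *   2 - armed (set by the kernel while the shortage lasts)
 *   3 - always armed, regardless of memory
 */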
/*
 *      Timer for checking the defense
 */
#define DEFENSE_TIMER_PERIOD    1*HZ
static void defense_work_handler(struct work_struct *work);
static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);

static void defense_work_handler(struct work_struct *work)
{
        update_defense_level();
        if (atomic_read(&ip_vs_dropentry))
                ip_vs_random_dropentry();

        schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
}
int
ip_vs_use_count_inc(void)
{
        return try_module_get(THIS_MODULE);
}

void
ip_vs_use_count_dec(void)
{
        module_put(THIS_MODULE);
}
/*
 *      Hash table: for virtual service lookups
 */
#define IP_VS_SVC_TAB_BITS 8
#define IP_VS_SVC_TAB_SIZE (1 << IP_VS_SVC_TAB_BITS)
#define IP_VS_SVC_TAB_MASK (IP_VS_SVC_TAB_SIZE - 1)
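/* With 8 bits this gives 2^8 = 256 buckets and a mask of 0xFF. */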
/* the service table hashed by <protocol, addr, port> */
static struct list_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE];
/* the service table hashed by fwmark */
static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
/*
 *      Hash table: for real service lookups
 */
#define IP_VS_RTAB_BITS 4
#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)

static struct list_head ip_vs_rtable[IP_VS_RTAB_SIZE];
/*
 *      Trash for destinations
 */
static LIST_HEAD(ip_vs_dest_trash);

/*
 *      FTP & NULL virtual service counters
 */
static atomic_t ip_vs_ftpsvc_counter = ATOMIC_INIT(0);
static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0);
/*
 *      Returns hash value for virtual service
 */
static __inline__ unsigned
ip_vs_svc_hashkey(unsigned proto, __be32 addr, __be16 port)
{
        register unsigned porth = ntohs(port);

        return (proto^ntohl(addr)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
                & IP_VS_SVC_TAB_MASK;
}

/*
 *      Returns hash value of fwmark for virtual service lookup
 */
static __inline__ unsigned ip_vs_svc_fwm_hashkey(__u32 fwmark)
{
        return fwmark & IP_VS_SVC_TAB_MASK;
}
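/*
 * Example fold for ip_vs_svc_hashkey(): TCP (proto 6) to 192.168.0.1:80
 * gives (6 ^ 0xC0A80001 ^ (80 >> 8) ^ 80) & 0xFF = 0x57, i.e. bucket 87.
 */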
/*
 *      Hashes a service in the ip_vs_svc_table by <proto,addr,port>
 *      or in the ip_vs_svc_fwm_table by fwmark.
 *      Should be called with locked tables.
 */
static int ip_vs_svc_hash(struct ip_vs_service *svc)
{
        unsigned hash;

        if (svc->flags & IP_VS_SVC_F_HASHED) {
                IP_VS_ERR("ip_vs_svc_hash(): request for already hashed, "
                          "called from %p\n", __builtin_return_address(0));
                return 0;
        }

        if (svc->fwmark == 0) {
                /*
                 *  Hash it by <protocol,addr,port> in ip_vs_svc_table
                 */
                hash = ip_vs_svc_hashkey(svc->protocol, svc->addr, svc->port);
                list_add(&svc->s_list, &ip_vs_svc_table[hash]);
        } else {
                /*
                 *  Hash it by fwmark in ip_vs_svc_fwm_table
                 */
                hash = ip_vs_svc_fwm_hashkey(svc->fwmark);
                list_add(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
        }

        svc->flags |= IP_VS_SVC_F_HASHED;
        /* increase its refcnt because it is referenced by the svc table */
        atomic_inc(&svc->refcnt);
        return 1;
}
/*
 *      Unhashes a service from ip_vs_svc_table/ip_vs_svc_fwm_table.
 *      Should be called with locked tables.
 */
static int ip_vs_svc_unhash(struct ip_vs_service *svc)
{
        if (!(svc->flags & IP_VS_SVC_F_HASHED)) {
                IP_VS_ERR("ip_vs_svc_unhash(): request for unhash flagged, "
                          "called from %p\n", __builtin_return_address(0));
                return 0;
        }

        if (svc->fwmark == 0) {
                /* Remove it from the ip_vs_svc_table table */
                list_del(&svc->s_list);
        } else {
                /* Remove it from the ip_vs_svc_fwm_table table */
                list_del(&svc->f_list);
        }

        svc->flags &= ~IP_VS_SVC_F_HASHED;
        atomic_dec(&svc->refcnt);
        return 1;
}
/*
 *      Get service by {proto,addr,port} in the service table.
 */
static __inline__ struct ip_vs_service *
__ip_vs_service_get(__u16 protocol, __be32 vaddr, __be16 vport)
{
        unsigned hash;
        struct ip_vs_service *svc;

        /* Check for "full" addressed entries */
        hash = ip_vs_svc_hashkey(protocol, vaddr, vport);

        list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){
                if ((svc->addr == vaddr)
                    && (svc->port == vport)
                    && (svc->protocol == protocol)) {
                        /* HIT */
                        atomic_inc(&svc->usecnt);
                        return svc;
                }
        }

        return NULL;
}
/*
 *      Get service by {fwmark} in the service table.
 */
static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark)
{
        unsigned hash;
        struct ip_vs_service *svc;

        /* Check for fwmark addressed entries */
        hash = ip_vs_svc_fwm_hashkey(fwmark);

        list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) {
                if (svc->fwmark == fwmark) {
                        /* HIT */
                        atomic_inc(&svc->usecnt);
                        return svc;
                }
        }

        return NULL;
}
struct ip_vs_service *
ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport)
{
        struct ip_vs_service *svc;

        read_lock(&__ip_vs_svc_lock);

        /*
         *      Check the table hashed by fwmark first
         */
        if (fwmark && (svc = __ip_vs_svc_fwm_get(fwmark)))
                goto out;

        /*
         *      Check the table hashed by <protocol,addr,port>
         *      for "full" addressed entries
         */
        svc = __ip_vs_service_get(protocol, vaddr, vport);

        if (svc == NULL
            && protocol == IPPROTO_TCP
            && atomic_read(&ip_vs_ftpsvc_counter)
            && (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) {
                /*
                 * Check if ftp service entry exists, the packet
                 * might belong to FTP data connections.
                 */
                svc = __ip_vs_service_get(protocol, vaddr, FTPPORT);
        }

        if (svc == NULL
            && atomic_read(&ip_vs_nullsvc_counter)) {
                /*
                 * Check if the catch-all port (port zero) exists
                 */
                svc = __ip_vs_service_get(protocol, vaddr, 0);
        }

  out:
        read_unlock(&__ip_vs_svc_lock);

        IP_VS_DBG(9, "lookup service: fwm %u %s %u.%u.%u.%u:%u %s\n",
                  fwmark, ip_vs_proto_name(protocol),
                  NIPQUAD(vaddr), ntohs(vport),
                  svc?"hit":"not hit");

        return svc;
}
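/*
 * Lookup order above: fwmark service first, then the exact
 * <protocol,addr,port> triplet, then the FTP control port (so FTP data
 * connections still find their service), and finally a port-zero catch-all.
 */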
static inline void
__ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        atomic_inc(&svc->refcnt);
        dest->svc = svc;
}

static inline void
__ip_vs_unbind_svc(struct ip_vs_dest *dest)
{
        struct ip_vs_service *svc = dest->svc;

        dest->svc = NULL;
        if (atomic_dec_and_test(&svc->refcnt))
                kfree(svc);
}
/*
 *      Returns hash value for real service
 */
static __inline__ unsigned ip_vs_rs_hashkey(__be32 addr, __be16 port)
{
        register unsigned porth = ntohs(port);

        return (ntohl(addr)^(porth>>IP_VS_RTAB_BITS)^porth)
                & IP_VS_RTAB_MASK;
}
/*
 *      Hashes ip_vs_dest in ip_vs_rtable by <proto,addr,port>.
 *      should be called with locked tables.
 */
static int ip_vs_rs_hash(struct ip_vs_dest *dest)
{
        unsigned hash;

        if (!list_empty(&dest->d_list)) {
                return 0;
        }

        /*
         *      Hash by proto,addr,port,
         *      which are the parameters of the real service.
         */
        hash = ip_vs_rs_hashkey(dest->addr, dest->port);
        list_add(&dest->d_list, &ip_vs_rtable[hash]);

        return 1;
}
/*
 *      UNhashes ip_vs_dest from ip_vs_rtable.
 *      should be called with locked tables.
 */
static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
{
        /*
         * Remove it from the ip_vs_rtable table.
         */
        if (!list_empty(&dest->d_list)) {
                list_del(&dest->d_list);
                INIT_LIST_HEAD(&dest->d_list);
        }

        return 1;
}
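/*
 * INIT_LIST_HEAD() re-initializes d_list after unlinking, so that
 * list_empty(&dest->d_list) keeps working as the "not hashed" test
 * used by ip_vs_rs_hash() above.
 */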
/*
 *      Lookup real service by <proto,addr,port> in the real service table.
 */
struct ip_vs_dest *
ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport)
{
        unsigned hash;
        struct ip_vs_dest *dest;

        /*
         *      Check for "full" addressed entries
         *      Return the first found entry
         */
        hash = ip_vs_rs_hashkey(daddr, dport);

        read_lock(&__ip_vs_rs_lock);
        list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) {
                if ((dest->addr == daddr)
                    && (dest->port == dport)
                    && ((dest->protocol == protocol) ||
                        dest->vfwmark)) {
                        /* HIT */
                        read_unlock(&__ip_vs_rs_lock);
                        return dest;
                }
        }
        read_unlock(&__ip_vs_rs_lock);

        return NULL;
}
/*
 *      Lookup destination by {addr,port} in the given service
 */
static struct ip_vs_dest *
ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
{
        struct ip_vs_dest *dest;

        /*
         * Find the destination for the given service
         */
        list_for_each_entry(dest, &svc->destinations, n_list) {
                if ((dest->addr == daddr) && (dest->port == dport)) {
                        /* HIT */
                        return dest;
                }
        }

        return NULL;
}
/*
 * Find destination by {daddr,dport,vaddr,protocol}
 * Created to be used in ip_vs_process_message() in
 * the backup synchronization daemon. It finds the
 * destination to be bound to the received connection
 * on the backup.
 *
 * ip_vs_lookup_real_service() looked promising, but
 * seems not working as expected.
 */
struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport,
                                   __be32 vaddr, __be16 vport, __u16 protocol)
{
        struct ip_vs_dest *dest;
        struct ip_vs_service *svc;

        svc = ip_vs_service_get(0, protocol, vaddr, vport);
        if (!svc)
                return NULL;
        dest = ip_vs_lookup_dest(svc, daddr, dport);
        if (dest)
                atomic_inc(&dest->refcnt);
        ip_vs_service_put(svc);
        return dest;
}
/*
 *      Lookup dest by {svc,addr,port} in the destination trash.
 *      The destination trash is used to hold the destinations that are removed
 *      from the service table but are still referenced by some conn entries.
 *      The reason to add the destination trash is that when the dest is
 *      temporarily down (either by administrator or by monitor program),
 *      the dest can be picked back from the trash, the remaining connections
 *      to the dest can continue, and the counting information of the dest
 *      is also useful for scheduling.
 */
static struct ip_vs_dest *
ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
{
        struct ip_vs_dest *dest, *nxt;

        /*
         * Find the destination in trash
         */
        list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
                IP_VS_DBG(3, "Destination %u/%u.%u.%u.%u:%u still in trash, "
                          "dest->refcnt=%d\n",
                          dest->vfwmark,
                          NIPQUAD(dest->addr), ntohs(dest->port),
                          atomic_read(&dest->refcnt));
                if (dest->addr == daddr &&
                    dest->port == dport &&
                    dest->vfwmark == svc->fwmark &&
                    dest->protocol == svc->protocol &&
                    (svc->fwmark ||
                     (dest->vaddr == svc->addr &&
                      dest->vport == svc->port))) {
                        /* HIT */
                        return dest;
                }

                /*
                 * Try to purge the destination from trash if not referenced
                 */
                if (atomic_read(&dest->refcnt) == 1) {
                        IP_VS_DBG(3, "Removing destination %u/%u.%u.%u.%u:%u "
                                  "from trash\n",
                                  dest->vfwmark,
                                  NIPQUAD(dest->addr), ntohs(dest->port));
                        list_del(&dest->n_list);
                        ip_vs_dst_reset(dest);
                        __ip_vs_unbind_svc(dest);
                        kfree(dest);
                }
        }

        return NULL;
}
/*
 *      Clean up all the destinations in the trash
 *      Called by the ip_vs_control_cleanup()
 *
 *      When the ip_vs_control_cleanup is activated by ipvs module exit,
 *      the service tables must have been flushed and all the connections
 *      are expired, and the refcnt of each destination in the trash must
 *      be 1, so we simply release them here.
 */
static void ip_vs_trash_cleanup(void)
{
        struct ip_vs_dest *dest, *nxt;

        list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
                list_del(&dest->n_list);
                ip_vs_dst_reset(dest);
                __ip_vs_unbind_svc(dest);
                kfree(dest);
        }
}
static void
ip_vs_zero_stats(struct ip_vs_stats *stats)
{
        spin_lock_bh(&stats->lock);

        stats->conns = 0;
        stats->inpkts = 0;
        stats->outpkts = 0;
        stats->inbytes = 0;
        stats->outbytes = 0;

        stats->cps = 0;
        stats->inpps = 0;
        stats->outpps = 0;
        stats->inbps = 0;
        stats->outbps = 0;

        ip_vs_zero_estimator(stats);

        spin_unlock_bh(&stats->lock);
}
/*
 *      Update a destination in the given service
 */
static void
__ip_vs_update_dest(struct ip_vs_service *svc,
                    struct ip_vs_dest *dest, struct ip_vs_dest_user *udest)
{
        int conn_flags;

        /* set the weight and the flags */
        atomic_set(&dest->weight, udest->weight);
        conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE;

        /* check if local node and update the flags */
        if (inet_addr_type(&init_net, udest->addr) == RTN_LOCAL) {
                conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK)
                        | IP_VS_CONN_F_LOCALNODE;
        }

        /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
        if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) {
                conn_flags |= IP_VS_CONN_F_NOOUTPUT;
        } else {
                /*
                 *    Put the real service in ip_vs_rtable if not present.
                 *    For now only for NAT!
                 */
                write_lock_bh(&__ip_vs_rs_lock);
                ip_vs_rs_hash(dest);
                write_unlock_bh(&__ip_vs_rs_lock);
        }
        atomic_set(&dest->conn_flags, conn_flags);

        /* bind the service */
        if (!dest->svc) {
                __ip_vs_bind_svc(dest, svc);
        } else {
                if (dest->svc != svc) {
                        __ip_vs_unbind_svc(dest);
                        ip_vs_zero_stats(&dest->stats);
                        __ip_vs_bind_svc(dest, svc);
                }
        }

        /* set the dest status flags */
        dest->flags |= IP_VS_DEST_F_AVAILABLE;

        if (udest->u_threshold == 0 || udest->u_threshold > dest->u_threshold)
                dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
        dest->u_threshold = udest->u_threshold;
        dest->l_threshold = udest->l_threshold;
}
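/*
 * Threshold semantics: u_threshold is the upper connection limit
 * (0 means unlimited); setting it to zero or raising it also clears
 * the OVERLOAD flag so the dest becomes schedulable again.
 */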
/*
 *      Create a destination for the given service
 */
static int
ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
               struct ip_vs_dest **dest_p)
{
        struct ip_vs_dest *dest;
        unsigned atype;

        EnterFunction(2);

        atype = inet_addr_type(&init_net, udest->addr);
        if (atype != RTN_LOCAL && atype != RTN_UNICAST)
                return -EINVAL;

        dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
        if (dest == NULL) {
                IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n");
                return -ENOMEM;
        }

        dest->protocol = svc->protocol;
        dest->vaddr = svc->addr;
        dest->vport = svc->port;
        dest->vfwmark = svc->fwmark;
        dest->addr = udest->addr;
        dest->port = udest->port;

        atomic_set(&dest->activeconns, 0);
        atomic_set(&dest->inactconns, 0);
        atomic_set(&dest->persistconns, 0);
        atomic_set(&dest->refcnt, 0);

        INIT_LIST_HEAD(&dest->d_list);
        spin_lock_init(&dest->dst_lock);
        spin_lock_init(&dest->stats.lock);
        __ip_vs_update_dest(svc, dest, udest);
        ip_vs_new_estimator(&dest->stats);

        *dest_p = dest;

        LeaveFunction(2);
        return 0;
}
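/*
 * Note that refcnt starts at zero here; ip_vs_add_dest() takes the
 * first reference once the dest is actually linked into a service.
 */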
/*
 *      Add a destination into an existing service
 */
static int
ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
{
        struct ip_vs_dest *dest;
        __be32 daddr = udest->addr;
        __be16 dport = udest->port;
        int ret;

        EnterFunction(2);

        if (udest->weight < 0) {
                IP_VS_ERR("ip_vs_add_dest(): server weight less than zero\n");
                return -ERANGE;
        }

        if (udest->l_threshold > udest->u_threshold) {
                IP_VS_ERR("ip_vs_add_dest(): lower threshold is higher than "
                          "upper threshold\n");
                return -ERANGE;
        }

        /*
         * Check if the dest already exists in the list
         */
        dest = ip_vs_lookup_dest(svc, daddr, dport);
        if (dest != NULL) {
                IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n");
                return -EEXIST;
        }

        /*
         * Check if the dest already exists in the trash and
         * is from the same service
         */
        dest = ip_vs_trash_get_dest(svc, daddr, dport);
        if (dest != NULL) {
                IP_VS_DBG(3, "Get destination %u.%u.%u.%u:%u from trash, "
                          "dest->refcnt=%d, service %u/%u.%u.%u.%u:%u\n",
                          NIPQUAD(daddr), ntohs(dport),
                          atomic_read(&dest->refcnt),
                          dest->vfwmark,
                          NIPQUAD(dest->vaddr),
                          ntohs(dest->vport));
                __ip_vs_update_dest(svc, dest, udest);

                /*
                 * Get the destination from the trash
                 */
                list_del(&dest->n_list);

                ip_vs_new_estimator(&dest->stats);

                write_lock_bh(&__ip_vs_svc_lock);

                /*
                 * Wait until all other svc users go away.
                 */
                IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);

                list_add(&dest->n_list, &svc->destinations);
                svc->num_dests++;

                /* call the update_service function of its scheduler */
                svc->scheduler->update_service(svc);

                write_unlock_bh(&__ip_vs_svc_lock);
                return 0;
        }

        /*
         * Allocate and initialize the dest structure
         */
        ret = ip_vs_new_dest(svc, udest, &dest);
        if (ret) {
                return ret;
        }

        /*
         * Add the dest entry into the list
         */
        atomic_inc(&dest->refcnt);

        write_lock_bh(&__ip_vs_svc_lock);

        /*
         * Wait until all other svc users go away.
         */
        IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);

        list_add(&dest->n_list, &svc->destinations);
        svc->num_dests++;

        /* call the update_service function of its scheduler */
        svc->scheduler->update_service(svc);

        write_unlock_bh(&__ip_vs_svc_lock);

        LeaveFunction(2);

        return 0;
}
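/*
 * IP_VS_WAIT_WHILE() (defined in ip_vs.h, essentially a cpu_relax()
 * busy-wait) is the pattern used throughout this file: with the svc
 * write lock held no new readers can enter, so spinning until usecnt
 * drops to 1 guarantees we are the only remaining user before the
 * destination list is modified.
 */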
/*
 *      Edit a destination in the given service
 */
static int
ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
{
        struct ip_vs_dest *dest;
        __be32 daddr = udest->addr;
        __be16 dport = udest->port;

        EnterFunction(2);

        if (udest->weight < 0) {
                IP_VS_ERR("ip_vs_edit_dest(): server weight less than zero\n");
                return -ERANGE;
        }

        if (udest->l_threshold > udest->u_threshold) {
                IP_VS_ERR("ip_vs_edit_dest(): lower threshold is higher than "
                          "upper threshold\n");
                return -ERANGE;
        }

        /*
         *  Lookup the destination list
         */
        dest = ip_vs_lookup_dest(svc, daddr, dport);
        if (dest == NULL) {
                IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n");
                return -ENOENT;
        }

        __ip_vs_update_dest(svc, dest, udest);

        write_lock_bh(&__ip_vs_svc_lock);

        /* Wait until all other svc users go away */
        IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);

        /* call the update_service, because server weight may be changed */
        svc->scheduler->update_service(svc);

        write_unlock_bh(&__ip_vs_svc_lock);

        LeaveFunction(2);

        return 0;
}
/*
 *      Delete a destination (must be already unlinked from the service)
 */
static void __ip_vs_del_dest(struct ip_vs_dest *dest)
{
        ip_vs_kill_estimator(&dest->stats);

        /*
         *  Remove it from the d-linked list with the real services.
         */
        write_lock_bh(&__ip_vs_rs_lock);
        ip_vs_rs_unhash(dest);
        write_unlock_bh(&__ip_vs_rs_lock);

        /*
         *  Decrease the refcnt of the dest, and free the dest
         *  if nobody refers to it (refcnt=0). Otherwise, throw
         *  the destination into the trash.
         */
        if (atomic_dec_and_test(&dest->refcnt)) {
                ip_vs_dst_reset(dest);
                /* simply decrease svc->refcnt here, let the caller check
                   and release the service if nobody refers to it.
                   Only user context can release destination and service,
                   and only one user context can update virtual service at a
                   time, so the operation here is OK */
                atomic_dec(&dest->svc->refcnt);
                kfree(dest);
        } else {
                IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, "
                          "dest->refcnt=%d\n",
                          NIPQUAD(dest->addr), ntohs(dest->port),
                          atomic_read(&dest->refcnt));
                list_add(&dest->n_list, &ip_vs_dest_trash);
                atomic_inc(&dest->refcnt);
        }
}
/*
 *      Unlink a destination from the given service
 */
static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
                                struct ip_vs_dest *dest,
                                int svcupd)
{
        dest->flags &= ~IP_VS_DEST_F_AVAILABLE;

        /*
         *  Remove it from the d-linked destination list.
         */
        list_del(&dest->n_list);
        svc->num_dests--;
        if (svcupd) {
                /*
                 *  Call the update_service function of its scheduler
                 */
                svc->scheduler->update_service(svc);
        }
}
/*
 *      Delete a destination server in the given service
 */
static int
ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest)
{
        struct ip_vs_dest *dest;
        __be32 daddr = udest->addr;
        __be16 dport = udest->port;

        EnterFunction(2);

        dest = ip_vs_lookup_dest(svc, daddr, dport);
        if (dest == NULL) {
                IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n");
                return -ENOENT;
        }

        write_lock_bh(&__ip_vs_svc_lock);

        /*
         *      Wait until all other svc users go away.
         */
        IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);

        /*
         *      Unlink dest from the service
         */
        __ip_vs_unlink_dest(svc, dest, 1);

        write_unlock_bh(&__ip_vs_svc_lock);

        /*
         *      Delete the destination
         */
        __ip_vs_del_dest(dest);

        LeaveFunction(2);

        return 0;
}
/*
 *      Add a service into the service hash table
 */
static int
ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
{
        int ret = 0;
        struct ip_vs_scheduler *sched = NULL;
        struct ip_vs_service *svc = NULL;

        /* increase the module use count */
        ip_vs_use_count_inc();

        /* Lookup the scheduler by 'u->sched_name' */
        sched = ip_vs_scheduler_get(u->sched_name);
        if (sched == NULL) {
                IP_VS_INFO("Scheduler module ip_vs_%s not found\n",
                           u->sched_name);
                ret = -ENOENT;
                goto out_mod_dec;
        }

        svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
        if (svc == NULL) {
                IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n");
                ret = -ENOMEM;
                goto out_err;
        }

        /* I'm the first user of the service */
        atomic_set(&svc->usecnt, 1);
        atomic_set(&svc->refcnt, 0);

        svc->protocol = u->protocol;
        svc->addr = u->addr;
        svc->port = u->port;
        svc->fwmark = u->fwmark;
        svc->flags = u->flags;
        svc->timeout = u->timeout * HZ;
        svc->netmask = u->netmask;

        INIT_LIST_HEAD(&svc->destinations);
        rwlock_init(&svc->sched_lock);
        spin_lock_init(&svc->stats.lock);

        /* Bind the scheduler */
        ret = ip_vs_bind_scheduler(svc, sched);
        if (ret)
                goto out_err;

        /* Update the virtual service counters */
        if (svc->port == FTPPORT)
                atomic_inc(&ip_vs_ftpsvc_counter);
        else if (svc->port == 0)
                atomic_inc(&ip_vs_nullsvc_counter);

        ip_vs_new_estimator(&svc->stats);
        ip_vs_num_services++;

        /* Hash the service into the service table */
        write_lock_bh(&__ip_vs_svc_lock);
        ip_vs_svc_hash(svc);
        write_unlock_bh(&__ip_vs_svc_lock);

        *svc_p = svc;
        return 0;

  out_err:
        if (svc != NULL) {
                if (svc->scheduler)
                        ip_vs_unbind_scheduler(svc);
                if (svc->inc)
                        ip_vs_app_inc_put(svc->inc);
                kfree(svc);
        }
        ip_vs_scheduler_put(sched);

  out_mod_dec:
        /* decrease the module use count */
        ip_vs_use_count_dec();

        return ret;
}
/*
 *      Edit a service and bind it with a new scheduler
 */
static int
ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u)
{
        struct ip_vs_scheduler *sched, *old_sched;
        int ret = 0;

        /*
         * Lookup the scheduler, by 'u->sched_name'
         */
        sched = ip_vs_scheduler_get(u->sched_name);
        if (sched == NULL) {
                IP_VS_INFO("Scheduler module ip_vs_%s not found\n",
                           u->sched_name);
                return -ENOENT;
        }
        old_sched = sched;

        write_lock_bh(&__ip_vs_svc_lock);

        /*
         * Wait until all other svc users go away.
         */
        IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);

        /*
         * Set the flags and timeout value
         */
        svc->flags = u->flags | IP_VS_SVC_F_HASHED;
        svc->timeout = u->timeout * HZ;
        svc->netmask = u->netmask;

        old_sched = svc->scheduler;
        if (sched != old_sched) {
                /*
                 * Unbind the old scheduler
                 */
                if ((ret = ip_vs_unbind_scheduler(svc))) {
                        old_sched = sched;
                        goto out;
                }

                /*
                 * Bind the new scheduler
                 */
                if ((ret = ip_vs_bind_scheduler(svc, sched))) {
                        /*
                         * If ip_vs_bind_scheduler fails, restore the old
                         * scheduler.
                         * The main reason of failure is out of memory.
                         *
                         * The question is if the old scheduler can be
                         * restored all the time. TODO: if it cannot be
                         * restored some time, we must delete the service,
                         * otherwise the system may crash.
                         */
                        ip_vs_bind_scheduler(svc, old_sched);
                        old_sched = sched;
                        goto out;
                }
        }

  out:
        write_unlock_bh(&__ip_vs_svc_lock);

        ip_vs_scheduler_put(old_sched);

        return ret;
}
/*
 *      Delete a service from the service list
 *      - The service must be unlinked, unlocked and not referenced!
 *      - We are called under _bh lock
 */
static void __ip_vs_del_service(struct ip_vs_service *svc)
{
        struct ip_vs_dest *dest, *nxt;
        struct ip_vs_scheduler *old_sched;

        ip_vs_num_services--;
        ip_vs_kill_estimator(&svc->stats);

        /* Unbind scheduler */
        old_sched = svc->scheduler;
        ip_vs_unbind_scheduler(svc);
        if (old_sched)
                ip_vs_scheduler_put(old_sched);

        /* Unbind app inc */
        if (svc->inc) {
                ip_vs_app_inc_put(svc->inc);
                svc->inc = NULL;
        }

        /*
         *    Unlink the whole destination list
         */
        list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
                __ip_vs_unlink_dest(svc, dest, 0);
                __ip_vs_del_dest(dest);
        }

        /*
         *    Update the virtual service counters
         */
        if (svc->port == FTPPORT)
                atomic_dec(&ip_vs_ftpsvc_counter);
        else if (svc->port == 0)
                atomic_dec(&ip_vs_nullsvc_counter);

        /*
         *    Free the service if nobody refers to it
         */
        if (atomic_read(&svc->refcnt) == 0)
                kfree(svc);

        /* decrease the module use count */
        ip_vs_use_count_dec();
}
/*
 *      Delete a service from the service list
 */
static int ip_vs_del_service(struct ip_vs_service *svc)
{
        if (svc == NULL)
                return -EEXIST;

        /*
         * Unhash it from the service table
         */
        write_lock_bh(&__ip_vs_svc_lock);

        ip_vs_svc_unhash(svc);

        /*
         * Wait until all the svc users go away.
         */
        IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);

        __ip_vs_del_service(svc);

        write_unlock_bh(&__ip_vs_svc_lock);

        return 0;
}
/*
 *      Flush all the virtual services
 */
static int ip_vs_flush(void)
{
        int idx;
        struct ip_vs_service *svc, *nxt;

        /*
         * Flush the service table hashed by <protocol,addr,port>
         */
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], s_list) {
                        write_lock_bh(&__ip_vs_svc_lock);
                        ip_vs_svc_unhash(svc);
                        /*
                         * Wait until all the svc users go away.
                         */
                        IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0);
                        __ip_vs_del_service(svc);
                        write_unlock_bh(&__ip_vs_svc_lock);
                }
        }

        /*
         * Flush the service table hashed by fwmark
         */
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry_safe(svc, nxt,
                                         &ip_vs_svc_fwm_table[idx], f_list) {
                        write_lock_bh(&__ip_vs_svc_lock);
                        ip_vs_svc_unhash(svc);
                        /*
                         * Wait until all the svc users go away.
                         */
                        IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0);
                        __ip_vs_del_service(svc);
                        write_unlock_bh(&__ip_vs_svc_lock);
                }
        }

        return 0;
}
/*
 *      Zero counters in a service or all services
 */
static int ip_vs_zero_service(struct ip_vs_service *svc)
{
        struct ip_vs_dest *dest;

        write_lock_bh(&__ip_vs_svc_lock);
        list_for_each_entry(dest, &svc->destinations, n_list) {
                ip_vs_zero_stats(&dest->stats);
        }
        ip_vs_zero_stats(&svc->stats);
        write_unlock_bh(&__ip_vs_svc_lock);
        return 0;
}

static int ip_vs_zero_all(void)
{
        int idx;
        struct ip_vs_service *svc;

        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
                        ip_vs_zero_service(svc);
                }
        }

        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
                        ip_vs_zero_service(svc);
                }
        }

        ip_vs_zero_stats(&ip_vs_stats);
        return 0;
}
static int
proc_do_defense_mode(ctl_table *table, int write, struct file * filp,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int *valp = table->data;
        int val = *valp;
        int rc;

        rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
        if (write && (*valp != val)) {
                if ((*valp < 0) || (*valp > 3)) {
                        /* Restore the correct value */
                        *valp = val;
                } else {
                        update_defense_level();
                }
        }
        return rc;
}
static int
proc_do_sync_threshold(ctl_table *table, int write, struct file *filp,
                       void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int *valp = table->data;
        int val[2];
        int rc;

        /* backup the value first */
        memcpy(val, valp, sizeof(val));

        rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
        if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) {
                /* Restore the correct value */
                memcpy(valp, val, sizeof(val));
        }
        return rc;
}
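/*
 * sync_threshold is a pair {threshold, period}: with the defaults {3, 50}
 * the sync decision in ip_vs_core.c checks a connection's packet count
 * against this pair, so synchronization starts after a few packets and
 * then repeats periodically. The handler above only enforces
 * 0 <= threshold < period.
 */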
/*
 *      IPVS sysctl table (under the /proc/sys/net/ipv4/vs/)
 */

static struct ctl_table vs_vars[] = {
        {
                .procname       = "amemthresh",
                .data           = &sysctl_ip_vs_amemthresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
#ifdef CONFIG_IP_VS_DEBUG
        {
                .procname       = "debug_level",
                .data           = &sysctl_ip_vs_debug_level,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
#endif
        {
                .procname       = "am_droprate",
                .data           = &sysctl_ip_vs_am_droprate,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .procname       = "drop_entry",
                .data           = &sysctl_ip_vs_drop_entry,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_do_defense_mode,
        },
        {
                .procname       = "drop_packet",
                .data           = &sysctl_ip_vs_drop_packet,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_do_defense_mode,
        },
        {
                .procname       = "secure_tcp",
                .data           = &sysctl_ip_vs_secure_tcp,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_do_defense_mode,
        },
#if 0
        {
                .procname       = "timeout_established",
                .data   = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED],
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        {
                .procname       = "timeout_synsent",
                .data   = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT],
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        {
                .procname       = "timeout_synrecv",
                .data   = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV],
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        {
                .procname       = "timeout_finwait",
                .data   = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT],
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        {
                .procname       = "timeout_timewait",
                .data   = &vs_timeout_table_dos.timeout[IP_VS_S_TIME_WAIT],
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        {
                .procname       = "timeout_close",
                .data   = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE],
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        {
                .procname       = "timeout_closewait",
                .data   = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT],
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        {
                .procname       = "timeout_lastack",
                .data   = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK],
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        {
                .procname       = "timeout_listen",
                .data   = &vs_timeout_table_dos.timeout[IP_VS_S_LISTEN],
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        {
                .procname       = "timeout_synack",
                .data   = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK],
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        {
                .procname       = "timeout_udp",
                .data   = &vs_timeout_table_dos.timeout[IP_VS_S_UDP],
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        {
                .procname       = "timeout_icmp",
                .data   = &vs_timeout_table_dos.timeout[IP_VS_S_ICMP],
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
#endif
        {
                .procname       = "cache_bypass",
                .data           = &sysctl_ip_vs_cache_bypass,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .procname       = "expire_nodest_conn",
                .data           = &sysctl_ip_vs_expire_nodest_conn,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .procname       = "expire_quiescent_template",
                .data           = &sysctl_ip_vs_expire_quiescent_template,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .procname       = "sync_threshold",
                .data           = &sysctl_ip_vs_sync_threshold,
                .maxlen         = sizeof(sysctl_ip_vs_sync_threshold),
                .mode           = 0644,
                .proc_handler   = &proc_do_sync_threshold,
        },
        {
                .procname       = "nat_icmp_send",
                .data           = &sysctl_ip_vs_nat_icmp_send,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        { .ctl_name = 0 }
};
const struct ctl_path net_vs_ctl_path[] = {
        { .procname = "net", .ctl_name = CTL_NET, },
        { .procname = "ipv4", .ctl_name = NET_IPV4, },
        { .procname = "vs", },
        { }
};
EXPORT_SYMBOL_GPL(net_vs_ctl_path);
static struct ctl_table_header * sysctl_header;
#ifdef CONFIG_PROC_FS

struct ip_vs_iter {
        struct list_head *table;
        int bucket;
};

/*
 *      Write the contents of the VS rule table to a PROCfs file.
 *      (It is kept just for backward compatibility)
 */
static inline const char *ip_vs_fwd_name(unsigned flags)
{
        switch (flags & IP_VS_CONN_F_FWD_MASK) {
        case IP_VS_CONN_F_LOCALNODE:
                return "Local";
        case IP_VS_CONN_F_TUNNEL:
                return "Tunnel";
        case IP_VS_CONN_F_DROUTE:
                return "Route";
        default:
                return "Masq";
        }
}
/* Get the Nth entry in the two lists */
static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
{
        struct ip_vs_iter *iter = seq->private;
        int idx;
        struct ip_vs_service *svc;

        /* look in hash by protocol */
        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
                        if (pos-- == 0){
                                iter->table = ip_vs_svc_table;
                                iter->bucket = idx;
                                return svc;
                        }
                }
        }

        /* keep looking in fwmark */
        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
                        if (pos-- == 0) {
                                iter->table = ip_vs_svc_fwm_table;
                                iter->bucket = idx;
                                return svc;
                        }
                }
        }

        return NULL;
}
static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos)
{
        read_lock_bh(&__ip_vs_svc_lock);
        return *pos ? ip_vs_info_array(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct list_head *e;
        struct ip_vs_iter *iter;
        struct ip_vs_service *svc;

        ++*pos;
        if (v == SEQ_START_TOKEN)
                return ip_vs_info_array(seq,0);

        svc = v;
        iter = seq->private;

        if (iter->table == ip_vs_svc_table) {
                /* next service in table hashed by protocol */
                if ((e = svc->s_list.next) != &ip_vs_svc_table[iter->bucket])
                        return list_entry(e, struct ip_vs_service, s_list);

                while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
                        list_for_each_entry(svc,&ip_vs_svc_table[iter->bucket],
                                            s_list) {
                                return svc;
                        }
                }

                iter->table = ip_vs_svc_fwm_table;
                iter->bucket = -1;
                goto scan_fwmark;
        }

        /* next service in hashed by fwmark */
        if ((e = svc->f_list.next) != &ip_vs_svc_fwm_table[iter->bucket])
                return list_entry(e, struct ip_vs_service, f_list);

 scan_fwmark:
        while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[iter->bucket],
                                    f_list)
                        return svc;
        }

        return NULL;
}
static void ip_vs_info_seq_stop(struct seq_file *seq, void *v)
{
        read_unlock_bh(&__ip_vs_svc_lock);
}
static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN) {
                seq_printf(seq,
                        "IP Virtual Server version %d.%d.%d (size=%d)\n",
                        NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE);
                seq_puts(seq,
                         "Prot LocalAddress:Port Scheduler Flags\n");
                seq_puts(seq,
                         "  -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n");
        } else {
                const struct ip_vs_service *svc = v;
                const struct ip_vs_iter *iter = seq->private;
                const struct ip_vs_dest *dest;

                if (iter->table == ip_vs_svc_table)
                        seq_printf(seq, "%s  %08X:%04X %s ",
                                   ip_vs_proto_name(svc->protocol),
                                   ntohl(svc->addr),
                                   ntohs(svc->port),
                                   svc->scheduler->name);
                else
                        seq_printf(seq, "FWM  %08X %s ",
                                   svc->fwmark, svc->scheduler->name);

                if (svc->flags & IP_VS_SVC_F_PERSISTENT)
                        seq_printf(seq, "persistent %d %08X\n",
                                   svc->timeout,
                                   ntohl(svc->netmask));
                else
                        seq_putc(seq, '\n');

                list_for_each_entry(dest, &svc->destinations, n_list) {
                        seq_printf(seq,
                                   "  -> %08X:%04X      %-7s %-6d %-10d %-10d\n",
                                   ntohl(dest->addr), ntohs(dest->port),
                                   ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
                                   atomic_read(&dest->weight),
                                   atomic_read(&dest->activeconns),
                                   atomic_read(&dest->inactconns));
                }
        }
        return 0;
}
static const struct seq_operations ip_vs_info_seq_ops = {
        .start = ip_vs_info_seq_start,
        .next  = ip_vs_info_seq_next,
        .stop  = ip_vs_info_seq_stop,
        .show  = ip_vs_info_seq_show,
};

static int ip_vs_info_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &ip_vs_info_seq_ops,
                        sizeof(struct ip_vs_iter));
}

static const struct file_operations ip_vs_info_fops = {
        .owner   = THIS_MODULE,
        .open    = ip_vs_info_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif
struct ip_vs_stats ip_vs_stats = {
        .lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
};
#ifdef CONFIG_PROC_FS
static int ip_vs_stats_show(struct seq_file *seq, void *v)
{

/*               01234567 01234567 01234567 0123456701234567 0123456701234567 */
        seq_puts(seq,
                 "   Total Incoming Outgoing         Incoming         Outgoing\n");
        seq_printf(seq,
                   "   Conns  Packets  Packets            Bytes            Bytes\n");

        spin_lock_bh(&ip_vs_stats.lock);
        seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.conns,
                   ip_vs_stats.inpkts, ip_vs_stats.outpkts,
                   (unsigned long long) ip_vs_stats.inbytes,
                   (unsigned long long) ip_vs_stats.outbytes);

/*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
        seq_puts(seq,
                   " Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
        seq_printf(seq,"%8X %8X %8X %16X %16X\n",
                        ip_vs_stats.cps,
                        ip_vs_stats.inpps,
                        ip_vs_stats.outpps,
                        ip_vs_stats.inbps,
                        ip_vs_stats.outbps);
        spin_unlock_bh(&ip_vs_stats.lock);

        return 0;
}
static int ip_vs_stats_seq_open(struct inode *inode, struct file *file)
{
        return single_open(file, ip_vs_stats_show, NULL);
}

static const struct file_operations ip_vs_stats_fops = {
        .owner = THIS_MODULE,
        .open = ip_vs_stats_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

#endif
/*
 *      Set timeout values for tcp tcpfin udp in the timeout_table.
 */
static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
{
        IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n",
                  u->tcp_timeout,
                  u->tcp_fin_timeout,
                  u->udp_timeout);

#ifdef CONFIG_IP_VS_PROTO_TCP
        if (u->tcp_timeout) {
                ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED]
                        = u->tcp_timeout * HZ;
        }

        if (u->tcp_fin_timeout) {
                ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT]
                        = u->tcp_fin_timeout * HZ;
        }
#endif

#ifdef CONFIG_IP_VS_PROTO_UDP
        if (u->udp_timeout) {
                ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL]
                        = u->udp_timeout * HZ;
        }
#endif
        return 0;
}
#define SET_CMDID(cmd)          (cmd - IP_VS_BASE_CTL)
#define SERVICE_ARG_LEN         (sizeof(struct ip_vs_service_user))
#define SVCDEST_ARG_LEN         (sizeof(struct ip_vs_service_user) +   \
                                 sizeof(struct ip_vs_dest_user))
#define TIMEOUT_ARG_LEN         (sizeof(struct ip_vs_timeout_user))
#define DAEMON_ARG_LEN          (sizeof(struct ip_vs_daemon_user))
#define MAX_ARG_LEN             SVCDEST_ARG_LEN

static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = {
        [SET_CMDID(IP_VS_SO_SET_ADD)]           = SERVICE_ARG_LEN,
        [SET_CMDID(IP_VS_SO_SET_EDIT)]          = SERVICE_ARG_LEN,
        [SET_CMDID(IP_VS_SO_SET_DEL)]           = SERVICE_ARG_LEN,
        [SET_CMDID(IP_VS_SO_SET_FLUSH)]         = 0,
        [SET_CMDID(IP_VS_SO_SET_ADDDEST)]       = SVCDEST_ARG_LEN,
        [SET_CMDID(IP_VS_SO_SET_DELDEST)]       = SVCDEST_ARG_LEN,
        [SET_CMDID(IP_VS_SO_SET_EDITDEST)]      = SVCDEST_ARG_LEN,
        [SET_CMDID(IP_VS_SO_SET_TIMEOUT)]       = TIMEOUT_ARG_LEN,
        [SET_CMDID(IP_VS_SO_SET_STARTDAEMON)]   = DAEMON_ARG_LEN,
        [SET_CMDID(IP_VS_SO_SET_STOPDAEMON)]    = DAEMON_ARG_LEN,
        [SET_CMDID(IP_VS_SO_SET_ZERO)]          = SERVICE_ARG_LEN,
};
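/*
 * Every SET command carries a fixed-size argument block; the table above
 * lets do_ip_vs_set_ctl() reject a sockopt call whose length does not
 * match the command exactly.
 */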
static int
do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
        int ret;
        unsigned char arg[MAX_ARG_LEN];
        struct ip_vs_service_user *usvc;
        struct ip_vs_service *svc;
        struct ip_vs_dest_user *udest;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (len != set_arglen[SET_CMDID(cmd)]) {
                IP_VS_ERR("set_ctl: len %u != %u\n",
                          len, set_arglen[SET_CMDID(cmd)]);
                return -EINVAL;
        }

        if (copy_from_user(arg, user, len) != 0)
                return -EFAULT;

        /* increase the module use count */
        ip_vs_use_count_inc();

        if (mutex_lock_interruptible(&__ip_vs_mutex)) {
                ret = -ERESTARTSYS;
                goto out_dec;
        }

        if (cmd == IP_VS_SO_SET_FLUSH) {
                /* Flush the virtual service */
                ret = ip_vs_flush();
                goto out_unlock;
        } else if (cmd == IP_VS_SO_SET_TIMEOUT) {
                /* Set timeout values for (tcp tcpfin udp) */
                ret = ip_vs_set_timeout((struct ip_vs_timeout_user *)arg);
                goto out_unlock;
        } else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
                struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
                ret = start_sync_thread(dm->state, dm->mcast_ifn, dm->syncid);
                goto out_unlock;
        } else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
                struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
                ret = stop_sync_thread(dm->state);
                goto out_unlock;
        }

        usvc = (struct ip_vs_service_user *)arg;
        udest = (struct ip_vs_dest_user *)(usvc + 1);

        if (cmd == IP_VS_SO_SET_ZERO) {
                /* if no service address is set, zero counters in all */
                if (!usvc->fwmark && !usvc->addr && !usvc->port) {
                        ret = ip_vs_zero_all();
                        goto out_unlock;
                }
        }

        /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */
        if (usvc->protocol!=IPPROTO_TCP && usvc->protocol!=IPPROTO_UDP) {
                IP_VS_ERR("set_ctl: invalid protocol: %d %d.%d.%d.%d:%d %s\n",
                          usvc->protocol, NIPQUAD(usvc->addr),
                          ntohs(usvc->port), usvc->sched_name);
                ret = -EFAULT;
                goto out_unlock;
        }

        /* Lookup the exact service by <protocol, addr, port> or fwmark */
        if (usvc->fwmark == 0)
                svc = __ip_vs_service_get(usvc->protocol,
                                          usvc->addr, usvc->port);
        else
                svc = __ip_vs_svc_fwm_get(usvc->fwmark);

        if (cmd != IP_VS_SO_SET_ADD
            && (svc == NULL || svc->protocol != usvc->protocol)) {
                ret = -ESRCH;
                goto out_unlock;
        }

        switch (cmd) {
        case IP_VS_SO_SET_ADD:
                if (svc != NULL)
                        ret = -EEXIST;
                else
                        ret = ip_vs_add_service(usvc, &svc);
                break;
        case IP_VS_SO_SET_EDIT:
                ret = ip_vs_edit_service(svc, usvc);
                break;
        case IP_VS_SO_SET_DEL:
                ret = ip_vs_del_service(svc);
                if (!ret)
                        goto out_unlock;
                break;
        case IP_VS_SO_SET_ZERO:
                ret = ip_vs_zero_service(svc);
                break;
        case IP_VS_SO_SET_ADDDEST:
                ret = ip_vs_add_dest(svc, udest);
                break;
        case IP_VS_SO_SET_EDITDEST:
                ret = ip_vs_edit_dest(svc, udest);
                break;
        case IP_VS_SO_SET_DELDEST:
                ret = ip_vs_del_dest(svc, udest);
                break;
        default:
                ret = -EINVAL;
        }

        if (svc)
                ip_vs_service_put(svc);

  out_unlock:
        mutex_unlock(&__ip_vs_mutex);
  out_dec:
        /* decrease the module use count */
        ip_vs_use_count_dec();

        return ret;
}
static void
ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
{
        spin_lock_bh(&src->lock);
        memcpy(dst, src, (char*)&src->lock - (char*)src);
        spin_unlock_bh(&src->lock);
}
static void
ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
{
        dst->protocol = src->protocol;
        dst->addr = src->addr;
        dst->port = src->port;
        dst->fwmark = src->fwmark;
        strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name));
        dst->flags = src->flags;
        dst->timeout = src->timeout / HZ;
        dst->netmask = src->netmask;
        dst->num_dests = src->num_dests;
        ip_vs_copy_stats(&dst->stats, &src->stats);
}
static inline int
__ip_vs_get_service_entries(const struct ip_vs_get_services *get,
                            struct ip_vs_get_services __user *uptr)
{
        int idx, count=0;
        struct ip_vs_service *svc;
        struct ip_vs_service_entry entry;
        int ret = 0;

        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
                        if (count >= get->num_services)
                                goto out;
                        memset(&entry, 0, sizeof(entry));
                        ip_vs_copy_service(&entry, svc);
                        if (copy_to_user(&uptr->entrytable[count],
                                         &entry, sizeof(entry))) {
                                ret = -EFAULT;
                                goto out;
                        }
                        count++;
                }
        }

        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
                        if (count >= get->num_services)
                                goto out;
                        memset(&entry, 0, sizeof(entry));
                        ip_vs_copy_service(&entry, svc);
                        if (copy_to_user(&uptr->entrytable[count],
                                         &entry, sizeof(entry))) {
                                ret = -EFAULT;
                                goto out;
                        }
                        count++;
                }
        }
  out:
        return ret;
}
static inline int
__ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
                         struct ip_vs_get_dests __user *uptr)
{
        struct ip_vs_service *svc;
        int ret = 0;

        if (get->fwmark)
                svc = __ip_vs_svc_fwm_get(get->fwmark);
        else
                svc = __ip_vs_service_get(get->protocol,
                                          get->addr, get->port);
        if (svc) {
                int count = 0;
                struct ip_vs_dest *dest;
                struct ip_vs_dest_entry entry;

                list_for_each_entry(dest, &svc->destinations, n_list) {
                        if (count >= get->num_dests)
                                break;

                        entry.addr = dest->addr;
                        entry.port = dest->port;
                        entry.conn_flags = atomic_read(&dest->conn_flags);
                        entry.weight = atomic_read(&dest->weight);
                        entry.u_threshold = dest->u_threshold;
                        entry.l_threshold = dest->l_threshold;
                        entry.activeconns = atomic_read(&dest->activeconns);
                        entry.inactconns = atomic_read(&dest->inactconns);
                        entry.persistconns = atomic_read(&dest->persistconns);
                        ip_vs_copy_stats(&entry.stats, &dest->stats);
                        if (copy_to_user(&uptr->entrytable[count],
                                         &entry, sizeof(entry))) {
                                ret = -EFAULT;
                                break;
                        }
                        count++;
                }
                ip_vs_service_put(svc);
        } else
                ret = -ESRCH;
        return ret;
}
static inline void
__ip_vs_get_timeouts(struct ip_vs_timeout_user *u)
{
#ifdef CONFIG_IP_VS_PROTO_TCP
        u->tcp_timeout =
                ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
        u->tcp_fin_timeout =
                ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
        u->udp_timeout =
                ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
#endif
}
#define GET_CMDID(cmd)          (cmd - IP_VS_BASE_CTL)
#define GET_INFO_ARG_LEN        (sizeof(struct ip_vs_getinfo))
#define GET_SERVICES_ARG_LEN    (sizeof(struct ip_vs_get_services))
#define GET_SERVICE_ARG_LEN     (sizeof(struct ip_vs_service_entry))
#define GET_DESTS_ARG_LEN       (sizeof(struct ip_vs_get_dests))
#define GET_TIMEOUT_ARG_LEN     (sizeof(struct ip_vs_timeout_user))
#define GET_DAEMON_ARG_LEN      (sizeof(struct ip_vs_daemon_user) * 2)

static const unsigned char get_arglen[GET_CMDID(IP_VS_SO_GET_MAX)+1] = {
        [GET_CMDID(IP_VS_SO_GET_VERSION)]       = 64,
        [GET_CMDID(IP_VS_SO_GET_INFO)]          = GET_INFO_ARG_LEN,
        [GET_CMDID(IP_VS_SO_GET_SERVICES)]      = GET_SERVICES_ARG_LEN,
        [GET_CMDID(IP_VS_SO_GET_SERVICE)]       = GET_SERVICE_ARG_LEN,
        [GET_CMDID(IP_VS_SO_GET_DESTS)]         = GET_DESTS_ARG_LEN,
        [GET_CMDID(IP_VS_SO_GET_TIMEOUT)]       = GET_TIMEOUT_ARG_LEN,
        [GET_CMDID(IP_VS_SO_GET_DAEMON)]        = GET_DAEMON_ARG_LEN,
};
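/*
 * On the GET side the lengths are only minimums: GET_SERVICES and
 * GET_DESTS replies are variable-sized, so do_ip_vs_get_ctl() checks
 * "*len < get_arglen[...]" here and validates the full size per command.
 */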
static int
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
        unsigned char arg[128];
        int ret = 0;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (*len < get_arglen[GET_CMDID(cmd)]) {
                IP_VS_ERR("get_ctl: len %u < %u\n",
                          *len, get_arglen[GET_CMDID(cmd)]);
                return -EINVAL;
        }

        if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0)
                return -EFAULT;

        if (mutex_lock_interruptible(&__ip_vs_mutex))
                return -ERESTARTSYS;

        switch (cmd) {
        case IP_VS_SO_GET_VERSION:
        {
                char buf[64];

                sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
                        NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE);
                if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
                        ret = -EFAULT;
                        goto out;
                }
                *len = strlen(buf)+1;
        }
        break;

        case IP_VS_SO_GET_INFO:
        {
                struct ip_vs_getinfo info;
                info.version = IP_VS_VERSION_CODE;
                info.size = IP_VS_CONN_TAB_SIZE;
                info.num_services = ip_vs_num_services;
                if (copy_to_user(user, &info, sizeof(info)) != 0)
                        ret = -EFAULT;
        }
        break;

        case IP_VS_SO_GET_SERVICES:
        {
                struct ip_vs_get_services *get;
                int size;

                get = (struct ip_vs_get_services *)arg;
                size = sizeof(*get) +
                        sizeof(struct ip_vs_service_entry) * get->num_services;
                if (*len != size) {
                        IP_VS_ERR("length: %u != %u\n", *len, size);
                        ret = -EINVAL;
                        goto out;
                }
                ret = __ip_vs_get_service_entries(get, user);
        }
        break;

        case IP_VS_SO_GET_SERVICE:
        {
                struct ip_vs_service_entry *entry;
                struct ip_vs_service *svc;

                entry = (struct ip_vs_service_entry *)arg;
                if (entry->fwmark)
                        svc = __ip_vs_svc_fwm_get(entry->fwmark);
                else
                        svc = __ip_vs_service_get(entry->protocol,
                                                  entry->addr, entry->port);
                if (svc) {
                        ip_vs_copy_service(entry, svc);
                        if (copy_to_user(user, entry, sizeof(*entry)) != 0)
                                ret = -EFAULT;
                        ip_vs_service_put(svc);
                } else
                        ret = -ESRCH;
        }
        break;

        case IP_VS_SO_GET_DESTS:
        {
                struct ip_vs_get_dests *get;
                int size;

                get = (struct ip_vs_get_dests *)arg;
                size = sizeof(*get) +
                        sizeof(struct ip_vs_dest_entry) * get->num_dests;
                if (*len != size) {
                        IP_VS_ERR("length: %u != %u\n", *len, size);
                        ret = -EINVAL;
                        goto out;
                }
                ret = __ip_vs_get_dest_entries(get, user);
        }
        break;

        case IP_VS_SO_GET_TIMEOUT:
        {
                struct ip_vs_timeout_user t;

                __ip_vs_get_timeouts(&t);
                if (copy_to_user(user, &t, sizeof(t)) != 0)
                        ret = -EFAULT;
        }
        break;

        case IP_VS_SO_GET_DAEMON:
        {
                struct ip_vs_daemon_user d[2];

                memset(&d, 0, sizeof(d));
                if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
                        d[0].state = IP_VS_STATE_MASTER;
                        strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
                        d[0].syncid = ip_vs_master_syncid;
                }
                if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
                        d[1].state = IP_VS_STATE_BACKUP;
                        strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
                        d[1].syncid = ip_vs_backup_syncid;
                }
                if (copy_to_user(user, &d, sizeof(d)) != 0)
                        ret = -EFAULT;
        }
        break;

        default:
                ret = -EINVAL;
        }

  out:
        mutex_unlock(&__ip_vs_mutex);
        return ret;
}
static struct nf_sockopt_ops ip_vs_sockopts = {
        .pf             = PF_INET,
        .set_optmin     = IP_VS_BASE_CTL,
        .set_optmax     = IP_VS_SO_SET_MAX+1,
        .set            = do_ip_vs_set_ctl,
        .get_optmin     = IP_VS_BASE_CTL,
        .get_optmax     = IP_VS_SO_GET_MAX+1,
        .get            = do_ip_vs_get_ctl,
        .owner          = THIS_MODULE,
};
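/*
 * Userspace (ipvsadm) reaches the two handlers above through
 * [gs]etsockopt() on an IPv4 socket, with option numbers in the
 * IP_VS_BASE_CTL..IP_VS_SO_*_MAX range registered here.
 */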
int __init ip_vs_control_init(void)
{
        int ret;
        int idx;

        EnterFunction(2);

        ret = nf_register_sockopt(&ip_vs_sockopts);
        if (ret) {
                IP_VS_ERR("cannot register sockopt.\n");
                return ret;
        }

        proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops);
        proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops);

        sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars);

        /* Initialize ip_vs_svc_table, ip_vs_svc_fwm_table, ip_vs_rtable */
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++)  {
                INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
                INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
        }
        for(idx = 0; idx < IP_VS_RTAB_SIZE; idx++)  {
                INIT_LIST_HEAD(&ip_vs_rtable[idx]);
        }

        ip_vs_new_estimator(&ip_vs_stats);

        /* Hook the defense timer */
        schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);

        LeaveFunction(2);
        return 0;
}
void ip_vs_control_cleanup(void)
{
        EnterFunction(2);
        ip_vs_trash_cleanup();
        cancel_rearming_delayed_work(&defense_work);
        cancel_work_sync(&defense_work.work);
        ip_vs_kill_estimator(&ip_vs_stats);
        unregister_sysctl_table(sysctl_header);
        proc_net_remove(&init_net, "ip_vs_stats");
        proc_net_remove(&init_net, "ip_vs");
        nf_unregister_sockopt(&ip_vs_sockopts);
        LeaveFunction(2);
}