/*
 * IPVS:	Locality-Based Least-Connection scheduling module
 *
 * Authors:	Wensong Zhang <wensong@gnuchina.org>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Martin Hamilton         :    fixed the terrible locking bugs
 *                                   *lock(tbl->lock) ==> *lock(&tbl->lock)
 *     Wensong Zhang           :    fixed the uninitialized tbl->lock bug
 *     Wensong Zhang           :    added a full expiration check to
 *                                   collect stale entries of 24+ hours when
 *                                   no partial expire check ran in a half hour
 *     Julian Anastasov        :    replaced del_timer call with del_timer_sync
 *                                   to avoid the possible race between timer
 *                                   handler and del_timer thread in SMP
 *
 */
/*
 * The lblc algorithm is as follows (pseudo code):
 *
 *	if cachenode[dest_ip] is null then
 *		n, cachenode[dest_ip] <- {weighted least-conn node};
 *	else
 *		n <- cachenode[dest_ip];
 *		if (n is dead) OR
 *		   (n.conns > n.weight AND
 *		    there is a node m with m.conns < m.weight/2) then
 *			n, cachenode[dest_ip] <- {weighted least-conn node};
 *
 *	return n;
 *
 * Thanks must go to Wenzhuo Zhang for talking WCCP to me and pushing
 * me to write this module.
 */
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>

#include <net/ip_vs.h>

/*
 * It is for garbage collection of stale IPVS lblc entries,
 * when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

/*
 * It is for full expiration check.
 * When there is no partial expiration check (garbage collection)
 * in a half hour, do a full expiration check to collect stale
 * entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ;
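
/*
 * Worked timing example (derived from the constants above): the expire
 * timer fires every CHECK_EXPIRE_INTERVAL = 60*HZ, i.e. once a minute,
 * so a full expiration sweep runs at most every
 * COUNT_FOR_FULL_EXPIRATION intervals: 30 * 60 s = 30 minutes, the
 * "half hour" mentioned above. Entries idle for longer than
 * sysctl_ip_vs_lblc_expiration (24*60*60*HZ, one day by default) are
 * collected by that sweep.
 */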

/*
 * for IPVS lblc entry hash table
 */
#ifndef CONFIG_IP_VS_LBLC_TAB_BITS
#define CONFIG_IP_VS_LBLC_TAB_BITS      10
#endif
#define IP_VS_LBLC_TAB_BITS     CONFIG_IP_VS_LBLC_TAB_BITS
#define IP_VS_LBLC_TAB_SIZE     (1 << IP_VS_LBLC_TAB_BITS)
#define IP_VS_LBLC_TAB_MASK     (IP_VS_LBLC_TAB_SIZE - 1)
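
/*
 * Sizing example (from the defaults above): with the default
 * CONFIG_IP_VS_LBLC_TAB_BITS of 10, the table has 1 << 10 = 1024
 * buckets and IP_VS_LBLC_TAB_MASK is 0x3ff. Since ip_vs_lblc_init_svc()
 * below sets max_size = IP_VS_LBLC_TAB_SIZE * 16, garbage collection
 * starts kicking in at roughly 16384 cached entries.
 */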

/*
 * IPVS lblc entry represents an association between destination
 * IP address and its destination server
 */
struct ip_vs_lblc_entry {
	struct list_head        list;
	__be32                  addr;           /* destination IP address */
	struct ip_vs_dest       *dest;          /* real server (cache) */
	unsigned long           lastuse;        /* last used time */
};

/*
 * IPVS lblc hash table
 */
struct ip_vs_lblc_table {
	struct list_head        bucket[IP_VS_LBLC_TAB_SIZE];  /* hash bucket */
	atomic_t                entries;        /* number of entries */
	int                     max_size;       /* maximum size of entries */
	struct timer_list       periodic_timer; /* collect stale entries */
	int                     rover;          /* rover for expire check */
	int                     counter;        /* counter for no expire */
};

/*
 * IPVS LBLC sysctl table
 */
static ctl_table vs_vars_table[] = {
	{
		.procname	= "lblc_expiration",
		.data		= &sysctl_ip_vs_lblc_expiration,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table_header * sysctl_header;
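
/*
 * Usage sketch (an assumption about the deployment environment, not
 * part of this file): IPVS sysctls are registered under net.ipv4.vs,
 * and proc_dointvec_jiffies converts between userspace seconds and
 * kernel jiffies, so the default of one day could be shortened with
 * e.g. "sysctl -w net.ipv4.vs.lblc_expiration=3600".
 */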

static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
	list_del(&en->list);
	/*
	 * We don't kfree dest because it is referred to either by its
	 * service or the trash dest list.
	 */
	atomic_dec(&en->dest->refcnt);
	kfree(en);
}

/*
 * Returns hash value for IPVS LBLC entry
 */
static inline unsigned ip_vs_lblc_hashkey(__be32 addr)
{
	return (ntohl(addr)*2654435761UL) & IP_VS_LBLC_TAB_MASK;
}
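
/*
 * A minimal sketch of what the hash key does: 2654435761 is the
 * golden-ratio constant used for Knuth-style multiplicative hashing (a
 * prime close to 2^32 / phi), so nearby addresses scatter well across
 * buckets. For example, for the (hypothetical) address 192.168.1.1 the
 * key is 0xc0a80101 * 2654435761 truncated to 32 bits, then masked down
 * to the low IP_VS_LBLC_TAB_BITS bits to pick one of the (by default)
 * 1024 buckets.
 */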

/*
 * Hash an entry in the ip_vs_lblc_table.
 */
static void
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{
	unsigned hash = ip_vs_lblc_hashkey(en->addr);

	list_add(&en->list, &tbl->bucket[hash]);
	atomic_inc(&tbl->entries);
}

/*
 * Get ip_vs_lblc_entry associated with supplied parameters. Called under
 * read lock.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
{
	unsigned hash = ip_vs_lblc_hashkey(addr);
	struct ip_vs_lblc_entry *en;

	list_for_each_entry(en, &tbl->bucket[hash], list)
		if (en->addr == addr)
			return en;

	return NULL;
}

/*
 * Create or update an ip_vs_lblc_entry, which is a mapping of a destination
 * IP address to a server. Called under write lock.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, __be32 daddr,
	       struct ip_vs_dest *dest)
{
	struct ip_vs_lblc_entry *en;

	en = ip_vs_lblc_get(tbl, daddr);
	if (!en) {
		en = kmalloc(sizeof(*en), GFP_ATOMIC);
		if (!en) {
			IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
			return NULL;
		}

		en->addr = daddr;
		en->lastuse = jiffies;

		atomic_inc(&dest->refcnt);
		en->dest = dest;

		ip_vs_lblc_hash(tbl, en);
	} else if (en->dest != dest) {
		/* retarget the cached entry at the new server */
		atomic_dec(&en->dest->refcnt);
		atomic_inc(&dest->refcnt);
		en->dest = dest;
	}

	return en;
}

/*
 * Flush all the entries of the specified table.
 */
static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
{
	struct ip_vs_lblc_entry *en, *nxt;
	int i;

	for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
		list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
			ip_vs_lblc_free(en);
			atomic_dec(&tbl->entries);
		}
	}
}

static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
{
	struct ip_vs_lblc_table *tbl = svc->sched_data;
	struct ip_vs_lblc_entry *en, *nxt;
	unsigned long now = jiffies;
	int i, j;

	for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLC_TAB_MASK;

		write_lock(&svc->sched_lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
			if (time_before(now,
					en->lastuse +
					sysctl_ip_vs_lblc_expiration))
				continue;

			ip_vs_lblc_free(en);
			atomic_dec(&tbl->entries);
		}
		write_unlock(&svc->sched_lock);
	}
	tbl->rover = j;
}

/*
 * Periodic timer handler for the IPVS lblc table.
 * It is used to collect stale entries when the number of entries
 * exceeds the maximum size of the table.
 *
 * Fixme: we probably need a more complicated algorithm to collect
 *        entries that have not been used for a long time even
 *        if the number of entries doesn't exceed the maximum size
 *        of the table.
 * The full expiration check is for this purpose now.
 */
static void ip_vs_lblc_check_expire(unsigned long data)
{
	struct ip_vs_service *svc = (struct ip_vs_service *) data;
	struct ip_vs_lblc_table *tbl = svc->sched_data;
	unsigned long now = jiffies;
	int goal;
	int i, j;
	struct ip_vs_lblc_entry *en, *nxt;

	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
		/* do full expiration check */
		ip_vs_lblc_full_check(svc);
		tbl->counter = 1;
		goto out;
	}

	if (atomic_read(&tbl->entries) <= tbl->max_size) {
		tbl->counter++;
		goto out;
	}

	goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
	if (goal > tbl->max_size/2)
		goal = tbl->max_size/2;
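
	/*
	 * Worked example of the eviction goal above (numbers are
	 * illustrative, not from the source): with max_size = 16384 and
	 * 17000 entries, goal = (17000 - 16384) * 4 / 3 = 821 entries to
	 * evict, i.e. slightly more than the excess so the table drops
	 * below the limit; the clamp to max_size/2 bounds a single pass.
	 */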
	for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLC_TAB_MASK;

		write_lock(&svc->sched_lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
				continue;

			ip_vs_lblc_free(en);
			atomic_dec(&tbl->entries);
			goal--;
		}
		write_unlock(&svc->sched_lock);
		if (goal <= 0)
			break;
	}
	tbl->rover = j;

  out:
	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
}

static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
{
	int i;
	struct ip_vs_lblc_table *tbl;

	/*
	 * Allocate the ip_vs_lblc_table for this service
	 */
	tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
	if (tbl == NULL) {
		IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n");
		return -ENOMEM;
	}
	svc->sched_data = tbl;
	IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
		  "current service\n", sizeof(*tbl));

	/*
	 * Initialize the hash buckets
	 */
	for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
		INIT_LIST_HEAD(&tbl->bucket[i]);
	}
	tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
	tbl->rover = 0;
	tbl->counter = 1;

	/*
	 * Hook periodic timer for garbage collection
	 */
	setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire,
		    (unsigned long)svc);
	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

	return 0;
}

static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_lblc_table *tbl = svc->sched_data;

	/* remove periodic timer */
	del_timer_sync(&tbl->periodic_timer);

	/* got to clean up table entries here */
	ip_vs_lblc_flush(tbl);

	/* release the table itself */
	kfree(tbl);
	IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
		  sizeof(*tbl));

	return 0;
}

static inline struct ip_vs_dest *
__ip_vs_lblc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
{
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/*
	 * We think the overhead of processing active connections is fifty
	 * times higher than that of inactive connections on average. (This
	 * factor of fifty might not be accurate; we will change it later.)
	 * We use the following formula to estimate the overhead:
	 *		dest->activeconns*50 + dest->inactconns
	 * and the load:
	 *		(dest overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 * h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connection.
	 */
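
	/*
	 * Worked example of the integer comparison above (hypothetical
	 * numbers, not from the source): server A with 10 active and 20
	 * inactive connections has overhead 10*50 + 20 = 520; server B
	 * with 3 active and 5 inactive has overhead 3*50 + 5 = 155. With
	 * weights wA = 2 and wB = 1, comparing 520*wB = 520 against
	 * 155*wA = 310 picks B, exactly as comparing the loads
	 * 520/2 = 260 and 155/1 = 155 would, but without division.
	 */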
	list_for_each_entry(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		if (atomic_read(&dest->weight) > 0) {
			least = dest;
			loh = atomic_read(&least->activeconns) * 50
				+ atomic_read(&least->inactconns);
			goto nextstage;
		}
	}
	return NULL;

	/*
	 * Find the destination with the least load.
	 */
  nextstage:
	list_for_each_entry_continue(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		if (loh * atomic_read(&dest->weight) >
		    doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG(6, "LBLC: server %d.%d.%d.%d:%d "
		  "activeconns %d refcnt %d weight %d overhead %d\n",
		  NIPQUAD(least->addr), ntohs(least->port),
		  atomic_read(&least->activeconns),
		  atomic_read(&least->refcnt),
		  atomic_read(&least->weight), loh);

	return least;
}

/*
 * If this destination server is overloaded and there is a less loaded
 * server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
	if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
		struct ip_vs_dest *d;

		list_for_each_entry(d, &svc->destinations, n_list) {
			if (atomic_read(&d->activeconns)*2
			    < atomic_read(&d->weight)) {
				return 1;
			}
		}
	}
	return 0;
}
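
/*
 * Note how is_overloaded() above implements the pseudo-code condition
 * from the file header: "n.conns > n.weight AND there is a node m with
 * m.conns < m.weight/2". The m.conns < m.weight/2 test is written as
 * m.conns*2 < m.weight to stay in integer arithmetic. E.g.
 * (illustrative numbers) a dest with weight 4 and 6 active connections
 * counts as overloaded only if some other server with weight 8
 * currently has at most 3 active connections.
 */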

/*
 * Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
	struct ip_vs_lblc_table *tbl = svc->sched_data;
	struct iphdr *iph = ip_hdr(skb);
	struct ip_vs_dest *dest = NULL;
	struct ip_vs_lblc_entry *en;

	IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n");

	/* First look in our cache */
	read_lock(&svc->sched_lock);
	en = ip_vs_lblc_get(tbl, iph->daddr);
	if (en) {
		/* We only hold a read lock, but this is atomic */
		en->lastuse = jiffies;

		/*
		 * If the destination is not available, i.e. it's in the
		 * trash, we must ignore it, as it may be removed from under
		 * our feet, if someone drops our reference count. Our caller
		 * only makes sure that destinations that are not in the
		 * trash are not moved to the trash while we are scheduling.
		 * But anyone can free up entries from the trash at any time.
		 */
		if (en->dest->flags & IP_VS_DEST_F_AVAILABLE)
			dest = en->dest;
	}
	read_unlock(&svc->sched_lock);

	/* If the destination has a weight and is not overloaded, use it */
	if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
		goto out;

	/* No cache entry or it is invalid, time to schedule */
	dest = __ip_vs_lblc_schedule(svc, iph);
	if (!dest) {
		IP_VS_DBG(1, "no destination available\n");
		return NULL;
	}

	/* If we fail to create a cache entry, we'll just use the valid dest */
	write_lock(&svc->sched_lock);
	ip_vs_lblc_new(tbl, iph->daddr, dest);
	write_unlock(&svc->sched_lock);

  out:
	IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u "
		  "--> server %u.%u.%u.%u:%d\n",
		  NIPQUAD(iph->daddr),
		  NIPQUAD(dest->addr),
		  ntohs(dest->port));

	return dest;
}

/*
 * IPVS LBLC Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblc_scheduler =
{
	.name =			"lblc",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.n_list =		LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
	.init_service =		ip_vs_lblc_init_svc,
	.done_service =		ip_vs_lblc_done_svc,
	.schedule =		ip_vs_lblc_schedule,
};

static int __init ip_vs_lblc_init(void)
{
	int ret;

	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
	ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
	if (ret)
		unregister_sysctl_table(sysctl_header);
	return ret;
}

static void __exit ip_vs_lblc_cleanup(void)
{
	unregister_sysctl_table(sysctl_header);
	unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
}

module_init(ip_vs_lblc_init);
module_exit(ip_vs_lblc_cleanup);
MODULE_LICENSE("GPL");
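
/*
 * Usage sketch (an assumption about the admin tooling, not part of this
 * file): once the module is loaded, the scheduler is selected per
 * virtual service with ipvsadm, e.g.
 *
 *	ipvsadm -A -t 192.0.2.10:80 -s lblc
 *
 * after which connections to that virtual service are scheduled by
 * ip_vs_lblc_schedule() above.
 */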