/*
 *  linux/drivers/block/deadline-iosched.c
 *
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
/*
 * See Documentation/block/deadline-iosched.txt
 */
static int read_expire = HZ / 2;  /* max time before a read is submitted. */
static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static int writes_starved = 2;    /* max times reads can starve a write */
static int fifo_batch = 16;       /* # of sequential requests treated as one
				     by the above parameters. For throughput. */
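
/*
 * Editor's note on the defaults above: read_expire is HZ/2 jiffies (about
 * half a second) and write_expire is 5*HZ (about five seconds). These are
 * soft deadlines: an expired request is preferred at the next dispatch
 * decision, it is not preempted onto the hardware immediately.
 */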

static const int deadline_hash_shift = 5;
#define DL_HASH_BLOCK(sec)	((sec) >> 3)
#define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)	list_entry((ptr), struct deadline_rq, hash)
#define ON_HASH(drq)		(drq)->on_hash
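
/*
 * Back-merge lookup example (editor's note, hypothetical numbers): a request
 * covering sectors 100..107 has sector == 100 and nr_sectors == 8, so its
 * rq_hash_key() is 108. A new bio starting at sector 108 probes bucket
 * DL_HASH_FN(108) in deadline_merge() and can be back merged onto it.
 */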

struct deadline_data {
	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct deadline_rq *next_drq[2];
	struct list_head *hash;		/* request hash */
	unsigned int batching;		/* number of sequential requests made */
	sector_t last_sector;		/* head position */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	mempool_t *drq_pool;
};

struct deadline_rq {
	/* rbtree index, key is the starting offset */
	struct rb_node rb_node;
	sector_t rb_key;

	struct request *request;

	/* request hash, key is the ending offset (for back merge lookup) */
	struct list_head hash;
	char on_hash;

	/* expire fifo */
	struct list_head fifo;
	unsigned long expires;
};
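
/*
 * Editor's note: each pending request is indexed three ways at once -- by
 * start sector in the per-direction rbtree (sort_list), by end sector in the
 * merge hash, and in arrival order on the fifo_list with an expiry stamp.
 * Dispatch follows the rbtree for throughput and falls back to the fifo head
 * once a deadline has passed.
 */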

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)	((struct deadline_rq *) (rq)->elevator_private)

/*
 * the back merge hash support functions
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
	drq->on_hash = 0;
	list_del_init(&drq->hash);
}

static inline void deadline_del_drq_hash(struct deadline_rq *drq)
{
	if (ON_HASH(drq))
		__deadline_del_drq_hash(drq);
}

static inline void
deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct request *rq = drq->request;

	BUG_ON(ON_HASH(drq));

	drq->on_hash = 1;
	list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain
 */
static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct request *rq = drq->request;
	struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];

	if (ON_HASH(drq) && drq->hash.prev != head) {
		list_del(&drq->hash);
		list_add(&drq->hash, head);
	}
}

static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
	struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
	struct list_head *entry, *next = hash_list->next;

	while ((entry = next) != hash_list) {
		struct deadline_rq *drq = list_entry_hash(entry);
		struct request *__rq = drq->request;

		next = entry->next;

		BUG_ON(!ON_HASH(drq));

		if (!rq_mergeable(__rq)) {
			__deadline_del_drq_hash(drq);
			continue;
		}

		if (rq_hash_key(__rq) == offset)
			return __rq;
	}

	return NULL;
}

/*
 * rb tree support functions
 */
#define RB_NONE			(2)
#define RB_EMPTY(root)		((root)->rb_node == NULL)
#define ON_RB(node)		((node)->rb_color != RB_NONE)
#define RB_CLEAR(node)		((node)->rb_color = RB_NONE)
#define rb_entry_drq(node)	rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq)	(&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq)		(rq)->sector

static struct deadline_rq *
__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
	struct rb_node *parent = NULL;
	struct deadline_rq *__drq;

	while (*p) {
		parent = *p;
		__drq = rb_entry_drq(parent);

		if (drq->rb_key < __drq->rb_key)
			p = &(*p)->rb_left;
		else if (drq->rb_key > __drq->rb_key)
			p = &(*p)->rb_right;
		else
			return __drq;
	}

	rb_link_node(&drq->rb_node, parent, p);
	return NULL;
}

static void
deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct deadline_rq *__alias;

	drq->rb_key = rq_rb_key(drq->request);

retry:
	__alias = __deadline_add_drq_rb(dd, drq);
	if (!__alias) {
		rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
		return;
	}

	deadline_move_request(dd, __alias);
	goto retry;
}
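
/*
 * Editor's note: the rbtree keys requests by start sector and keeps keys
 * unique. When an insert finds an alias (an existing request with the same
 * start sector), deadline_add_drq_rb() dispatches the existing one via
 * deadline_move_request() and retries, rather than growing the tree with a
 * duplicate key.
 */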

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	const int data_dir = rq_data_dir(drq->request);

	if (dd->next_drq[data_dir] == drq) {
		struct rb_node *rbnext = rb_next(&drq->rb_node);

		dd->next_drq[data_dir] = NULL;
		if (rbnext)
			dd->next_drq[data_dir] = rb_entry_drq(rbnext);
	}

	BUG_ON(!ON_RB(&drq->rb_node));
	rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
	RB_CLEAR(&drq->rb_node);
}

static struct request *
deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
{
	struct rb_node *n = dd->sort_list[data_dir].rb_node;
	struct deadline_rq *drq;

	while (n) {
		drq = rb_entry_drq(n);

		if (sector < drq->rb_key)
			n = n->rb_left;
		else if (sector > drq->rb_key)
			n = n->rb_right;
		else
			return drq->request;
	}

	return NULL;
}

/*
 * deadline_find_first_drq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct deadline_rq *
deadline_find_first_drq(struct deadline_data *dd, int data_dir)
{
	struct rb_node *n = dd->sort_list[data_dir].rb_node;

	for (;;) {
		if (n->rb_left == NULL)
			return rb_entry_drq(n);

		n = n->rb_left;
	}
}

/*
 * add drq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);
	const int data_dir = rq_data_dir(drq->request);

	deadline_add_drq_rb(dd, drq);

	/*
	 * set expire time (only used for reads) and add to fifo list
	 */
	drq->expires = jiffies + dd->fifo_expire[data_dir];
	list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);

	if (rq_mergeable(rq))
		deadline_add_drq_hash(dd, drq);
}

/*
 * remove rq from rbtree, fifo, and hash
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);
	struct deadline_data *dd = q->elevator->elevator_data;

	list_del_init(&drq->fifo);
	deadline_del_drq_rb(dd, drq);
	deadline_del_drq_hash(drq);
}

static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * see if the merge hash can satisfy a back merge
	 */
	__rq = deadline_find_drq_hash(dd, bio->bi_sector);
	if (__rq) {
		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_BACK_MERGE;
			goto out;
		}
	}

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t rb_key = bio->bi_sector + bio_sectors(bio);

		__rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
		if (__rq) {
			BUG_ON(rb_key != rq_rb_key(__rq));

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	deadline_hot_drq_hash(dd, RQ_DATA(__rq));
	*req = __rq;
	return ret;
}
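
/*
 * Editor's note: back merges (a bio appended to an existing request) are
 * found through the hash, whose key is a request's end sector; front merges
 * (a bio prepended) are found through the rbtree, keyed by start sector, and
 * can be turned off entirely with the front_merges tunable.
 */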

static void deadline_merged_request(request_queue_t *q, struct request *req)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(req);

	/*
	 * hash always needs to be repositioned, key is end sector
	 */
	deadline_del_drq_hash(drq);
	deadline_add_drq_hash(dd, drq);

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (rq_rb_key(req) != drq->rb_key) {
		deadline_del_drq_rb(dd, drq);
		deadline_add_drq_rb(dd, drq);
	}
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
			 struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(req);
	struct deadline_rq *dnext = RQ_DATA(next);

	BUG_ON(!drq);
	BUG_ON(!dnext);

	/*
	 * reposition drq (this is the merged request) in hash, and in rbtree
	 * in case of a front merge
	 */
	deadline_del_drq_hash(drq);
	deadline_add_drq_hash(dd, drq);

	if (rq_rb_key(req) != drq->rb_key) {
		deadline_del_drq_rb(dd, drq);
		deadline_add_drq_rb(dd, drq);
	}

	/*
	 * if dnext expires before drq, assign its expire time to drq
	 * and move into dnext position (dnext will be deleted) in fifo
	 */
	if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
		if (time_before(dnext->expires, drq->expires)) {
			list_move(&drq->fifo, &dnext->fifo);
			drq->expires = dnext->expires;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
	request_queue_t *q = drq->request->q;

	deadline_remove_request(q, drq->request);
	elv_dispatch_add_tail(q, drq->request);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
	const int data_dir = rq_data_dir(drq->request);
	struct rb_node *rbnext = rb_next(&drq->rb_node);

	dd->next_drq[READ] = NULL;
	dd->next_drq[WRITE] = NULL;

	if (rbnext)
		dd->next_drq[data_dir] = rb_entry_drq(rbnext);

	dd->last_sector = drq->request->sector + drq->request->nr_sectors;

	/*
	 * take it off the sort and fifo list, move
	 * to dispatch queue
	 */
	deadline_move_to_dispatch(dd, drq);
}

#define list_entry_fifo(ptr)	list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired reads on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

	/* drq is expired! */
	if (time_after(jiffies, drq->expires))
		return 1;

	return 0;
}
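
/*
 * Editor's note: despite the "expired reads" wording above, this check is
 * used for both data directions; only the expire times differ. time_after()
 * is the jiffies-wraparound-safe way to make this comparison.
 */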

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct deadline_rq *drq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_drq[WRITE])
		drq = dd->next_drq[WRITE];
	else
		drq = dd->next_drq[READ];

	if (drq) {
		/* we have a "next request" */

		if (dd->last_sector != drq->request->sector)
			/* end the batch on a non sequential request */
			dd->batching += dd->fifo_batch;

		if (dd->batching < dd->fifo_batch)
			/* we are still entitled to batch */
			goto dispatch_request;
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */
	if (reads) {
		BUG_ON(RB_EMPTY(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */
	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir)) {
		/* An expired request exists - satisfy it */
		dd->batching = 0;
		drq = list_entry_fifo(dd->fifo_list[data_dir].next);

	} else if (dd->next_drq[data_dir]) {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		drq = dd->next_drq[data_dir];
	} else {
		/*
		 * The last req was the other direction or we have run out of
		 * higher-sectored requests. Go back to the lowest sectored
		 * request (1 way elevator) and start a new batch.
		 */
		dd->batching = 0;
		drq = deadline_find_first_drq(dd, data_dir);
	}

dispatch_request:
	/*
	 * drq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, drq);

	return 1;
}
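
/*
 * Editor's summary of the dispatch policy implemented above: (1) keep feeding
 * the current sequential batch until fifo_batch requests have gone out or the
 * stream breaks; (2) otherwise prefer reads, unless writes have already been
 * passed over writes_starved times; (3) within the chosen direction, serve
 * the fifo head if its deadline has expired, else continue in sector order,
 * else sweep back to the lowest sector and start a new batch.
 */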

static int deadline_queue_empty(request_queue_t *q)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	return list_empty(&dd->fifo_list[WRITE])
		&& list_empty(&dd->fifo_list[READ]);
}

static struct request *
deadline_former_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&drq->rb_node);

	if (rbprev)
		return rb_entry_drq(rbprev)->request;

	return NULL;
}

static struct request *
deadline_latter_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&drq->rb_node);

	if (rbnext)
		return rb_entry_drq(rbnext)->request;

	return NULL;
}

static void deadline_exit_queue(elevator_t *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	mempool_destroy(dd->drq_pool);
	kfree(dd->hash);
	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static int deadline_init_queue(request_queue_t *q, elevator_t *e)
{
	struct deadline_data *dd;
	int i;

	if (!drq_pool)
		return -ENOMEM;

	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		return -ENOMEM;
	memset(dd, 0, sizeof(*dd));

	dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
				GFP_KERNEL, q->node);
	if (!dd->hash) {
		kfree(dd);
		return -ENOMEM;
	}

	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					mempool_free_slab, drq_pool, q->node);
	if (!dd->drq_pool) {
		kfree(dd->hash);
		kfree(dd);
		return -ENOMEM;
	}

	for (i = 0; i < DL_HASH_ENTRIES; i++)
		INIT_LIST_HEAD(&dd->hash[i]);

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	e->elevator_data = dd;
	return 0;
}

static void deadline_put_request(request_queue_t *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);

	mempool_free(drq, dd->drq_pool);
	rq->elevator_private = NULL;
}

static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		     gfp_t gfp_mask)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq;

	drq = mempool_alloc(dd->drq_pool, gfp_mask);
	if (drq) {
		memset(drq, 0, sizeof(*drq));
		RB_CLEAR(&drq->rb_node);
		drq->request = rq;

		INIT_LIST_HEAD(&drq->hash);
		drq->on_hash = 0;

		INIT_LIST_HEAD(&drq->fifo);

		rq->elevator_private = drq;
		return 0;
	}

	return 1;
}

/*
 * sysfs parts below
 */
struct deadline_fs_entry {
	struct attribute attr;
	ssize_t (*show)(struct deadline_data *, char *);
	ssize_t (*store)(struct deadline_data *, const char *, size_t);
};

static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct deadline_data *dd, char *page)		\
{									\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count)	\
{									\
	int __data;							\
	int ret = deadline_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
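
/*
 * Editor's note on usage: with deadline active these attributes typically
 * appear under /sys/block/<dev>/queue/iosched/. The time-based knobs are
 * read and written in milliseconds (the __CONV conversions above), e.g.
 * (assuming a disk named sda):
 *
 *	cat /sys/block/sda/queue/iosched/read_expire
 *	echo 100 > /sys/block/sda/queue/iosched/read_expire
 */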

static struct deadline_fs_entry deadline_readexpire_entry = {
	.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_readexpire_show,
	.store = deadline_readexpire_store,
};
static struct deadline_fs_entry deadline_writeexpire_entry = {
	.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_writeexpire_show,
	.store = deadline_writeexpire_store,
};
static struct deadline_fs_entry deadline_writesstarved_entry = {
	.attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_writesstarved_show,
	.store = deadline_writesstarved_store,
};
static struct deadline_fs_entry deadline_frontmerges_entry = {
	.attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_frontmerges_show,
	.store = deadline_frontmerges_store,
};
static struct deadline_fs_entry deadline_fifobatch_entry = {
	.attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_fifobatch_show,
	.store = deadline_fifobatch_store,
};

static struct attribute *default_attrs[] = {
	&deadline_readexpire_entry.attr,
	&deadline_writeexpire_entry.attr,
	&deadline_writesstarved_entry.attr,
	&deadline_frontmerges_entry.attr,
	&deadline_fifobatch_entry.attr,
	NULL,
};

#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)

static ssize_t
deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct deadline_fs_entry *entry = to_deadline(attr);

	if (!entry->show)
		return -EIO;

	return entry->show(e->elevator_data, page);
}

static ssize_t
deadline_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct deadline_fs_entry *entry = to_deadline(attr);

	if (!entry->store)
		return -EIO;

	return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops deadline_sysfs_ops = {
	.show	= deadline_attr_show,
	.store	= deadline_attr_store,
};

static struct kobj_type deadline_ktype = {
	.sysfs_ops	= &deadline_sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct elevator_type iosched_deadline = {
	.ops = {
		.elevator_merge_fn =		deadline_merge,
		.elevator_merged_fn =		deadline_merged_request,
		.elevator_merge_req_fn =	deadline_merged_requests,
		.elevator_dispatch_fn =		deadline_dispatch_requests,
		.elevator_add_req_fn =		deadline_add_request,
		.elevator_queue_empty_fn =	deadline_queue_empty,
		.elevator_former_req_fn =	deadline_former_request,
		.elevator_latter_req_fn =	deadline_latter_request,
		.elevator_set_req_fn =		deadline_set_request,
		.elevator_put_req_fn =		deadline_put_request,
		.elevator_init_fn =		deadline_init_queue,
		.elevator_exit_fn =		deadline_exit_queue,
	},

	.elevator_ktype = &deadline_ktype,
	.elevator_name = "deadline",
	.elevator_owner = THIS_MODULE,
};
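
/*
 * Editor's note: registering this elevator_type makes "deadline" selectable
 * like any other I/O scheduler, typically per device via
 * /sys/block/<dev>/queue/scheduler or globally with the elevator=deadline
 * boot parameter.
 */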

static int __init deadline_init(void)
{
	int ret;

	drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
				     0, 0, NULL, NULL);

	if (!drq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_deadline);
	if (ret)
		kmem_cache_destroy(drq_pool);

	return ret;
}

static void __exit deadline_exit(void)
{
	kmem_cache_destroy(drq_pool);
	elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");