/*
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */

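/*
 * Back-merge hash: requests are hashed on their end sector, so a new bio
 * whose start sector matches an existing request's end sector can be
 * found and merged behind it without scanning the queue.
 */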
static const int deadline_hash_shift = 5;
#define DL_HASH_BLOCK(sec)      ((sec) >> 3)
#define DL_HASH_FN(sec)         (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES         (1 << deadline_hash_shift)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define ON_HASH(drq)            (!hlist_unhashed(&(drq)->hash))

struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests (deadline_rq s) are present on both sort_list and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next request in sort order; either or both may be NULL
         */
        struct deadline_rq *next_drq[2];
        struct hlist_head *hash;        /* request hash */
        unsigned int batching;          /* number of sequential requests made */
        sector_t last_sector;           /* head position */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        mempool_t *drq_pool;
};

/*
 * per-request data.
 */
struct deadline_rq {
        /*
         * rbtree index, key is the starting offset
         */
        struct rb_node rb_node;
        sector_t rb_key;

        struct request *request;

        /*
         * request hash, key is the ending offset (for back merge lookup)
         */
        struct hlist_node hash;

        /*
         * expire fifo
         */
        struct list_head fifo;
        unsigned long expires;
};

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

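/* slab cache backing the per-queue mempools of struct deadline_rq */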
static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)     ((struct deadline_rq *) (rq)->elevator_private)

/*
 * the back merge hash support functions
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
        hlist_del_init(&drq->hash);
}

static inline void deadline_del_drq_hash(struct deadline_rq *drq)
{
        if (ON_HASH(drq))
                __deadline_del_drq_hash(drq);
}

static inline void
deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;

        BUG_ON(ON_HASH(drq));

        hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain
 */
static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;
        struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];

        if (ON_HASH(drq) && &drq->hash != head->first) {
                hlist_del(&drq->hash);
                hlist_add_head(&drq->hash, head);
        }
}

static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
        struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct deadline_rq *drq;

        hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
                struct request *__rq = drq->request;

                BUG_ON(!ON_HASH(drq));

                if (!rq_mergeable(__rq)) {
                        __deadline_del_drq_hash(drq);
                        continue;
                }

                if (rq_hash_key(__rq) == offset)
                        return __rq;
        }

        return NULL;
}

/*
 * rb tree support functions
 */
#define rb_entry_drq(node)      rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq)    (&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq)           (rq)->sector

static struct deadline_rq *
__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
        struct rb_node *parent = NULL;
        struct deadline_rq *__drq;

        while (*p) {
                parent = *p;
                __drq = rb_entry_drq(parent);

                if (drq->rb_key < __drq->rb_key)
                        p = &(*p)->rb_left;
                else if (drq->rb_key > __drq->rb_key)
                        p = &(*p)->rb_right;
                else
                        return __drq;
        }

        rb_link_node(&drq->rb_node, parent, p);
        return NULL;
}

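/*
 * insert drq into the sort tree. If a request with the same start sector
 * (an alias) is already present, dispatch the existing one and retry
 * until the insert succeeds.
 */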
static void
deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct deadline_rq *__alias;

        drq->rb_key = rq_rb_key(drq->request);

retry:
        __alias = __deadline_add_drq_rb(dd, drq);
        if (!__alias) {
                rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
                return;
        }

        deadline_move_request(dd, __alias);
        goto retry;
}

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);

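        /* if drq is the cached next request, advance the cache to its successor */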
        if (dd->next_drq[data_dir] == drq) {
                struct rb_node *rbnext = rb_next(&drq->rb_node);

                dd->next_drq[data_dir] = NULL;
                if (rbnext)
                        dd->next_drq[data_dir] = rb_entry_drq(rbnext);
        }

        BUG_ON(!RB_EMPTY_NODE(&drq->rb_node));
        rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
        RB_CLEAR_NODE(&drq->rb_node);
}

static struct request *
deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;
        struct deadline_rq *drq;

        while (n) {
                drq = rb_entry_drq(n);

                if (sector < drq->rb_key)
                        n = n->rb_left;
                else if (sector > drq->rb_key)
                        n = n->rb_right;
                else
                        return drq->request;
        }

        return NULL;
}

/*
 * deadline_find_first_drq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct deadline_rq *
deadline_find_first_drq(struct deadline_data *dd, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;

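        /*
         * callers guarantee a non-empty tree (see the RB_EMPTY_ROOT
         * checks in deadline_dispatch_requests())
         */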
        for (;;) {
                if (n->rb_left == NULL)
                        return rb_entry_drq(n);

                n = n->rb_left;
        }
}

/*
 * add drq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        const int data_dir = rq_data_dir(drq->request);

        deadline_add_drq_rb(dd, drq);
        /*
         * set expire time and add to fifo list; both reads and writes get
         * a deadline, taken from fifo_expire[data_dir]
         */
        drq->expires = jiffies + dd->fifo_expire[data_dir];
        list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);

        if (rq_mergeable(rq))
                deadline_add_drq_hash(dd, drq);
}

/*
 * remove rq from rbtree, fifo, and hash
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&drq->fifo);
        deadline_del_drq_rb(dd, drq);
        deadline_del_drq_hash(drq);
}

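/*
 * see if a bio can be merged with an existing request: back merges are
 * found via the hash (keyed on end sector), front merges via the sort
 * tree (keyed on start sector)
 */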
static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        /*
         * see if the merge hash can satisfy a back merge
         */
        __rq = deadline_find_drq_hash(dd, bio->bi_sector);
        if (__rq) {
                BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

                if (elv_rq_merge_ok(__rq, bio)) {
                        ret = ELEVATOR_BACK_MERGE;
                        goto out;
                }
        }

        /*
         * check for front merge
         */
        if (dd->front_merges) {
                sector_t rb_key = bio->bi_sector + bio_sectors(bio);

                __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
                if (__rq) {
                        BUG_ON(rb_key != rq_rb_key(__rq));

                        if (elv_rq_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
                                goto out;
                        }
                }
        }

        return ELEVATOR_NO_MERGE;
out:
        if (ret)
                deadline_hot_drq_hash(dd, RQ_DATA(__rq));
        *req = __rq;
        return ret;
}

static void deadline_merged_request(request_queue_t *q, struct request *req)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);

        /*
         * hash always needs to be repositioned, key is end sector
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
                         struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);
        struct deadline_rq *dnext = RQ_DATA(next);

        BUG_ON(!drq);
        BUG_ON(!dnext);

        /*
         * reposition drq (this is the merged request) in hash, and in rbtree
         * in case of a front merge
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }

        /*
         * if dnext expires before drq, assign its expire time to drq
         * and move into dnext position (dnext will be deleted) in fifo
         */
        if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
                if (time_before(dnext->expires, drq->expires)) {
                        list_move(&drq->fifo, &dnext->fifo);
                        drq->expires = dnext->expires;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
        request_queue_t *q = drq->request->q;

        deadline_remove_request(q, drq->request);
        elv_dispatch_add_tail(q, drq->request);
}

/*
 * move an entry to the dispatch queue, updating the cached next request
 * and the head position (last_sector)
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        dd->next_drq[READ] = NULL;
        dd->next_drq[WRITE] = NULL;

        if (rbnext)
                dd->next_drq[data_dir] = rb_entry_drq(rbnext);

        dd->last_sector = drq->request->sector + drq->request->nr_sectors;

        /*
         * take it off the sort and fifo list, move
         * to dispatch queue
         */
        deadline_move_to_dispatch(dd, drq);
}

#define list_entry_fifo(ptr)    list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo, 1 otherwise. Requires !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * drq is expired!
         */
        if (time_after(jiffies, drq->expires))
                return 1;

        return 0;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct deadline_rq *drq;
        int data_dir;

        /*
         * batches are currently reads XOR writes
         */
        if (dd->next_drq[WRITE])
                drq = dd->next_drq[WRITE];
        else
                drq = dd->next_drq[READ];

        if (drq) {
                /* we have a "next request" */

                if (dd->last_sector != drq->request->sector)
                        /*
                         * end the batch on a non-sequential request;
                         * inflating ->batching past fifo_batch makes the
                         * check below fail
                         */
                        dd->batching += dd->fifo_batch;

                if (dd->batching < dd->fifo_batch)
                        /* we are still entitled to batch */
                        goto dispatch_request;
        }

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return 0;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir)) {
                /* An expired request exists - satisfy it */
                dd->batching = 0;
                drq = list_entry_fifo(dd->fifo_list[data_dir].next);

        } else if (dd->next_drq[data_dir]) {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                drq = dd->next_drq[data_dir];
        } else {
                /*
                 * The last req was the other direction or we have run out of
                 * higher-sectored requests. Go back to the lowest sectored
                 * request (1 way elevator) and start a new batch.
                 */
                dd->batching = 0;
                drq = deadline_find_first_drq(dd, data_dir);
        }

dispatch_request:
        /*
         * drq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, drq);

        return 1;
}


static int deadline_queue_empty(request_queue_t *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        return list_empty(&dd->fifo_list[WRITE])
                && list_empty(&dd->fifo_list[READ]);
}

static struct request *
deadline_former_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbprev = rb_prev(&drq->rb_node);

        if (rbprev)
                return rb_entry_drq(rbprev)->request;

        return NULL;
}

static struct request *
deadline_latter_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        if (rbnext)
                return rb_entry_drq(rbnext)->request;

        return NULL;
}

static void deadline_exit_queue(elevator_t *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        mempool_destroy(dd->drq_pool);
        kfree(dd->hash);
        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
        struct deadline_data *dd;
        int i;

        if (!drq_pool)
                return NULL;

        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                return NULL;
        memset(dd, 0, sizeof(*dd));

        dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
                                GFP_KERNEL, q->node);
        if (!dd->hash) {
                kfree(dd);
                return NULL;
        }

        dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                        mempool_free_slab, drq_pool, q->node);
        if (!dd->drq_pool) {
                kfree(dd->hash);
                kfree(dd);
                return NULL;
        }

        for (i = 0; i < DL_HASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&dd->hash[i]);

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        return dd;
}

static void deadline_put_request(request_queue_t *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        mempool_free(drq, dd->drq_pool);
        rq->elevator_private = NULL;
}

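/* allocate and initialize per-request elevator data from the queue's mempool */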
static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                     gfp_t gfp_mask)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq;

        drq = mempool_alloc(dd->drq_pool, gfp_mask);
        if (drq) {
                memset(drq, 0, sizeof(*drq));
                RB_CLEAR_NODE(&drq->rb_node);
                drq->request = rq;

                INIT_HLIST_NODE(&drq->hash);

                INIT_LIST_HEAD(&drq->fifo);

                rq->elevator_private = drq;
                return 0;
        }

        return 1;
}

/*
 * sysfs parts below
 */

static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
        return count;
}

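/*
 * generate one sysfs show method per tunable; __CONV converts the stored
 * jiffies value to milliseconds for the expiry settings
 */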
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(elevator_t *e, char *page)                        \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

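/* generate the matching store methods, clamping input to [MIN, MAX] */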
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        int ret = deadline_var_store(&__data, (page), count);           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return ret;                                                     \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
                                      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};

static struct elevator_type iosched_deadline = {
        .ops = {
                .elevator_merge_fn =            deadline_merge,
                .elevator_merged_fn =           deadline_merged_request,
                .elevator_merge_req_fn =        deadline_merged_requests,
                .elevator_dispatch_fn =         deadline_dispatch_requests,
                .elevator_add_req_fn =          deadline_add_request,
                .elevator_queue_empty_fn =      deadline_queue_empty,
                .elevator_former_req_fn =       deadline_former_request,
                .elevator_latter_req_fn =       deadline_latter_request,
                .elevator_set_req_fn =          deadline_set_request,
                .elevator_put_req_fn =          deadline_put_request,
                .elevator_init_fn =             deadline_init_queue,
                .elevator_exit_fn =             deadline_exit_queue,
        },

        .elevator_attrs = deadline_attrs,
        .elevator_name = "deadline",
        .elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
        int ret;

        drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
                                     0, 0, NULL, NULL);

        if (!drq_pool)
                return -ENOMEM;

        ret = elv_register(&iosched_deadline);
        if (ret)
                kmem_cache_destroy(drq_pool);

        return ret;
}

static void __exit deadline_exit(void)
{
        /* unregister first so no new queues can reach drq_pool */
        elv_unregister(&iosched_deadline);
        kmem_cache_destroy(drq_pool);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");