[linux-2.6] block/deadline-iosched.c
/*
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */
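
/*
 * Editorial note, not part of the original source: since there are HZ
 * jiffies per second, read_expire corresponds to 500 ms and write_expire
 * to 5 s of wall-clock time regardless of the CONFIG_HZ setting.
 */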

static const int deadline_hash_shift = 5;
#define DL_HASH_BLOCK(sec)      ((sec) >> 3)
#define DL_HASH_FN(sec)         (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES         (1 << deadline_hash_shift)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define ON_HASH(drq)            (!hlist_unhashed(&(drq)->hash))
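
/*
 * Worked example (editorial, not in the original source): a request
 * starting at sector 992 with nr_sectors == 8 has rq_hash_key() == 1000.
 * DL_HASH_BLOCK(1000) == 1000 >> 3 == 125, which hash_long() spreads
 * over DL_HASH_ENTRIES == 1 << 5 == 32 buckets. A later bio that begins
 * at sector 1000 hashes to the same bucket and can be found for a back
 * merge by deadline_find_drq_hash().
 */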

struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests (struct deadline_rq) are present on both sort_list
         * and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next request in sort order; the read, write or both slots may
         * be NULL
         */
        struct deadline_rq *next_drq[2];
        struct hlist_head *hash;        /* request hash */
        unsigned int batching;          /* number of sequential requests made */
        sector_t last_sector;           /* head position */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        mempool_t *drq_pool;
};

/*
 * per-request data.
 */
struct deadline_rq {
        /*
         * rbtree index, key is the starting offset
         */
        struct rb_node rb_node;
        sector_t rb_key;

        struct request *request;

        /*
         * request hash, key is the ending offset (for back merge lookup)
         */
        struct hlist_node hash;

        /*
         * expire fifo
         */
        struct list_head fifo;
        unsigned long expires;
};
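
/*
 * Editorial summary, not in the original source: every queued request is
 * indexed three ways at once. The rbtree (sort_list) orders requests by
 * start sector for one-way elevator scanning, the hash indexes them by
 * end sector for O(1) back-merge lookup, and the FIFO (fifo_list) keeps
 * them in arrival order so an expiry deadline can bound service latency.
 */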

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)     ((struct deadline_rq *) (rq)->elevator_private)

/*
 * the back merge hash support functions
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
        hlist_del_init(&drq->hash);
}

static inline void deadline_del_drq_hash(struct deadline_rq *drq)
{
        if (ON_HASH(drq))
                __deadline_del_drq_hash(drq);
}

static inline void
deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;

        BUG_ON(ON_HASH(drq));

        hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain
 */
static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;
        struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];

        if (ON_HASH(drq) && &drq->hash != head->first) {
                hlist_del(&drq->hash);
                hlist_add_head(&drq->hash, head);
        }
}

static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
        struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct deadline_rq *drq;

        hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
                struct request *__rq = drq->request;

                BUG_ON(!ON_HASH(drq));

                if (!rq_mergeable(__rq)) {
                        __deadline_del_drq_hash(drq);
                        continue;
                }

                if (rq_hash_key(__rq) == offset)
                        return __rq;
        }

        return NULL;
}
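
/*
 * Editorial note, not in the original source: requests that have become
 * non-mergeable are pruned lazily during the lookup above rather than
 * eagerly when their state changes; hlist_for_each_entry_safe() makes it
 * safe to unlink entries while walking the chain.
 */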

/*
 * rb tree support functions
 */
#define rb_entry_drq(node)      rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq)    (&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq)           (rq)->sector

/*
 * insert drq into the rbtree keyed by start sector. If a request with
 * the same key is already present, return it (the alias) without
 * linking drq; otherwise link drq and return NULL.
 */
static struct deadline_rq *
__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
        struct rb_node *parent = NULL;
        struct deadline_rq *__drq;

        while (*p) {
                parent = *p;
                __drq = rb_entry_drq(parent);

                if (drq->rb_key < __drq->rb_key)
                        p = &(*p)->rb_left;
                else if (drq->rb_key > __drq->rb_key)
                        p = &(*p)->rb_right;
                else
                        return __drq;
        }

        rb_link_node(&drq->rb_node, parent, p);
        return NULL;
}

static void
deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct deadline_rq *__alias;

        drq->rb_key = rq_rb_key(drq->request);

retry:
        __alias = __deadline_add_drq_rb(dd, drq);
        if (!__alias) {
                rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
                return;
        }

        /*
         * a request with the same start sector is already queued; move
         * the alias to the dispatch queue to free the key, then retry
         */
        deadline_move_request(dd, __alias);
        goto retry;
}

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);

        if (dd->next_drq[data_dir] == drq) {
                struct rb_node *rbnext = rb_next(&drq->rb_node);

                dd->next_drq[data_dir] = NULL;
                if (rbnext)
                        dd->next_drq[data_dir] = rb_entry_drq(rbnext);
        }

        BUG_ON(!RB_EMPTY_NODE(&drq->rb_node));
        rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
        RB_CLEAR_NODE(&drq->rb_node);
}

static struct request *
deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;
        struct deadline_rq *drq;

        while (n) {
                drq = rb_entry_drq(n);

                if (sector < drq->rb_key)
                        n = n->rb_left;
                else if (sector > drq->rb_key)
                        n = n->rb_right;
                else
                        return drq->request;
        }

        return NULL;
}

/*
 * deadline_find_first_drq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 * The caller must guarantee the sort list is non-empty.
 */
static struct deadline_rq *
deadline_find_first_drq(struct deadline_data *dd, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;

        for (;;) {
                if (n->rb_left == NULL)
                        return rb_entry_drq(n);

                n = n->rb_left;
        }
}

/*
 * add drq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        const int data_dir = rq_data_dir(drq->request);

        deadline_add_drq_rb(dd, drq);
        /*
         * set expire time and add to fifo list
         */
        drq->expires = jiffies + dd->fifo_expire[data_dir];
        list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);

        if (rq_mergeable(rq))
                deadline_add_drq_hash(dd, drq);
}

/*
 * remove rq from rbtree, fifo, and hash
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&drq->fifo);
        deadline_del_drq_rb(dd, drq);
        deadline_del_drq_hash(drq);
}

static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        /*
         * see if the merge hash can satisfy a back merge
         */
        __rq = deadline_find_drq_hash(dd, bio->bi_sector);
        if (__rq) {
                BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

                if (elv_rq_merge_ok(__rq, bio)) {
                        ret = ELEVATOR_BACK_MERGE;
                        goto out;
                }
        }

        /*
         * check for front merge
         */
        if (dd->front_merges) {
                sector_t rb_key = bio->bi_sector + bio_sectors(bio);

                __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
                if (__rq) {
                        BUG_ON(rb_key != rq_rb_key(__rq));

                        if (elv_rq_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
                                goto out;
                        }
                }
        }

        return ELEVATOR_NO_MERGE;
out:
        if (ret)
                deadline_hot_drq_hash(dd, RQ_DATA(__rq));
        *req = __rq;
        return ret;
}
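
/*
 * Worked example (editorial, not in the original source): with a queued
 * request covering sectors [100, 108), a bio starting at sector 108 hits
 * the hash (key == 108) and back-merges, appending to the request. A bio
 * covering [92, 100) instead matches the rbtree lookup with rb_key == 100
 * and front-merges, so the request then starts at sector 92 and must be
 * repositioned in both hash and rbtree (see deadline_merged_request()).
 */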

static void deadline_merged_request(request_queue_t *q, struct request *req)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);

        /*
         * hash always needs to be repositioned, key is end sector
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
                         struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);
        struct deadline_rq *dnext = RQ_DATA(next);

        BUG_ON(!drq);
        BUG_ON(!dnext);

        /*
         * reposition drq (this is the merged request) in hash, and in rbtree
         * in case of a front merge
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }

        /*
         * if dnext expires before drq, assign its expire time to drq
         * and move into dnext position (dnext will be deleted) in fifo
         */
        if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
                if (time_before(dnext->expires, drq->expires)) {
                        list_move(&drq->fifo, &dnext->fifo);
                        drq->expires = dnext->expires;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}
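
/*
 * Worked example (editorial, not in the original source): if req was
 * queued with expires == j + 500 and next with expires == j + 300, the
 * merged request inherits the earlier deadline (j + 300) and next's FIFO
 * position, so merging can never postpone an already-promised deadline.
 */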

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
        request_queue_t *q = drq->request->q;

        deadline_remove_request(q, drq->request);
        elv_dispatch_add_tail(q, drq->request);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        dd->next_drq[READ] = NULL;
        dd->next_drq[WRITE] = NULL;

        if (rbnext)
                dd->next_drq[data_dir] = rb_entry_drq(rbnext);

        dd->last_sector = drq->request->sector + drq->request->nr_sectors;

        /*
         * take it off the sort and fifo list, move
         * to dispatch queue
         */
        deadline_move_to_dispatch(dd, drq);
}
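
/*
 * Editorial note, not in the original source: after a request covering
 * sectors [100, 108) is dispatched, last_sector == 108. If the cached
 * next_drq for the same direction starts at sector 108, it counts as
 * sequential and deadline_dispatch_requests() keeps the batch running.
 */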

#define list_entry_fifo(ptr)    list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo for the given direction, 1 otherwise. Requires
 * !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * drq is expired!
         */
        if (time_after(jiffies, drq->expires))
                return 1;

        return 0;
}
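
/*
 * Editorial note, not in the original source: only the FIFO head needs
 * checking because requests are appended in arrival order, so the head
 * always carries the oldest deadline. time_after() performs a wrap-safe
 * jiffies comparison.
 */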

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct deadline_rq *drq;
        int data_dir;

        /*
         * batches are currently reads XOR writes
         */
        if (dd->next_drq[WRITE])
                drq = dd->next_drq[WRITE];
        else
                drq = dd->next_drq[READ];

        if (drq) {
                /* we have a "next request" */

                if (dd->last_sector != drq->request->sector)
                        /* end the batch on a non-sequential request */
                        dd->batching += dd->fifo_batch;

                if (dd->batching < dd->fifo_batch)
                        /* we are still entitled to batch */
                        goto dispatch_request;
        }

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads, or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return 0;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir)) {
                /* An expired request exists - satisfy it */
                dd->batching = 0;
                drq = list_entry_fifo(dd->fifo_list[data_dir].next);

        } else if (dd->next_drq[data_dir]) {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                drq = dd->next_drq[data_dir];
        } else {
                /*
                 * The last req was the other direction or we have run out of
                 * higher-sectored requests. Go back to the lowest sectored
                 * request (1 way elevator) and start a new batch.
                 */
                dd->batching = 0;
                drq = deadline_find_first_drq(dd, data_dir);
        }

dispatch_request:
        /*
         * drq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, drq);

        return 1;
}
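
/*
 * Editorial summary, not in the original source, of the dispatch policy
 * above in pseudocode form:
 *
 *      if a batch is running and the next request is sequential:
 *              keep batching;
 *      else if reads are pending and writes have not been starved
 *      more than writes_starved times:
 *              serve reads;
 *      else if writes are pending:
 *              serve writes;
 *      then pick the FIFO head if its deadline has expired, otherwise
 *      continue in sector order (or sweep back to the lowest sector);
 */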

static int deadline_queue_empty(request_queue_t *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        return list_empty(&dd->fifo_list[WRITE])
                && list_empty(&dd->fifo_list[READ]);
}

static struct request *
deadline_former_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbprev = rb_prev(&drq->rb_node);

        if (rbprev)
                return rb_entry_drq(rbprev)->request;

        return NULL;
}

static struct request *
deadline_latter_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        if (rbnext)
                return rb_entry_drq(rbnext)->request;

        return NULL;
}

static void deadline_exit_queue(elevator_t *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        mempool_destroy(dd->drq_pool);
        kfree(dd->hash);
        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
        struct deadline_data *dd;
        int i;

        if (!drq_pool)
                return NULL;

        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                return NULL;
        memset(dd, 0, sizeof(*dd));

        dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
                                GFP_KERNEL, q->node);
        if (!dd->hash) {
                kfree(dd);
                return NULL;
        }

        dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                        mempool_free_slab, drq_pool, q->node);
        if (!dd->drq_pool) {
                kfree(dd->hash);
                kfree(dd);
                return NULL;
        }

        for (i = 0; i < DL_HASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&dd->hash[i]);

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        return dd;
}
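
/*
 * Editorial note, not in the original source: the mempool keeps
 * BLKDEV_MIN_RQ deadline_rq elements preallocated per queue, so request
 * setup can still make forward progress when the slab allocator is under
 * memory pressure. Allocation failures above unwind in reverse order.
 */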

static void deadline_put_request(request_queue_t *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        mempool_free(drq, dd->drq_pool);
        rq->elevator_private = NULL;
}

/*
 * allocate per-request data; returns 0 on success, nonzero on failure,
 * as the elevator core expects
 */
static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                     gfp_t gfp_mask)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq;

        drq = mempool_alloc(dd->drq_pool, gfp_mask);
        if (drq) {
                memset(drq, 0, sizeof(*drq));
                RB_CLEAR_NODE(&drq->rb_node);
                drq->request = rq;

                INIT_HLIST_NODE(&drq->hash);

                INIT_LIST_HEAD(&drq->fifo);

                rq->elevator_private = drq;
                return 0;
        }

        return 1;
}

/*
 * sysfs parts below
 */

static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
        return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(elevator_t *e, char *page)                        \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        int ret = deadline_var_store(&__data, (page), count);           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return ret;                                                     \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
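
/*
 * Editorial illustration, not in the original source: the front_merges
 * store function above expands (reindented, dead __CONV branch folded
 * away) to roughly:
 *
 *      static ssize_t deadline_front_merges_store(elevator_t *e,
 *                                      const char *page, size_t count)
 *      {
 *              struct deadline_data *dd = e->elevator_data;
 *              int __data;
 *              int ret = deadline_var_store(&__data, (page), count);
 *              if (__data < (0))
 *                      __data = (0);
 *              else if (__data > (1))
 *                      __data = (1);
 *              *(&dd->front_merges) = __data;
 *              return ret;
 *      }
 */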

#define DD_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
                                      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};
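
/*
 * Editorial note, not in the original source: these attributes appear
 * under sysfs, typically at /sys/block/<dev>/queue/iosched/ once this
 * scheduler is active, e.g. "echo 100 > .../iosched/read_expire" sets
 * the read deadline to 100 ms (the store path converts ms to jiffies).
 */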

static struct elevator_type iosched_deadline = {
        .ops = {
                .elevator_merge_fn =            deadline_merge,
                .elevator_merged_fn =           deadline_merged_request,
                .elevator_merge_req_fn =        deadline_merged_requests,
                .elevator_dispatch_fn =         deadline_dispatch_requests,
                .elevator_add_req_fn =          deadline_add_request,
                .elevator_queue_empty_fn =      deadline_queue_empty,
                .elevator_former_req_fn =       deadline_former_request,
                .elevator_latter_req_fn =       deadline_latter_request,
                .elevator_set_req_fn =          deadline_set_request,
                .elevator_put_req_fn =          deadline_put_request,
                .elevator_init_fn =             deadline_init_queue,
                .elevator_exit_fn =             deadline_exit_queue,
        },

        .elevator_attrs = deadline_attrs,
        .elevator_name = "deadline",
        .elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
        int ret;

        drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
                                     0, 0, NULL, NULL);

        if (!drq_pool)
                return -ENOMEM;

        ret = elv_register(&iosched_deadline);
        if (ret)
                kmem_cache_destroy(drq_pool);

        return ret;
}

static void __exit deadline_exit(void)
{
        /*
         * unregister the elevator before tearing down the slab cache, so
         * the cache cannot be destroyed while the scheduler is still
         * selectable
         */
        elv_unregister(&iosched_deadline);
        kmem_cache_destroy(drq_pool);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");