/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose the loose bi_dev comparisons; partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
#define ELV_HASH_FN(sec)        (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)         (!hlist_unhashed(&(rq)->hash))

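/*
 * Worked example (illustrative, not used by the code): a request
 * covering sectors 100..107 has ->sector == 100 and ->nr_sectors == 8,
 * so rq_hash_key() yields 108, the first sector *after* the request.
 * A later bio starting at sector 108 can therefore find this request
 * as a back merge candidate by looking up its own bi_sector in the
 * hash.  ELV_HASH_BLOCK() drops the low 3 bits before hashing, and
 * elv_hash_shift == 6 gives 64 buckets.
 */
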
/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * same device and no special stuff set, merge is ok
         */
        if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special)
                return 1;

        return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * we can merge and sequence is ok, check if it's possible
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}

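/*
 * Sketch of the arithmetic above (assumed numbers, for illustration):
 * with __rq covering sectors 100..107 (sector 100, nr_sectors 8), a
 * bio with bi_sector == 108 extends it at the tail (back merge), while
 * an 8-sector bio with bi_sector == 92 ends exactly where the request
 * begins, since 100 - bio_sectors(bio) == 92 (front merge).
 */
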
static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e = NULL;
        struct list_head *entry;

        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);

                if (!strcmp(__e->elevator_name, name)) {
                        e = __e;
                        break;
                }
        }

        return e;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock_irq(&elv_list_lock);

        e = elevator_find(name);
        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock_irq(&elv_list_lock);

        return e;
}

static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
{
        return eq->ops->elevator_init_fn(q, eq);
}

static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
                           void *data)
{
        q->elevator = eq;
        eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        if (!strcmp(str, "as"))
                strcpy(chosen_elevator, "anticipatory");
        else
                strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);

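/*
 * Usage example: booting with "elevator=deadline" on the kernel
 * command line selects the deadline scheduler as the default for all
 * queues; the legacy "elevator=as" spelling is rewritten to
 * "anticipatory" above.
 */
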
static struct kobj_type elv_ktype;

static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
{
        elevator_t *eq;
        int i;

        eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                goto err;

        memset(eq, 0, sizeof(*eq));
        eq->ops = &e->ops;
        eq->elevator_type = e;
        kobject_init(&eq->kobj);
        snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
        eq->kobj.ktype = &elv_ktype;
        mutex_init(&eq->sysfs_lock);

        eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
                                        GFP_KERNEL, q->node);
        if (!eq->hash)
                goto err;

        for (i = 0; i < ELV_HASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&eq->hash[i]);

        return eq;
err:
        kfree(eq);
        elevator_put(e);
        return NULL;
}

static void elevator_release(struct kobject *kobj)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);

        elevator_put(e->elevator_type);
        kfree(e->hash);
        kfree(e);
}

int elevator_init(request_queue_t *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;
        void *data;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name && !(e = elevator_get(name)))
                return -EINVAL;

        if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
                printk("I/O scheduler %s not found\n", chosen_elevator);

        if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
                printk("Default I/O scheduler not found, using no-op\n");
                e = elevator_get("noop");
        }

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        data = elevator_init_queue(q, eq);
        if (!data) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }

        elevator_attach(q, eq, data);
        return ret;
}

EXPORT_SYMBOL(elevator_init);

void elevator_exit(elevator_t *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);
        e->ops = NULL;
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}

EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
        hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(request_queue_t *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}

static void elv_rqhash_add(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
{
        elevator_t *e = q->elevator;
        struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct request *rq;

        hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (rq->sector < __rq->sector)
                        p = &(*p)->rb_left;
                else if (rq->sector > __rq->sector)
                        p = &(*p)->rb_right;
                else
                        return __rq;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
        return NULL;
}

EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}

EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < rq->sector)
                        n = n->rb_left;
                else if (sector > rq->sector)
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}

EXPORT_SYMBOL(elv_rb_find);

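/*
 * Illustrative sketch (hypothetical scheduler, not part of this file)
 * of how an io scheduler keeping a sector-sorted tree might use the
 * helpers above.  elv_rb_add() returns an existing request on a sector
 * alias, which the caller must get rid of before retrying:
 *
 *        static void my_add_rq(struct rb_root *root, struct request *rq)
 *        {
 *                struct request *alias;
 *
 *                while ((alias = elv_rb_add(root, rq)) != NULL)
 *                        my_dispatch(alias);
 *        }
 *
 * where my_dispatch() is a hypothetical helper that drains the alias.
 * A front merge candidate for a bio is then simply
 * elv_rb_find(root, bio->bi_sector + bio_sectors(bio)).
 */
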
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        boundary = q->end_sector;

        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                                continue;
                } else {
                        if (pos->sector >= boundary)
                                break;
                }
                if (rq->sector >= pos->sector)
                        break;
        }

        list_add(&rq->queuelist, entry);
}

EXPORT_SYMBOL(elv_dispatch_sort);

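/*
 * Worked example of the one-way sort above (assumed numbers): with
 * q->end_sector == 500 and a dispatch list holding sectors
 * 520, 900, 40, 60 (requests at or past the boundary first, in
 * ascending order, then the wrapped-around low sectors), a request at
 * sector 700 is inserted between 520 and 900, while one at sector 50
 * lands between 40 and 60.  This mimics a disk head sweeping in one
 * direction from the boundary.
 */
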
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}

EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        elevator_t *e = q->elevator;
        struct request *__rq;
        int ret;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

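/*
 * Example flow through elv_merge() (illustrative): if q->last_merge
 * still merges with the bio in either direction, the one-hit cache
 * answers immediately.  Failing that, the hash can only prove a back
 * merge, since it is keyed on request *end* sectors; anything further,
 * typically a front merge found in a sector-sorted tree, is left to
 * the scheduler's elevator_merge_fn.
 */
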
void elv_merged_request(request_queue_t *q, struct request *rq, int type)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
                             struct request *next)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);

        elv_rqhash_reposition(q, rq);
        elv_rqhash_del(q, next);

        q->nr_sorted--;
        q->last_merge = rq;
}

void elv_requeue_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
                        e->ops->elevator_deactivate_req_fn(q, rq);
        }

        rq->cmd_flags &= ~REQ_STARTED;

        elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

static void elv_drain_elevator(request_queue_t *q)
{
        static int printed;
        while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted == 0)
                return;
        if (printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->elevator_type->elevator_name, q->nr_sorted);
        }
}

void elv_insert(request_queue_t *q, struct request *rq, int where)
{
        struct list_head *pos;
        unsigned ordseq;
        int unplug_it = 1;

        blk_add_trace_rq(q, rq, BLK_TA_INSERT);

        rq->q = q;

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->cmd_flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and be returning them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in a hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                blk_remove_plug(q);
                q->request_fn(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_REQUEUE:
                /*
                 * If ordered flush isn't in progress, we do front
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
                rq->cmd_flags |= REQ_SOFTBARRIER;

                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
                        break;
                }

                ordseq = blk_ordered_req_seq(rq);

                list_for_each(pos, &q->queue_head) {
                        struct request *pos_rq = list_entry_rq(pos);
                        if (ordseq <= blk_ordered_req_seq(pos_rq))
                                break;
                }

                list_add_tail(&rq->queuelist, pos);
                /*
                 * most requeues happen because of a busy condition, don't
                 * force unplug of the queue for that case.
                 */
                unplug_it = 0;
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
                BUG();
        }

        if (unplug_it && blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE]
                        - q->in_flight;

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
{
        if (q->ordcolor)
                rq->cmd_flags |= REQ_ORDERED_COLOR;

        if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
                if (blk_barrier_rq(rq))
                        q->ordcolor ^= 1;

                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is a scheduling boundary, update
                 * end_sector
                 */
                if (blk_fs_request(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        elv_insert(q, rq, where);
}

EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

EXPORT_SYMBOL(elv_add_request);

663
664 static inline struct request *__elv_next_request(request_queue_t *q)
665 {
666         struct request *rq;
667
668         while (1) {
669                 while (!list_empty(&q->queue_head)) {
670                         rq = list_entry_rq(q->queue_head.next);
671                         if (blk_do_ordered(q, &rq))
672                                 return rq;
673                 }
674
675                 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
676                         return NULL;
677         }
678 }
679
680 struct request *elv_next_request(request_queue_t *q)
681 {
682         struct request *rq;
683         int ret;
684
685         while ((rq = __elv_next_request(q)) != NULL) {
686                 if (!(rq->cmd_flags & REQ_STARTED)) {
687                         elevator_t *e = q->elevator;
688
689                         /*
690                          * This is the first time the device driver
691                          * sees this request (possibly after
692                          * requeueing).  Notify IO scheduler.
693                          */
694                         if (blk_sorted_rq(rq) &&
695                             e->ops->elevator_activate_req_fn)
696                                 e->ops->elevator_activate_req_fn(q, rq);
697
698                         /*
699                          * just mark as started even if we don't start
700                          * it, a request that has been delayed should
701                          * not be passed by new incoming requests
702                          */
703                         rq->cmd_flags |= REQ_STARTED;
704                         blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
705                 }
706
707                 if (!q->boundary_rq || q->boundary_rq == rq) {
708                         q->end_sector = rq_end_sector(rq);
709                         q->boundary_rq = NULL;
710                 }
711
712                 if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
713                         break;
714
715                 ret = q->prep_rq_fn(q, rq);
716                 if (ret == BLKPREP_OK) {
717                         break;
718                 } else if (ret == BLKPREP_DEFER) {
719                         /*
720                          * the request may have been (partially) prepped.
721                          * we need to keep this request in the front to
722                          * avoid resource deadlock.  REQ_STARTED will
723                          * prevent other fs requests from passing this one.
724                          */
725                         rq = NULL;
726                         break;
727                 } else if (ret == BLKPREP_KILL) {
728                         int nr_bytes = rq->hard_nr_sectors << 9;
729
730                         if (!nr_bytes)
731                                 nr_bytes = rq->data_len;
732
733                         blkdev_dequeue_request(rq);
734                         rq->cmd_flags |= REQ_QUIET;
735                         end_that_request_chunk(rq, 0, nr_bytes);
736                         end_that_request_last(rq, 0);
737                 } else {
738                         printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
739                                                                 ret);
740                         break;
741                 }
742         }
743
744         return rq;
745 }
746
747 EXPORT_SYMBOL(elv_next_request);
748
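/*
 * Illustrative sketch (hypothetical driver, assumed names) of the
 * usual consumer of elv_next_request(): a driver's request_fn peeks
 * at the head of the queue, dequeues what it can issue, and stops on
 * NULL (queue empty, or a request deferred with BLKPREP_DEFER):
 *
 *        static void my_request_fn(request_queue_t *q)
 *        {
 *                struct request *rq;
 *
 *                while ((rq = elv_next_request(q)) != NULL) {
 *                        blkdev_dequeue_request(rq);
 *                        my_issue_to_hardware(rq);
 *                }
 *        }
 */
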
void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
        BUG_ON(list_empty(&rq->queuelist));
        BUG_ON(ELV_ON_HASH(rq));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the lists
         * and when it is freed is accounted as io that is in progress at
         * the driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;
}

EXPORT_SYMBOL(elv_dequeue_request);

int elv_queue_empty(request_queue_t *q)
{
        elevator_t *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}

EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw);

        return ELV_MQUEUE_MAY;
}

void elv_completed_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }

        /*
         * Check if the queue is waiting for fs requests to be
         * drained for flush sequence.
         */
        if (unlikely(q->ordseq)) {
                struct request *first_rq = list_entry_rq(q->queue_head.next);
                if (q->in_flight == 0 &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        q->request_fn(q);
                }
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
        ssize_t error;

        if (!entry->show)
                return -EIO;

        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
        ssize_t error;

        if (!entry->store)
                return -EIO;

        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
        elevator_t *e = q->elevator;
        int error;

        e->kobj.parent = &q->kobj;

        error = kobject_add(&e->kobj);
        if (!error) {
                struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
        }
        return error;
}

static void __elv_unregister_queue(elevator_t *e)
{
        kobject_uevent(&e->kobj, KOBJ_REMOVE);
        kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q)
                __elv_unregister_queue(q->elevator);
}

int elv_register(struct elevator_type *e)
{
        spin_lock_irq(&elv_list_lock);
        BUG_ON(elevator_find(e->elevator_name));
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
        if (!strcmp(e->elevator_name, chosen_elevator) ||
                        (!*chosen_elevator &&
                         !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                                printk(" (default)");
        printk("\n");
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

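/*
 * Sketch of a scheduler module registering itself (hypothetical
 * "myiosched", fields abbreviated); the in-tree schedulers do the
 * same from their module init functions:
 *
 *        static struct elevator_type iosched_myiosched = {
 *                .ops            = { ... },
 *                .elevator_name  = "myiosched",
 *                .elevator_owner = THIS_MODULE,
 *        };
 *
 *        static int __init myiosched_init(void)
 *        {
 *                return elv_register(&iosched_myiosched);
 *        }
 */
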
void elv_unregister(struct elevator_type *e)
{
        struct task_struct *g, *p;

        /*
         * Iterate over every thread in the system and trim its
         * io context.
         */
        if (e->ops.trim) {
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        task_lock(p);
                        if (p->io_context)
                                e->ops.trim(p->io_context);
                        task_unlock(p);
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
        }

        spin_lock_irq(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
        elevator_t *old_elevator, *e;
        void *data;

        /*
         * Allocate new elevator
         */
        e = elevator_alloc(q, new_e);
        if (!e)
                return 0;

        data = elevator_init_queue(q, e);
        if (!data) {
                kobject_put(&e->kobj);
                return 0;
        }

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
         */
        spin_lock_irq(q->queue_lock);

        set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

        elv_drain_elevator(q);

        while (q->rq.elvpriv) {
                blk_remove_plug(q);
                q->request_fn(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);
        }

        /*
         * Remember old elevator.
         */
        old_elevator = q->elevator;

        /*
         * attach and start new elevator
         */
        elevator_attach(q, e, data);

        spin_unlock_irq(q->queue_lock);

        __elv_unregister_queue(old_elevator);

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return 1;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return 0;
}

ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        size_t len;
        struct elevator_type *e;

        elevator_name[sizeof(elevator_name) - 1] = '\0';
        strncpy(elevator_name, name, sizeof(elevator_name) - 1);
        len = strlen(elevator_name);

        if (len && elevator_name[len - 1] == '\n')
                elevator_name[len - 1] = '\0';

        e = elevator_get(elevator_name);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        if (!elevator_switch(q, e))
                printk(KERN_ERR "elevator: switch to %s failed\n", elevator_name);
        return count;
}

ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
        elevator_t *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
        struct list_head *entry;
        int len = 0;

        spin_lock_irq(q->queue_lock);
        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock_irq(q->queue_lock);

        len += sprintf(name+len, "\n");
        return len;
}

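/*
 * Usage example (assuming a disk named sda): reading the sysfs
 * attribute lists the registered schedulers with the active one
 * bracketed, and writing a name triggers elevator_switch() above:
 *
 *        # cat /sys/block/sda/queue/scheduler
 *        noop anticipatory deadline [cfq]
 *        # echo deadline > /sys/block/sda/queue/scheduler
 */
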
struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}

EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}

EXPORT_SYMBOL(elv_rb_latter_request);