block/elevator.c
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * same device and no special stuff set, merge is ok
         */
        if (rq->rq_disk == bio->bi_bdev->bd_disk &&
            !rq->waiting && !rq->special)
                return 1;

        return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

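/*
 * Classify a possible merge of bio into __rq: a back merge if bio starts
 * right where __rq ends, a front merge if bio ends right where __rq starts.
 */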
static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * we can merge and sequence is ok, check if it's possible
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}

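/*
 * Look up a registered elevator type by name.  The caller must hold
 * elv_list_lock; no module reference is taken (elevator_get() does that).
 */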
static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e = NULL;
        struct list_head *entry;

        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);

                if (!strcmp(__e->elevator_name, name)) {
                        e = __e;
                        break;
                }
        }

        return e;
}

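/*
 * elevator_get()/elevator_put() pair a lookup on elv_list with a module
 * reference on the io scheduler, so it can't be unloaded while in use.
 */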
static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock_irq(&elv_list_lock);

        e = elevator_find(name);
        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock_irq(&elv_list_lock);

        return e;
}

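/*
 * Bind elevator type e to queue q via the pre-allocated container eq and
 * run the scheduler's init hook.  Returns 0 on success; on error the
 * caller is responsible for freeing eq.
 */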
static int elevator_attach(request_queue_t *q, struct elevator_type *e,
                           struct elevator_queue *eq)
{
        int ret = 0;

        memset(eq, 0, sizeof(*eq));
        eq->ops = &e->ops;
        eq->elevator_type = e;

        q->elevator = eq;

        if (eq->ops->elevator_init_fn)
                ret = eq->ops->elevator_init_fn(q, eq);

        return ret;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        if (!strcmp(str, "as"))
                strcpy(chosen_elevator, "anticipatory");
        else
                strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        /* return 1 so the option is consumed and not passed on to init */
        return 1;
}

__setup("elevator=", elevator_setup);

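/*
 * Pick the initial elevator for a new queue: an explicit name from the
 * caller wins, then the boot-time "elevator=" choice, then
 * CONFIG_DEFAULT_IOSCHED, with noop as the last resort.
 */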
int elevator_init(request_queue_t *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name && !(e = elevator_get(name)))
                return -EINVAL;

        if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
                printk(KERN_ERR "I/O scheduler %s not found\n", chosen_elevator);

        if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
                printk(KERN_ERR "Default I/O scheduler not found, using no-op\n");
                e = elevator_get("noop");
        }

        eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
        if (!eq) {
                elevator_put(e);
                return -ENOMEM;
        }

        ret = elevator_attach(q, e, eq);
        if (ret) {
                kfree(eq);
                elevator_put(e);
        }

        return ret;
}

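/*
 * Tear down an elevator: run its exit hook, drop the reference on its
 * type and free the queue container.
 */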
void elevator_exit(elevator_t *e)
{
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);

        elevator_put(e->elevator_type);
        e->elevator_type = NULL;
        kfree(e);
}

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sort-inserted by sector, without crossing the one-way
 * elevator boundary at q->end_sector or any barrier/started request.
 * To be used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;
        q->nr_sorted--;

        boundary = q->end_sector;

        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                                continue;
                } else {
                        if (pos->sector >= boundary)
                                break;
                }
                if (rq->sector >= pos->sector)
                        break;
        }

        list_add(&rq->queuelist, entry);
}

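/*
 * See if bio can be merged into an existing request on q.  The cached
 * q->last_merge is tried first, then the scheduler's merge hook.  On a
 * hit, *req is set to the target request and ELEVATOR_BACK_MERGE or
 * ELEVATOR_FRONT_MERGE is returned.
 */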
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        elevator_t *e = q->elevator;
        int ret;

        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

void elv_merged_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
                        struct request *next)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);
        q->nr_sorted--;

        q->last_merge = rq;
}

void elv_requeue_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
                        e->ops->elevator_deactivate_req_fn(q, rq);
        }

        rq->flags &= ~REQ_STARTED;

        __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
}

static void elv_drain_elevator(request_queue_t *q)
{
        static int printed;
        while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted == 0)
                return;
        if (printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->elevator_type->elevator_name, q->nr_sorted);
        }
}

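/*
 * Core insertion path.  Queue lock must be held.  "where" is one of the
 * ELEVATOR_INSERT_* positions; "plug" requests that the queue be plugged
 * before insertion.
 */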
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
{
        struct list_head *pos;
        unsigned ordseq;

        if (q->ordcolor)
                rq->flags |= REQ_ORDERED_COLOR;

        if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
                q->ordcolor ^= 1;

                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is scheduling boundary, update end_sector
                 */
                if (blk_fs_request(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        rq->q = q;

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                blk_remove_plug(q);
                q->request_fn(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
                rq->flags |= REQ_SORTED;
                q->nr_sorted++;
                if (q->last_merge == NULL && rq_mergeable(rq))
                        q->last_merge = rq;
                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_REQUEUE:
                /*
                 * If ordered flush isn't in progress, we do front
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
                rq->flags |= REQ_SOFTBARRIER;

                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
                        break;
                }

                ordseq = blk_ordered_req_seq(rq);

                list_for_each(pos, &q->queue_head) {
                        struct request *pos_rq = list_entry_rq(pos);
                        if (ordseq <= blk_ordered_req_seq(pos_rq))
                                break;
                }

                list_add_tail(&rq->queuelist, pos);
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
                BUG();
        }

        if (blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE]
                          - q->in_flight;

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

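/*
 * Locked wrapper around __elv_add_request() for callers that don't
 * already hold the queue lock.
 */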
void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static inline struct request *__elv_next_request(request_queue_t *q)
{
        struct request *rq;

        while (1) {
                while (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        if (blk_do_ordered(q, &rq))
                                return rq;
                }

                if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}

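/*
 * Hand the driver its next request: mark it started, notify the
 * scheduler, and run the queue's prep_rq_fn.  BLKPREP_DEFER leaves the
 * request at the head and returns NULL; BLKPREP_KILL completes the
 * request with an error.
 */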
struct request *elv_next_request(request_queue_t *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                if (!(rq->flags & REQ_STARTED)) {
                        elevator_t *e = q->elevator;

                        /*
                         * This is the first time the device driver
                         * sees this request (possibly after
                         * requeueing).  Notify IO scheduler.
                         */
                        if (blk_sorted_rq(rq) &&
                            e->ops->elevator_activate_req_fn)
                                e->ops->elevator_activate_req_fn(q, rq);

                        /*
                         * just mark as started even if we don't start
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
                        rq->flags |= REQ_STARTED;
                }

                if (!q->boundary_rq || q->boundary_rq == rq) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = NULL;
                }

                if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
                         * avoid resource deadlock.  REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        int nr_bytes = rq->hard_nr_sectors << 9;

                        if (!nr_bytes)
                                nr_bytes = rq->data_len;

                        blkdev_dequeue_request(rq);
                        rq->flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
                        end_that_request_last(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                               ret);
                        break;
                }
        }

        return rq;
}

void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
        BUG_ON(list_empty(&rq->queuelist));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the lists
         * and when it is freed is accounted as io that is in progress at
         * the driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;
}

int elv_queue_empty(request_queue_t *q)
{
        elevator_t *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}

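/*
 * elv_latter_request()/elv_former_request() return the request that the
 * scheduler sorts after/before rq, or NULL if the hook isn't implemented.
 */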
struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                    gfp_t gfp_mask)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw, bio);

        return ELV_MQUEUE_MAY;
}

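/*
 * Completion hook: drop the in-flight count, notify the scheduler, and
 * kick an ordered-flush sequence that was waiting for the queue to
 * drain.
 */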
void elv_completed_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }

        /*
         * Check if the queue is waiting for fs requests to be
         * drained for flush sequence.
         */
        if (unlikely(q->ordseq)) {
                struct request *first_rq = list_entry_rq(q->queue_head.next);
                if (q->in_flight == 0 &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        q->request_fn(q);
                }
        }
}

int elv_register_queue(struct request_queue *q)
{
        elevator_t *e = q->elevator;

        e->kobj.parent = kobject_get(&q->kobj);
        if (!e->kobj.parent)
                return -EBUSY;

        snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
        e->kobj.ktype = e->elevator_type->elevator_ktype;

        return kobject_register(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                elevator_t *e = q->elevator;
                kobject_unregister(&e->kobj);
                kobject_put(&q->kobj);
        }
}

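/*
 * Add an elevator type to the global list.  An io scheduler typically
 * calls this from its module init; a minimal sketch (with a hypothetical
 * "foo" scheduler) looks like:
 *
 *	static struct elevator_type iosched_foo = {
 *		.ops		= { ... },
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&iosched_foo);
 *	}
 */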
int elv_register(struct elevator_type *e)
{
        spin_lock_irq(&elv_list_lock);
        if (elevator_find(e->elevator_name))
                BUG();
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
        if (!strcmp(e->elevator_name, chosen_elevator) ||
            (!*chosen_elevator &&
             !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                printk(" (default)");
        printk("\n");
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
        struct task_struct *g, *p;

        /*
         * Iterate every thread in the system and clear their io contexts,
         * since they may hold references to this scheduler's per-queue data.
         */
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                struct io_context *ioc = p->io_context;
                if (ioc && ioc->cic) {
                        ioc->cic->exit(ioc->cic);
                        ioc->cic->dtor(ioc->cic);
                        ioc->cic = NULL;
                }
                if (ioc && ioc->aic) {
                        ioc->aic->exit(ioc->aic);
                        ioc->aic->dtor(ioc->aic);
                        ioc->aic = NULL;
                }
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);

        spin_lock_irq(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
        elevator_t *old_elevator, *e;

        /*
         * Allocate new elevator
         */
        e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
        if (!e)
                goto error;

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
         */
        spin_lock_irq(q->queue_lock);

        set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

        elv_drain_elevator(q);

        while (q->rq.elvpriv) {
                blk_remove_plug(q);
                q->request_fn(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);
        }

        spin_unlock_irq(q->queue_lock);

        /*
         * unregister old elevator data
         */
        elv_unregister_queue(q);
        old_elevator = q->elevator;

        /*
         * attach and start new elevator
         */
        if (elevator_attach(q, new_e, e))
                goto fail;

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir).  Note that
         * elevator_exit() already drops the module reference on new_e,
         * so we must not fall through to the elevator_put() below.
         */
        elevator_exit(e);
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        printk(KERN_ERR "elevator: switch to %s failed\n",
               new_e->elevator_name);
        return;

fail:
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        kfree(e);
error:
        elevator_put(new_e);
        printk(KERN_ERR "elevator: switch to %s failed\n",
               new_e->elevator_name);
}

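/*
 * sysfs store handler for the queue's "scheduler" attribute: look up the
 * requested elevator and switch to it unless it's already active.
 */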
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        size_t len;
        struct elevator_type *e;

        elevator_name[sizeof(elevator_name) - 1] = '\0';
        strncpy(elevator_name, name, sizeof(elevator_name) - 1);
        len = strlen(elevator_name);

        if (len && elevator_name[len - 1] == '\n')
                elevator_name[len - 1] = '\0';

        e = elevator_get(elevator_name);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        elevator_switch(q, e);
        return count;
}

ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
        elevator_t *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
        struct list_head *entry;
        int len = 0;

        spin_lock_irq(q->queue_lock);
        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock_irq(q->queue_lock);

        len += sprintf(name+len, "\n");
        return len;
}

EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);