1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io scheduler
5  *  (round robin per-process disk scheduling) and on ideas from Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/rbtree.h>
13 #include <linux/ioprio.h>
14 #include <linux/blktrace_api.h>
15
16 /*
17  * tunables
18  */
19 /* max requests dispatched from one queue in one round of service */
20 static const int cfq_quantum = 4;
21 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
22 /* maximum backwards seek, in KiB */
23 static const int cfq_back_max = 16 * 1024;
24 /* penalty of a backwards seek */
25 static const int cfq_back_penalty = 2;
26 static const int cfq_slice_sync = HZ / 10;
27 static int cfq_slice_async = HZ / 25;
28 static const int cfq_slice_async_rq = 2;
29 static int cfq_slice_idle = HZ / 125;
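/*
 * For illustration of the defaults above (values are in jiffies; assuming
 * HZ == 1000): quantum=4 requests, fifo_expire={250ms async, 125ms sync},
 * back_max=16MiB, back_penalty=2x, slice_sync=100ms, slice_async=40ms,
 * slice_async_rq=2, slice_idle=8ms.
 */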
30
31 /*
32  * offset from end of service tree
33  */
34 #define CFQ_IDLE_DELAY          (HZ / 5)
35
36 /*
37  * below this threshold, we consider thinktime immediate
38  */
39 #define CFQ_MIN_TT              (2)
40
41 #define CFQ_SLICE_SCALE         (5)
42
43 #define RQ_CIC(rq)              \
44         ((struct cfq_io_context *) (rq)->elevator_private)
45 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)
46
47 static struct kmem_cache *cfq_pool;
48 static struct kmem_cache *cfq_ioc_pool;
49
50 static DEFINE_PER_CPU(unsigned long, ioc_count);
51 static struct completion *ioc_gone;
52 static DEFINE_SPINLOCK(ioc_gone_lock);
53
54 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
55 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
56 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
57
58 #define ASYNC                   (0)
59 #define SYNC                    (1)
60
61 #define sample_valid(samples)   ((samples) > 80)
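/*
 * Note on sample_valid(): the *_samples counters are maintained as
 * fixed-point averages of the form s = (7*s + 256) / 8 (see
 * cfq_update_io_thinktime() and cfq_update_io_seektime() below), so
 * starting from zero they go 32, 60, 84, ... and cross the threshold
 * of 80 after roughly three samples.
 */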
62
63 /*
64  * Most of our rbtree usage is for sorting with min extraction, so
65  * if we cache the leftmost node we don't have to walk down the tree
66  * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
67  * move this into the elevator for the rq sorting as well.
68  */
69 struct cfq_rb_root {
70         struct rb_root rb;
71         struct rb_node *left;
72 };
73 #define CFQ_RB_ROOT     (struct cfq_rb_root) { RB_ROOT, NULL, }
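/*
 * Usage sketch for the leftmost cache: cfq_rb_first() returns ->left,
 * lazily repopulating it with rb_first() when it is NULL; cfq_rb_erase()
 * clears it when the cached node is removed, and cfq_service_tree_add()
 * refreshes it when a new leftmost entry is inserted.
 */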
74
75 /*
76  * Per block device queue structure
77  */
78 struct cfq_data {
79         struct request_queue *queue;
80
81         /*
82          * rr list of queues with requests and the count of them
83          */
84         struct cfq_rb_root service_tree;
85         unsigned int busy_queues;
86
87         int rq_in_driver;
88         int sync_flight;
89         int hw_tag;
90
91         /*
92          * idle window management
93          */
94         struct timer_list idle_slice_timer;
95         struct work_struct unplug_work;
96
97         struct cfq_queue *active_queue;
98         struct cfq_io_context *active_cic;
99
100         /*
101          * async queue for each priority case
102          */
103         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
104         struct cfq_queue *async_idle_cfqq;
105
106         sector_t last_position;
107         unsigned long last_end_request;
108
109         /*
110          * tunables, see top of file
111          */
112         unsigned int cfq_quantum;
113         unsigned int cfq_fifo_expire[2];
114         unsigned int cfq_back_penalty;
115         unsigned int cfq_back_max;
116         unsigned int cfq_slice[2];
117         unsigned int cfq_slice_async_rq;
118         unsigned int cfq_slice_idle;
119
120         struct list_head cic_list;
121 };
122
123 /*
124  * Per process-grouping structure
125  */
126 struct cfq_queue {
127         /* reference count */
128         atomic_t ref;
129         /* various state flags, see below */
130         unsigned int flags;
131         /* parent cfq_data */
132         struct cfq_data *cfqd;
133         /* service_tree member */
134         struct rb_node rb_node;
135         /* service_tree key */
136         unsigned long rb_key;
137         /* sorted list of pending requests */
138         struct rb_root sort_list;
139         /* if fifo isn't expired, next request to serve */
140         struct request *next_rq;
141         /* requests queued in sort_list */
142         int queued[2];
143         /* currently allocated requests */
144         int allocated[2];
145         /* fifo list of requests in sort_list */
146         struct list_head fifo;
147
148         unsigned long slice_end;
149         long slice_resid;
150
151         /* pending metadata requests */
152         int meta_pending;
153         /* number of requests that are on the dispatch list or inside driver */
154         int dispatched;
155
156         /* io prio of this group */
157         unsigned short ioprio, org_ioprio;
158         unsigned short ioprio_class, org_ioprio_class;
159
160         pid_t pid;
161 };
162
163 enum cfqq_state_flags {
164         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
165         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
166         CFQ_CFQQ_FLAG_must_alloc,       /* must be allowed rq alloc */
167         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
168         CFQ_CFQQ_FLAG_must_dispatch,    /* must dispatch, even if expired */
169         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
170         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
171         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
172         CFQ_CFQQ_FLAG_queue_new,        /* queue never been serviced */
173         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
174         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
175 };
176
177 #define CFQ_CFQQ_FNS(name)                                              \
178 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
179 {                                                                       \
180         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
181 }                                                                       \
182 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
183 {                                                                       \
184         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
185 }                                                                       \
186 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
187 {                                                                       \
188         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
189 }
190
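/*
 * For example, CFQ_CFQQ_FNS(on_rr) below expands to three helpers that
 * set, clear and test bit CFQ_CFQQ_FLAG_on_rr in cfqq->flags:
 *
 *     cfq_mark_cfqq_on_rr(cfqq);      // cfqq->flags |= (1 << 0)
 *     cfq_clear_cfqq_on_rr(cfqq);     // cfqq->flags &= ~(1 << 0)
 *     if (cfq_cfqq_on_rr(cfqq)) ...   // test the bit
 */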
191 CFQ_CFQQ_FNS(on_rr);
192 CFQ_CFQQ_FNS(wait_request);
193 CFQ_CFQQ_FNS(must_alloc);
194 CFQ_CFQQ_FNS(must_alloc_slice);
195 CFQ_CFQQ_FNS(must_dispatch);
196 CFQ_CFQQ_FNS(fifo_expire);
197 CFQ_CFQQ_FNS(idle_window);
198 CFQ_CFQQ_FNS(prio_changed);
199 CFQ_CFQQ_FNS(queue_new);
200 CFQ_CFQQ_FNS(slice_new);
201 CFQ_CFQQ_FNS(sync);
202 #undef CFQ_CFQQ_FNS
203
204 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
205         blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
206 #define cfq_log(cfqd, fmt, args...)     \
207         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
208
209 static void cfq_dispatch_insert(struct request_queue *, struct request *);
210 static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
211                                        struct io_context *, gfp_t);
212 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
213                                                 struct io_context *);
214
215 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
216                                             int is_sync)
217 {
218         return cic->cfqq[!!is_sync];
219 }
220
221 static inline void cic_set_cfqq(struct cfq_io_context *cic,
222                                 struct cfq_queue *cfqq, int is_sync)
223 {
224         cic->cfqq[!!is_sync] = cfqq;
225 }
226
227 /*
228  * We regard a request as SYNC if it's either a read or has the SYNC bit
229  * set (in which case it could also be a direct WRITE).
230  */
231 static inline int cfq_bio_sync(struct bio *bio)
232 {
233         if (bio_data_dir(bio) == READ || bio_sync(bio))
234                 return 1;
235
236         return 0;
237 }
238
239 /*
240  * schedule a run of the queue if there are requests pending and nothing in
241  * the driver that will restart queueing
242  */
243 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
244 {
245         if (cfqd->busy_queues) {
246                 cfq_log(cfqd, "schedule dispatch");
247                 kblockd_schedule_work(&cfqd->unplug_work);
248         }
249 }
250
251 static int cfq_queue_empty(struct request_queue *q)
252 {
253         struct cfq_data *cfqd = q->elevator->elevator_data;
254
255         return !cfqd->busy_queues;
256 }
257
258 /*
259  * Scale schedule slice based on io priority. Use the sync time slice only
260  * if a queue is marked sync and has sync io queued. A sync queue with async
261  * io only should not get the full sync slice length.
262  */
263 static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
264                                  unsigned short prio)
265 {
266         const int base_slice = cfqd->cfq_slice[sync];
267
268         WARN_ON(prio >= IOPRIO_BE_NR);
269
270         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
271 }
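/*
 * Worked example, assuming HZ == 1000 and the default sync slice of 100ms:
 * base_slice/CFQ_SLICE_SCALE is 20ms per priority step, so a prio 0 queue
 * gets 100 + 20*4 = 180ms, prio 4 (the default) gets 100ms, and prio 7
 * gets 100 - 20*3 = 40ms.
 */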
272
273 static inline int
274 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
275 {
276         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
277 }
278
279 static inline void
280 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
281 {
282         cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
283         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
284 }
285
286 /*
287  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
288  * isn't valid until the first request of the dispatch round is activated
289  * and the slice time set.
290  */
291 static inline int cfq_slice_used(struct cfq_queue *cfqq)
292 {
293         if (cfq_cfqq_slice_new(cfqq))
294                 return 0;
295         if (time_before(jiffies, cfqq->slice_end))
296                 return 0;
297
298         return 1;
299 }
300
301 /*
302  * Lifted from AS - choose which of rq1 and rq2 is best served now.
303  * We choose the request that is closest to the head right now. Distance
304  * behind the head is penalized and only allowed to a certain extent.
305  */
306 static struct request *
307 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
308 {
309         sector_t last, s1, s2, d1 = 0, d2 = 0;
310         unsigned long back_max;
311 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
312 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
313         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
314
315         if (rq1 == NULL || rq1 == rq2)
316                 return rq2;
317         if (rq2 == NULL)
318                 return rq1;
319
320         if (rq_is_sync(rq1) && !rq_is_sync(rq2))
321                 return rq1;
322         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
323                 return rq2;
324         if (rq_is_meta(rq1) && !rq_is_meta(rq2))
325                 return rq1;
326         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
327                 return rq2;
328
329         s1 = rq1->sector;
330         s2 = rq2->sector;
331
332         last = cfqd->last_position;
333
334         /*
335          * by definition, 1KiB is 2 sectors
336          */
337         back_max = cfqd->cfq_back_max * 2;
338
339         /*
340          * Strict one way elevator _except_ in the case where we allow
341          * short backward seeks which are biased as twice the cost of a
342          * similar forward seek.
343          */
344         if (s1 >= last)
345                 d1 = s1 - last;
346         else if (s1 + back_max >= last)
347                 d1 = (last - s1) * cfqd->cfq_back_penalty;
348         else
349                 wrap |= CFQ_RQ1_WRAP;
350
351         if (s2 >= last)
352                 d2 = s2 - last;
353         else if (s2 + back_max >= last)
354                 d2 = (last - s2) * cfqd->cfq_back_penalty;
355         else
356                 wrap |= CFQ_RQ2_WRAP;
357
358         /* Found required data */
359
360         /*
361          * By doing switch() on the bit mask "wrap" we avoid having to
362          * check two variables for all permutations: --> faster!
363          */
364         switch (wrap) {
365         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
366                 if (d1 < d2)
367                         return rq1;
368                 else if (d2 < d1)
369                         return rq2;
370                 else {
371                         if (s1 >= s2)
372                                 return rq1;
373                         else
374                                 return rq2;
375                 }
376
377         case CFQ_RQ2_WRAP:
378                 return rq1;
379         case CFQ_RQ1_WRAP:
380                 return rq2;
381         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
382         default:
383                 /*
384                  * Since both rqs are wrapped,
385                  * start with the one that's further behind head
386                  * (--> only *one* back seek required),
387                  * since back seek takes more time than forward.
388                  */
389                 if (s1 <= s2)
390                         return rq1;
391                 else
392                         return rq2;
393         }
394 }
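/*
 * Example: with last_position == 1000 and back_penalty == 2, a request at
 * sector 1100 gives d1 = 100 while a request at sector 900 gives
 * d2 = (1000 - 900) * 2 = 200, so the forward request wins even though the
 * backward one is physically closer to the head.
 */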
395
396 /*
397  * The below adds leftmost-node caching support to the service rbtree
398  */
399 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
400 {
401         if (!root->left)
402                 root->left = rb_first(&root->rb);
403
404         if (root->left)
405                 return rb_entry(root->left, struct cfq_queue, rb_node);
406
407         return NULL;
408 }
409
410 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
411 {
412         if (root->left == n)
413                 root->left = NULL;
414
415         rb_erase(n, &root->rb);
416         RB_CLEAR_NODE(n);
417 }
418
419 /*
420  * would be nice to take fifo expire time into account as well
421  */
422 static struct request *
423 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
424                   struct request *last)
425 {
426         struct rb_node *rbnext = rb_next(&last->rb_node);
427         struct rb_node *rbprev = rb_prev(&last->rb_node);
428         struct request *next = NULL, *prev = NULL;
429
430         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
431
432         if (rbprev)
433                 prev = rb_entry_rq(rbprev);
434
435         if (rbnext)
436                 next = rb_entry_rq(rbnext);
437         else {
438                 rbnext = rb_first(&cfqq->sort_list);
439                 if (rbnext && rbnext != &last->rb_node)
440                         next = rb_entry_rq(rbnext);
441         }
442
443         return cfq_choose_req(cfqd, next, prev);
444 }
445
446 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
447                                       struct cfq_queue *cfqq)
448 {
449         /*
450          * just an approximation, should be ok.
451          */
452         return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
453                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
454 }
455
456 /*
457  * The cfqd->service_tree holds all pending cfq_queue's that have
458  * requests waiting to be processed. It is sorted in the order that
459  * we will service the queues.
460  */
461 static void cfq_service_tree_add(struct cfq_data *cfqd,
462                                     struct cfq_queue *cfqq, int add_front)
463 {
464         struct rb_node **p, *parent;
465         struct cfq_queue *__cfqq;
466         unsigned long rb_key;
467         int left;
468
469         if (cfq_class_idle(cfqq)) {
470                 rb_key = CFQ_IDLE_DELAY;
471                 parent = rb_last(&cfqd->service_tree.rb);
472                 if (parent && parent != &cfqq->rb_node) {
473                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
474                         rb_key += __cfqq->rb_key;
475                 } else
476                         rb_key += jiffies;
477         } else if (!add_front) {
478                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
479                 rb_key += cfqq->slice_resid;
480                 cfqq->slice_resid = 0;
481         } else
482                 rb_key = 0;
483
484         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
485                 /*
486                  * same position, nothing more to do
487                  */
488                 if (rb_key == cfqq->rb_key)
489                         return;
490
491                 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
492         }
493
494         left = 1;
495         parent = NULL;
496         p = &cfqd->service_tree.rb.rb_node;
497         while (*p) {
498                 struct rb_node **n;
499
500                 parent = *p;
501                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
502
503                 /*
504                  * Sort RT queues first, we always want to give
505                  * preference to them. IDLE queues go to the back.
506                  * After that, sort on the next service time.
507                  */
508                 if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
509                         n = &(*p)->rb_left;
510                 else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
511                         n = &(*p)->rb_right;
512                 else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
513                         n = &(*p)->rb_left;
514                 else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
515                         n = &(*p)->rb_right;
516                 else if (rb_key < __cfqq->rb_key)
517                         n = &(*p)->rb_left;
518                 else
519                         n = &(*p)->rb_right;
520
521                 if (n == &(*p)->rb_right)
522                         left = 0;
523
524                 p = n;
525         }
526
527         if (left)
528                 cfqd->service_tree.left = &cfqq->rb_node;
529
530         cfqq->rb_key = rb_key;
531         rb_link_node(&cfqq->rb_node, parent, p);
532         rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
533 }
534
535 /*
536  * Update cfqq's position in the service tree.
537  */
538 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
539 {
540         /*
541          * Resorting requires the cfqq to be on the RR list already.
542          */
543         if (cfq_cfqq_on_rr(cfqq))
544                 cfq_service_tree_add(cfqd, cfqq, 0);
545 }
546
547 /*
548  * Add to the busy list of queues for service, trying to be fair in ordering
549  * the pending list according to the last request service time
550  */
551 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
552 {
553         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
554         BUG_ON(cfq_cfqq_on_rr(cfqq));
555         cfq_mark_cfqq_on_rr(cfqq);
556         cfqd->busy_queues++;
557
558         cfq_resort_rr_list(cfqd, cfqq);
559 }
560
561 /*
562  * Called when the cfqq no longer has requests pending, remove it from
563  * the service tree.
564  */
565 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
566 {
567         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
568         BUG_ON(!cfq_cfqq_on_rr(cfqq));
569         cfq_clear_cfqq_on_rr(cfqq);
570
571         if (!RB_EMPTY_NODE(&cfqq->rb_node))
572                 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
573
574         BUG_ON(!cfqd->busy_queues);
575         cfqd->busy_queues--;
576 }
577
578 /*
579  * rb tree support functions
580  */
581 static void cfq_del_rq_rb(struct request *rq)
582 {
583         struct cfq_queue *cfqq = RQ_CFQQ(rq);
584         struct cfq_data *cfqd = cfqq->cfqd;
585         const int sync = rq_is_sync(rq);
586
587         BUG_ON(!cfqq->queued[sync]);
588         cfqq->queued[sync]--;
589
590         elv_rb_del(&cfqq->sort_list, rq);
591
592         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
593                 cfq_del_cfqq_rr(cfqd, cfqq);
594 }
595
596 static void cfq_add_rq_rb(struct request *rq)
597 {
598         struct cfq_queue *cfqq = RQ_CFQQ(rq);
599         struct cfq_data *cfqd = cfqq->cfqd;
600         struct request *__alias;
601
602         cfqq->queued[rq_is_sync(rq)]++;
603
604         /*
605          * looks a little odd, but the first insert might return an alias.
606          * if that happens, put the alias on the dispatch list
607          */
608         while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
609                 cfq_dispatch_insert(cfqd->queue, __alias);
610
611         if (!cfq_cfqq_on_rr(cfqq))
612                 cfq_add_cfqq_rr(cfqd, cfqq);
613
614         /*
615          * check if this request is a better next-serve candidate
616          */
617         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
618         BUG_ON(!cfqq->next_rq);
619 }
620
621 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
622 {
623         elv_rb_del(&cfqq->sort_list, rq);
624         cfqq->queued[rq_is_sync(rq)]--;
625         cfq_add_rq_rb(rq);
626 }
627
628 static struct request *
629 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
630 {
631         struct task_struct *tsk = current;
632         struct cfq_io_context *cic;
633         struct cfq_queue *cfqq;
634
635         cic = cfq_cic_lookup(cfqd, tsk->io_context);
636         if (!cic)
637                 return NULL;
638
639         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
640         if (cfqq) {
641                 sector_t sector = bio->bi_sector + bio_sectors(bio);
642
643                 return elv_rb_find(&cfqq->sort_list, sector);
644         }
645
646         return NULL;
647 }
648
649 static void cfq_activate_request(struct request_queue *q, struct request *rq)
650 {
651         struct cfq_data *cfqd = q->elevator->elevator_data;
652
653         cfqd->rq_in_driver++;
654         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
655                                                 cfqd->rq_in_driver);
656
657         /*
658          * If the depth is larger than 1, it really could be queueing. But let's
659          * make the mark a little higher - idling could still be good for
660          * low queueing, and a low queueing number could also just indicate
661          * a SCSI mid layer like behaviour where limit+1 is often seen.
662          */
663         if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
664                 cfqd->hw_tag = 1;
665
666         cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
667 }
668
669 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
670 {
671         struct cfq_data *cfqd = q->elevator->elevator_data;
672
673         WARN_ON(!cfqd->rq_in_driver);
674         cfqd->rq_in_driver--;
675         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
676                                                 cfqd->rq_in_driver);
677 }
678
679 static void cfq_remove_request(struct request *rq)
680 {
681         struct cfq_queue *cfqq = RQ_CFQQ(rq);
682
683         if (cfqq->next_rq == rq)
684                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
685
686         list_del_init(&rq->queuelist);
687         cfq_del_rq_rb(rq);
688
689         if (rq_is_meta(rq)) {
690                 WARN_ON(!cfqq->meta_pending);
691                 cfqq->meta_pending--;
692         }
693 }
694
695 static int cfq_merge(struct request_queue *q, struct request **req,
696                      struct bio *bio)
697 {
698         struct cfq_data *cfqd = q->elevator->elevator_data;
699         struct request *__rq;
700
701         __rq = cfq_find_rq_fmerge(cfqd, bio);
702         if (__rq && elv_rq_merge_ok(__rq, bio)) {
703                 *req = __rq;
704                 return ELEVATOR_FRONT_MERGE;
705         }
706
707         return ELEVATOR_NO_MERGE;
708 }
709
710 static void cfq_merged_request(struct request_queue *q, struct request *req,
711                                int type)
712 {
713         if (type == ELEVATOR_FRONT_MERGE) {
714                 struct cfq_queue *cfqq = RQ_CFQQ(req);
715
716                 cfq_reposition_rq_rb(cfqq, req);
717         }
718 }
719
720 static void
721 cfq_merged_requests(struct request_queue *q, struct request *rq,
722                     struct request *next)
723 {
724         /*
725          * reposition in fifo if next is older than rq
726          */
727         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
728             time_before(next->start_time, rq->start_time))
729                 list_move(&rq->queuelist, &next->queuelist);
730
731         cfq_remove_request(next);
732 }
733
734 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
735                            struct bio *bio)
736 {
737         struct cfq_data *cfqd = q->elevator->elevator_data;
738         struct cfq_io_context *cic;
739         struct cfq_queue *cfqq;
740
741         /*
742          * Disallow merge of a sync bio into an async request.
743          */
744         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
745                 return 0;
746
747         /*
748          * Lookup the cfqq that this bio will be queued with. Allow
749          * merge only if rq is queued there.
750          */
751         cic = cfq_cic_lookup(cfqd, current->io_context);
752         if (!cic)
753                 return 0;
754
755         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
756         if (cfqq == RQ_CFQQ(rq))
757                 return 1;
758
759         return 0;
760 }
761
762 static void __cfq_set_active_queue(struct cfq_data *cfqd,
763                                    struct cfq_queue *cfqq)
764 {
765         if (cfqq) {
766                 cfq_log_cfqq(cfqd, cfqq, "set_active");
767                 cfqq->slice_end = 0;
768                 cfq_clear_cfqq_must_alloc_slice(cfqq);
769                 cfq_clear_cfqq_fifo_expire(cfqq);
770                 cfq_mark_cfqq_slice_new(cfqq);
771                 cfq_clear_cfqq_queue_new(cfqq);
772         }
773
774         cfqd->active_queue = cfqq;
775 }
776
777 /*
778  * current cfqq expired its slice (or was too idle), select new one
779  */
780 static void
781 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
782                     int timed_out)
783 {
784         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
785
786         if (cfq_cfqq_wait_request(cfqq))
787                 del_timer(&cfqd->idle_slice_timer);
788
789         cfq_clear_cfqq_must_dispatch(cfqq);
790         cfq_clear_cfqq_wait_request(cfqq);
791
792         /*
793          * store what was left of this slice, if the queue idled/timed out
794          */
795         if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
796                 cfqq->slice_resid = cfqq->slice_end - jiffies;
797                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
798         }
799
800         cfq_resort_rr_list(cfqd, cfqq);
801
802         if (cfqq == cfqd->active_queue)
803                 cfqd->active_queue = NULL;
804
805         if (cfqd->active_cic) {
806                 put_io_context(cfqd->active_cic->ioc);
807                 cfqd->active_cic = NULL;
808         }
809 }
810
811 static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
812 {
813         struct cfq_queue *cfqq = cfqd->active_queue;
814
815         if (cfqq)
816                 __cfq_slice_expired(cfqd, cfqq, timed_out);
817 }
818
819 /*
820  * Get next queue for service. Unless we have a queue preemption,
821  * we'll simply select the first cfqq in the service tree.
822  */
823 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
824 {
825         if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
826                 return NULL;
827
828         return cfq_rb_first(&cfqd->service_tree);
829 }
830
831 /*
832  * Get and set a new active queue for service.
833  */
834 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
835 {
836         struct cfq_queue *cfqq;
837
838         cfqq = cfq_get_next_queue(cfqd);
839         __cfq_set_active_queue(cfqd, cfqq);
840         return cfqq;
841 }
842
843 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
844                                           struct request *rq)
845 {
846         if (rq->sector >= cfqd->last_position)
847                 return rq->sector - cfqd->last_position;
848         else
849                 return cfqd->last_position - rq->sector;
850 }
851
852 static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
853 {
854         struct cfq_io_context *cic = cfqd->active_cic;
855
856         if (!sample_valid(cic->seek_samples))
857                 return 0;
858
859         return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
860 }
861
862 static int cfq_close_cooperator(struct cfq_data *cfq_data,
863                                 struct cfq_queue *cfqq)
864 {
865         /*
866          * We should notice if some of the queues are cooperating, eg
867          * working closely on the same area of the disk. In that case,
868          * we can group them together and not waste time idling.
869          */
870         return 0;
871 }
872
873 #define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
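/*
 * seek_mean is kept in sectors (see cfq_update_io_seektime() below), so a
 * mean seek distance above 8*1024 sectors, i.e. roughly 4MiB, marks the
 * context as seeky and shortens the idle window armed below.
 */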
874
875 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
876 {
877         struct cfq_queue *cfqq = cfqd->active_queue;
878         struct cfq_io_context *cic;
879         unsigned long sl;
880
881         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
882         WARN_ON(cfq_cfqq_slice_new(cfqq));
883
884         /*
885          * idle is disabled, either manually or by past process history
886          */
887         if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
888                 return;
889
890         /*
891          * still requests with the driver, don't idle
892          */
893         if (cfqd->rq_in_driver)
894                 return;
895
896         /*
897          * task has exited, don't wait
898          */
899         cic = cfqd->active_cic;
900         if (!cic || !atomic_read(&cic->ioc->nr_tasks))
901                 return;
902
903         /*
904          * See if this prio level has a good candidate
905          */
906         if (cfq_close_cooperator(cfqd, cfqq) &&
907             (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
908                 return;
909
910         cfq_mark_cfqq_must_dispatch(cfqq);
911         cfq_mark_cfqq_wait_request(cfqq);
912
913         /*
914          * we don't want to idle for seeks, but we do want to allow
915          * fair distribution of slice time for a process doing back-to-back
916          * seeks. So allow a little bit of time for it to submit a new rq
917          */
918         sl = cfqd->cfq_slice_idle;
919         if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
920                 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
921
922         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
923         cfq_log(cfqd, "arm_idle: %lu", sl);
924 }
925
926 /*
927  * Move request from internal lists to the request queue dispatch list.
928  */
929 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
930 {
931         struct cfq_data *cfqd = q->elevator->elevator_data;
932         struct cfq_queue *cfqq = RQ_CFQQ(rq);
933
934         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
935
936         cfq_remove_request(rq);
937         cfqq->dispatched++;
938         elv_dispatch_sort(q, rq);
939
940         if (cfq_cfqq_sync(cfqq))
941                 cfqd->sync_flight++;
942 }
943
944 /*
945  * return expired entry, or NULL to just start from scratch in rbtree
946  */
947 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
948 {
949         struct cfq_data *cfqd = cfqq->cfqd;
950         struct request *rq;
951         int fifo;
952
953         if (cfq_cfqq_fifo_expire(cfqq))
954                 return NULL;
955
956         cfq_mark_cfqq_fifo_expire(cfqq);
957
958         if (list_empty(&cfqq->fifo))
959                 return NULL;
960
961         fifo = cfq_cfqq_sync(cfqq);
962         rq = rq_entry_fifo(cfqq->fifo.next);
963
964         if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
965                 rq = NULL;
966
967         cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
968         return rq;
969 }
970
971 static inline int
972 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
973 {
974         const int base_rq = cfqd->cfq_slice_async_rq;
975
976         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
977
978         return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
979 }
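/*
 * Example with the default cfq_slice_async_rq of 2 and CFQ_PRIO_LISTS of 8:
 * an ioprio 0 queue may dispatch up to 2 * (2 + 2*7) = 32 requests per
 * slice, ioprio 4 up to 16, and ioprio 7 only 4.
 */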
980
981 /*
982  * Select a queue for service. If we have a current active queue,
983  * check whether to continue servicing it, or retrieve and set a new one.
984  */
985 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
986 {
987         struct cfq_queue *cfqq;
988
989         cfqq = cfqd->active_queue;
990         if (!cfqq)
991                 goto new_queue;
992
993         /*
994          * The active queue has run out of time, expire it and select new.
995          */
996         if (cfq_slice_used(cfqq))
997                 goto expire;
998
999         /*
1000          * The active queue has requests and isn't expired, allow it to
1001          * dispatch.
1002          */
1003         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
1004                 goto keep_queue;
1005
1006         /*
1007          * No requests pending. If the active queue still has requests in
1008          * flight or is idling for a new request, allow either of these
1009          * conditions to happen (or time out) before selecting a new queue.
1010          */
1011         if (timer_pending(&cfqd->idle_slice_timer) ||
1012             (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
1013                 cfqq = NULL;
1014                 goto keep_queue;
1015         }
1016
1017 expire:
1018         cfq_slice_expired(cfqd, 0);
1019 new_queue:
1020         cfqq = cfq_set_active_queue(cfqd);
1021 keep_queue:
1022         return cfqq;
1023 }
1024
1025 /*
1026  * Dispatch some requests from cfqq, moving them to the request queue
1027  * dispatch list.
1028  */
1029 static int
1030 __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1031                         int max_dispatch)
1032 {
1033         int dispatched = 0;
1034
1035         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
1036
1037         do {
1038                 struct request *rq;
1039
1040                 /*
1041                  * follow the expired path, else get the next available request
1042                  */
1043                 rq = cfq_check_fifo(cfqq);
1044                 if (rq == NULL)
1045                         rq = cfqq->next_rq;
1046
1047                 /*
1048                  * finally, insert request into driver dispatch list
1049                  */
1050                 cfq_dispatch_insert(cfqd->queue, rq);
1051
1052                 dispatched++;
1053
1054                 if (!cfqd->active_cic) {
1055                         atomic_inc(&RQ_CIC(rq)->ioc->refcount);
1056                         cfqd->active_cic = RQ_CIC(rq);
1057                 }
1058
1059                 if (RB_EMPTY_ROOT(&cfqq->sort_list))
1060                         break;
1061
1062         } while (dispatched < max_dispatch);
1063
1064         /*
1065          * expire an async queue immediately if it has used up its slice. Idle
1066          * queues always expire after 1 dispatch round.
1067          */
1068         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
1069             dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1070             cfq_class_idle(cfqq))) {
1071                 cfqq->slice_end = jiffies + 1;
1072                 cfq_slice_expired(cfqd, 0);
1073         }
1074
1075         return dispatched;
1076 }
1077
1078 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
1079 {
1080         int dispatched = 0;
1081
1082         while (cfqq->next_rq) {
1083                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
1084                 dispatched++;
1085         }
1086
1087         BUG_ON(!list_empty(&cfqq->fifo));
1088         return dispatched;
1089 }
1090
1091 /*
1092  * Drain our current requests. Used for barriers and when switching
1093  * io schedulers on-the-fly.
1094  */
1095 static int cfq_forced_dispatch(struct cfq_data *cfqd)
1096 {
1097         struct cfq_queue *cfqq;
1098         int dispatched = 0;
1099
1100         while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
1101                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1102
1103         cfq_slice_expired(cfqd, 0);
1104
1105         BUG_ON(cfqd->busy_queues);
1106
1107         cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
1108         return dispatched;
1109 }
1110
1111 static int cfq_dispatch_requests(struct request_queue *q, int force)
1112 {
1113         struct cfq_data *cfqd = q->elevator->elevator_data;
1114         struct cfq_queue *cfqq;
1115         int dispatched;
1116
1117         if (!cfqd->busy_queues)
1118                 return 0;
1119
1120         if (unlikely(force))
1121                 return cfq_forced_dispatch(cfqd);
1122
1123         dispatched = 0;
1124         while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
1125                 int max_dispatch;
1126
1127                 max_dispatch = cfqd->cfq_quantum;
1128                 if (cfq_class_idle(cfqq))
1129                         max_dispatch = 1;
1130
1131                 if (cfqq->dispatched >= max_dispatch) {
1132                         if (cfqd->busy_queues > 1)
1133                                 break;
1134                         if (cfqq->dispatched >= 4 * max_dispatch)
1135                                 break;
1136                 }
1137
1138                 if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
1139                         break;
1140
1141                 cfq_clear_cfqq_must_dispatch(cfqq);
1142                 cfq_clear_cfqq_wait_request(cfqq);
1143                 del_timer(&cfqd->idle_slice_timer);
1144
1145                 dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
1146         }
1147
1148         cfq_log(cfqd, "dispatched=%d", dispatched);
1149         return dispatched;
1150 }
1151
1152 /*
1153  * task holds one reference to the queue, dropped when task exits. each rq
1154  * in-flight on this queue also holds a reference, dropped when rq is freed.
1155  *
1156  * queue lock must be held here.
1157  */
1158 static void cfq_put_queue(struct cfq_queue *cfqq)
1159 {
1160         struct cfq_data *cfqd = cfqq->cfqd;
1161
1162         BUG_ON(atomic_read(&cfqq->ref) <= 0);
1163
1164         if (!atomic_dec_and_test(&cfqq->ref))
1165                 return;
1166
1167         cfq_log_cfqq(cfqd, cfqq, "put_queue");
1168         BUG_ON(rb_first(&cfqq->sort_list));
1169         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1170         BUG_ON(cfq_cfqq_on_rr(cfqq));
1171
1172         if (unlikely(cfqd->active_queue == cfqq)) {
1173                 __cfq_slice_expired(cfqd, cfqq, 0);
1174                 cfq_schedule_dispatch(cfqd);
1175         }
1176
1177         kmem_cache_free(cfq_pool, cfqq);
1178 }
1179
1180 /*
1181  * Must always be called with the rcu_read_lock() held
1182  */
1183 static void
1184 __call_for_each_cic(struct io_context *ioc,
1185                     void (*func)(struct io_context *, struct cfq_io_context *))
1186 {
1187         struct cfq_io_context *cic;
1188         struct hlist_node *n;
1189
1190         hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
1191                 func(ioc, cic);
1192 }
1193
1194 /*
1195  * Call func for each cic attached to this ioc.
1196  */
1197 static void
1198 call_for_each_cic(struct io_context *ioc,
1199                   void (*func)(struct io_context *, struct cfq_io_context *))
1200 {
1201         rcu_read_lock();
1202         __call_for_each_cic(ioc, func);
1203         rcu_read_unlock();
1204 }
1205
1206 static void cfq_cic_free_rcu(struct rcu_head *head)
1207 {
1208         struct cfq_io_context *cic;
1209
1210         cic = container_of(head, struct cfq_io_context, rcu_head);
1211
1212         kmem_cache_free(cfq_ioc_pool, cic);
1213         elv_ioc_count_dec(ioc_count);
1214
1215         if (ioc_gone) {
1216                 /*
1217                  * CFQ scheduler is exiting, grab exit lock and check
1218                  * the pending io context count. If it hits zero,
1219                  * complete ioc_gone and set it back to NULL
1220                  */
1221                 spin_lock(&ioc_gone_lock);
1222                 if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
1223                         complete(ioc_gone);
1224                         ioc_gone = NULL;
1225                 }
1226                 spin_unlock(&ioc_gone_lock);
1227         }
1228 }
1229
1230 static void cfq_cic_free(struct cfq_io_context *cic)
1231 {
1232         call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
1233 }
1234
1235 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
1236 {
1237         unsigned long flags;
1238
1239         BUG_ON(!cic->dead_key);
1240
1241         spin_lock_irqsave(&ioc->lock, flags);
1242         radix_tree_delete(&ioc->radix_root, cic->dead_key);
1243         hlist_del_rcu(&cic->cic_list);
1244         spin_unlock_irqrestore(&ioc->lock, flags);
1245
1246         cfq_cic_free(cic);
1247 }
1248
1249 /*
1250  * Must be called with rcu_read_lock() held or preemption otherwise disabled.
1251  * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
1252  * and ->trim() which is called with the task lock held
1253  */
1254 static void cfq_free_io_context(struct io_context *ioc)
1255 {
1256         /*
1257          * ioc->refcount is zero here, or we are called from elv_unregister(),
1258          * so no more cic's are allowed to be linked into this ioc.  So it
1259          * should be ok to iterate over the known list, we will see all cic's
1260          * since no new ones are added.
1261          */
1262         __call_for_each_cic(ioc, cic_free_func);
1263 }
1264
1265 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1266 {
1267         if (unlikely(cfqq == cfqd->active_queue)) {
1268                 __cfq_slice_expired(cfqd, cfqq, 0);
1269                 cfq_schedule_dispatch(cfqd);
1270         }
1271
1272         cfq_put_queue(cfqq);
1273 }
1274
1275 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1276                                          struct cfq_io_context *cic)
1277 {
1278         struct io_context *ioc = cic->ioc;
1279
1280         list_del_init(&cic->queue_list);
1281
1282         /*
1283          * Make sure key == NULL is seen for dead queues
1284          */
1285         smp_wmb();
1286         cic->dead_key = (unsigned long) cic->key;
1287         cic->key = NULL;
1288
1289         if (ioc->ioc_data == cic)
1290                 rcu_assign_pointer(ioc->ioc_data, NULL);
1291
1292         if (cic->cfqq[ASYNC]) {
1293                 cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
1294                 cic->cfqq[ASYNC] = NULL;
1295         }
1296
1297         if (cic->cfqq[SYNC]) {
1298                 cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
1299                 cic->cfqq[SYNC] = NULL;
1300         }
1301 }
1302
1303 static void cfq_exit_single_io_context(struct io_context *ioc,
1304                                        struct cfq_io_context *cic)
1305 {
1306         struct cfq_data *cfqd = cic->key;
1307
1308         if (cfqd) {
1309                 struct request_queue *q = cfqd->queue;
1310                 unsigned long flags;
1311
1312                 spin_lock_irqsave(q->queue_lock, flags);
1313                 __cfq_exit_single_io_context(cfqd, cic);
1314                 spin_unlock_irqrestore(q->queue_lock, flags);
1315         }
1316 }
1317
1318 /*
1319  * The process that ioc belongs to has exited, we need to clean up
1320  * and put the internal structures we have that belong to that process.
1321  */
1322 static void cfq_exit_io_context(struct io_context *ioc)
1323 {
1324         call_for_each_cic(ioc, cfq_exit_single_io_context);
1325 }
1326
1327 static struct cfq_io_context *
1328 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1329 {
1330         struct cfq_io_context *cic;
1331
1332         cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
1333                                                         cfqd->queue->node);
1334         if (cic) {
1335                 cic->last_end_request = jiffies;
1336                 INIT_LIST_HEAD(&cic->queue_list);
1337                 INIT_HLIST_NODE(&cic->cic_list);
1338                 cic->dtor = cfq_free_io_context;
1339                 cic->exit = cfq_exit_io_context;
1340                 elv_ioc_count_inc(ioc_count);
1341         }
1342
1343         return cic;
1344 }
1345
1346 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
1347 {
1348         struct task_struct *tsk = current;
1349         int ioprio_class;
1350
1351         if (!cfq_cfqq_prio_changed(cfqq))
1352                 return;
1353
1354         ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
1355         switch (ioprio_class) {
1356         default:
1357                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1358         case IOPRIO_CLASS_NONE:
1359                 /*
1360                  * no prio set, inherit CPU scheduling settings
1361                  */
1362                 cfqq->ioprio = task_nice_ioprio(tsk);
1363                 cfqq->ioprio_class = task_nice_ioclass(tsk);
1364                 break;
1365         case IOPRIO_CLASS_RT:
1366                 cfqq->ioprio = task_ioprio(ioc);
1367                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
1368                 break;
1369         case IOPRIO_CLASS_BE:
1370                 cfqq->ioprio = task_ioprio(ioc);
1371                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
1372                 break;
1373         case IOPRIO_CLASS_IDLE:
1374                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1375                 cfqq->ioprio = 7;
1376                 cfq_clear_cfqq_idle_window(cfqq);
1377                 break;
1378         }
1379
1380         /*
1381          * keep track of original prio settings in case we have to temporarily
1382          * elevate the priority of this queue
1383          */
1384         cfqq->org_ioprio = cfqq->ioprio;
1385         cfqq->org_ioprio_class = cfqq->ioprio_class;
1386         cfq_clear_cfqq_prio_changed(cfqq);
1387 }
1388
1389 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
1390 {
1391         struct cfq_data *cfqd = cic->key;
1392         struct cfq_queue *cfqq;
1393         unsigned long flags;
1394
1395         if (unlikely(!cfqd))
1396                 return;
1397
1398         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1399
1400         cfqq = cic->cfqq[ASYNC];
1401         if (cfqq) {
1402                 struct cfq_queue *new_cfqq;
1403                 new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
1404                 if (new_cfqq) {
1405                         cic->cfqq[ASYNC] = new_cfqq;
1406                         cfq_put_queue(cfqq);
1407                 }
1408         }
1409
1410         cfqq = cic->cfqq[SYNC];
1411         if (cfqq)
1412                 cfq_mark_cfqq_prio_changed(cfqq);
1413
1414         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1415 }
1416
1417 static void cfq_ioc_set_ioprio(struct io_context *ioc)
1418 {
1419         call_for_each_cic(ioc, changed_ioprio);
1420         ioc->ioprio_changed = 0;
1421 }
1422
1423 static struct cfq_queue *
1424 cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
1425                      struct io_context *ioc, gfp_t gfp_mask)
1426 {
1427         struct cfq_queue *cfqq, *new_cfqq = NULL;
1428         struct cfq_io_context *cic;
1429
1430 retry:
1431         cic = cfq_cic_lookup(cfqd, ioc);
1432         /* cic always exists here */
1433         cfqq = cic_to_cfqq(cic, is_sync);
1434
1435         if (!cfqq) {
1436                 if (new_cfqq) {
1437                         cfqq = new_cfqq;
1438                         new_cfqq = NULL;
1439                 } else if (gfp_mask & __GFP_WAIT) {
1440                         /*
1441                          * Inform the allocator of the fact that we will
1442                          * just repeat this allocation if it fails, to allow
1443                          * the allocator to do whatever it needs to attempt to
1444                          * free memory.
1445                          */
1446                         spin_unlock_irq(cfqd->queue->queue_lock);
1447                         new_cfqq = kmem_cache_alloc_node(cfq_pool,
1448                                         gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
1449                                         cfqd->queue->node);
1450                         spin_lock_irq(cfqd->queue->queue_lock);
1451                         goto retry;
1452                 } else {
1453                         cfqq = kmem_cache_alloc_node(cfq_pool,
1454                                         gfp_mask | __GFP_ZERO,
1455                                         cfqd->queue->node);
1456                         if (!cfqq)
1457                                 goto out;
1458                 }
1459
1460                 RB_CLEAR_NODE(&cfqq->rb_node);
1461                 INIT_LIST_HEAD(&cfqq->fifo);
1462
1463                 atomic_set(&cfqq->ref, 0);
1464                 cfqq->cfqd = cfqd;
1465
1466                 cfq_mark_cfqq_prio_changed(cfqq);
1467                 cfq_mark_cfqq_queue_new(cfqq);
1468
1469                 cfq_init_prio_data(cfqq, ioc);
1470
1471                 if (is_sync) {
1472                         if (!cfq_class_idle(cfqq))
1473                                 cfq_mark_cfqq_idle_window(cfqq);
1474                         cfq_mark_cfqq_sync(cfqq);
1475                 }
1476                 cfqq->pid = current->pid;
1477                 cfq_log_cfqq(cfqd, cfqq, "alloced");
1478         }
1479
1480         if (new_cfqq)
1481                 kmem_cache_free(cfq_pool, new_cfqq);
1482
1483 out:
1484         WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1485         return cfqq;
1486 }
1487
1488 static struct cfq_queue **
1489 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
1490 {
1491         switch (ioprio_class) {
1492         case IOPRIO_CLASS_RT:
1493                 return &cfqd->async_cfqq[0][ioprio];
1494         case IOPRIO_CLASS_BE:
1495                 return &cfqd->async_cfqq[1][ioprio];
1496         case IOPRIO_CLASS_IDLE:
1497                 return &cfqd->async_idle_cfqq;
1498         default:
1499                 BUG();
1500         }
1501 }
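/*
 * The shared async queues are thus indexed as async_cfqq[0][prio] for the
 * RT class, async_cfqq[1][prio] for BE, and a single async_idle_cfqq for
 * all IDLE priorities.
 */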
1502
1503 static struct cfq_queue *
1504 cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
1505               gfp_t gfp_mask)
1506 {
1507         const int ioprio = task_ioprio(ioc);
1508         const int ioprio_class = task_ioprio_class(ioc);
1509         struct cfq_queue **async_cfqq = NULL;
1510         struct cfq_queue *cfqq = NULL;
1511
1512         if (!is_sync) {
1513                 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
1514                 cfqq = *async_cfqq;
1515         }
1516
1517         if (!cfqq) {
1518                 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
1519                 if (!cfqq)
1520                         return NULL;
1521         }
1522
1523         /*
1524          * pin the queue now that it's allocated, scheduler exit will prune it
1525          */
1526         if (!is_sync && !(*async_cfqq)) {
1527                 atomic_inc(&cfqq->ref);
1528                 *async_cfqq = cfqq;
1529         }
1530
1531         atomic_inc(&cfqq->ref);
1532         return cfqq;
1533 }
1534
1535 /*
1536  * We drop cfq io contexts lazily, so we may find a dead one.
1537  */
1538 static void
1539 cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
1540                   struct cfq_io_context *cic)
1541 {
1542         unsigned long flags;
1543
1544         WARN_ON(!list_empty(&cic->queue_list));
1545
1546         spin_lock_irqsave(&ioc->lock, flags);
1547
1548         BUG_ON(ioc->ioc_data == cic);
1549
1550         radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
1551         hlist_del_rcu(&cic->cic_list);
1552         spin_unlock_irqrestore(&ioc->lock, flags);
1553
1554         cfq_cic_free(cic);
1555 }
1556
1557 static struct cfq_io_context *
1558 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1559 {
1560         struct cfq_io_context *cic;
1561         unsigned long flags;
1562         void *k;
1563
1564         if (unlikely(!ioc))
1565                 return NULL;
1566
1567         rcu_read_lock();
1568
1569         /*
1570          * we maintain a last-hit cache, to avoid browsing over the tree
1571          */
1572         cic = rcu_dereference(ioc->ioc_data);
1573         if (cic && cic->key == cfqd) {
1574                 rcu_read_unlock();
1575                 return cic;
1576         }
1577
1578         do {
1579                 cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
1580                 rcu_read_unlock();
1581                 if (!cic)
1582                         break;
1583                 /* ->key must be copied to avoid race with cfq_exit_queue() */
1584                 k = cic->key;
1585                 if (unlikely(!k)) {
1586                         cfq_drop_dead_cic(cfqd, ioc, cic);
1587                         rcu_read_lock();
1588                         continue;
1589                 }
1590
1591                 spin_lock_irqsave(&ioc->lock, flags);
1592                 rcu_assign_pointer(ioc->ioc_data, cic);
1593                 spin_unlock_irqrestore(&ioc->lock, flags);
1594                 break;
1595         } while (1);
1596
1597         return cic;
1598 }
1599
1600 /*
1601  * Add cic into ioc, using cfqd as the search key. This enables us to look up
1602  * the process specific cfq io context when entered from the block layer.
1603  * Also adds the cic to a per-cfqd list, used when this queue is removed.
1604  */
1605 static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1606                         struct cfq_io_context *cic, gfp_t gfp_mask)
1607 {
1608         unsigned long flags;
1609         int ret;
1610
1611         ret = radix_tree_preload(gfp_mask);
1612         if (!ret) {
1613                 cic->ioc = ioc;
1614                 cic->key = cfqd;
1615
1616                 spin_lock_irqsave(&ioc->lock, flags);
1617                 ret = radix_tree_insert(&ioc->radix_root,
1618                                                 (unsigned long) cfqd, cic);
1619                 if (!ret)
1620                         hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
1621                 spin_unlock_irqrestore(&ioc->lock, flags);
1622
1623                 radix_tree_preload_end();
1624
1625                 if (!ret) {
1626                         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1627                         list_add(&cic->queue_list, &cfqd->cic_list);
1628                         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1629                 }
1630         }
1631
1632         if (ret)
1633                 printk(KERN_ERR "cfq: cic link failed!\n");
1634
1635         return ret;
1636 }
1637
1638 /*
1639  * Set up the general io context and cfq io context. There can be several cfq
1640  * io contexts per general io context, if this process is doing io to more
1641  * than one device managed by cfq.
1642  */
1643 static struct cfq_io_context *
1644 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1645 {
1646         struct io_context *ioc = NULL;
1647         struct cfq_io_context *cic;
1648
1649         might_sleep_if(gfp_mask & __GFP_WAIT);
1650
1651         ioc = get_io_context(gfp_mask, cfqd->queue->node);
1652         if (!ioc)
1653                 return NULL;
1654
1655         cic = cfq_cic_lookup(cfqd, ioc);
1656         if (cic)
1657                 goto out;
1658
1659         cic = cfq_alloc_io_context(cfqd, gfp_mask);
1660         if (cic == NULL)
1661                 goto err;
1662
1663         if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
1664                 goto err_free;
1665
1666 out:
1667         smp_read_barrier_depends();
1668         if (unlikely(ioc->ioprio_changed))
1669                 cfq_ioc_set_ioprio(ioc);
1670
1671         return cic;
1672 err_free:
1673         cfq_cic_free(cic);
1674 err:
1675         put_io_context(ioc);
1676         return NULL;
1677 }
1678
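/*
 * Think time is tracked as a fixed-point exponential moving average: each
 * new sample gets a 1/8 weight against the existing history, with everything
 * scaled by 256 to keep precision in integer math.  As an illustration, with
 * a steady think time of 4 jiffies, ttime_samples converges towards 256 and
 * ttime_total towards 1024, so ttime_mean settles at roughly 4.
 */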
1679 static void
1680 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1681 {
1682         unsigned long elapsed = jiffies - cic->last_end_request;
1683         unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1684
1685         cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1686         cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1687         cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1688 }
1689
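/*
 * Seek distance is smoothed with the same fixed-point, 1/8-weighted moving
 * average as think time.  Each sample is first clamped relative to the
 * current mean, so that one unusually large seek does not dominate the
 * average.
 */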
1690 static void
1691 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1692                        struct request *rq)
1693 {
1694         sector_t sdist;
1695         u64 total;
1696
1697         if (cic->last_request_pos < rq->sector)
1698                 sdist = rq->sector - cic->last_request_pos;
1699         else
1700                 sdist = cic->last_request_pos - rq->sector;
1701
1702         /*
1703          * Don't allow the seek distance to get too large from the
1704          * odd fragment, pagein, etc.
1705          */
1706         if (cic->seek_samples <= 60) /* second&third seek */
1707                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1708         else
1709                 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1710
1711         cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1712         cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1713         total = cic->seek_total + (cic->seek_samples/2);
1714         do_div(total, cic->seek_samples);
1715         cic->seek_mean = (sector_t)total;
1716 }
1717
1718 /*
1719  * Disable idle window if the process thinks too long or seeks so much that
1720  * it doesn't matter.
1721  */
1722 static void
1723 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1724                        struct cfq_io_context *cic)
1725 {
1726         int old_idle, enable_idle;
1727
1728         /*
1729          * Don't idle for async or idle io prio class
1730          */
1731         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1732                 return;
1733
1734         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1735
1736         if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
1737             (cfqd->hw_tag && CIC_SEEKY(cic)))
1738                 enable_idle = 0;
1739         else if (sample_valid(cic->ttime_samples)) {
1740                 if (cic->ttime_mean > cfqd->cfq_slice_idle)
1741                         enable_idle = 0;
1742                 else
1743                         enable_idle = 1;
1744         }
1745
1746         if (old_idle != enable_idle) {
1747                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
1748                 if (enable_idle)
1749                         cfq_mark_cfqq_idle_window(cfqq);
1750                 else
1751                         cfq_clear_cfqq_idle_window(cfqq);
1752         }
1753 }
1754
1755 /*
1756  * Check if new_cfqq should preempt the currently active queue. Return 0 if
1757  * it should not (or if we aren't sure); a return of 1 will cause a preempt.
1758  */
1759 static int
1760 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
1761                    struct request *rq)
1762 {
1763         struct cfq_queue *cfqq;
1764
1765         cfqq = cfqd->active_queue;
1766         if (!cfqq)
1767                 return 0;
1768
1769         if (cfq_slice_used(cfqq))
1770                 return 1;
1771
1772         if (cfq_class_idle(new_cfqq))
1773                 return 0;
1774
1775         if (cfq_class_idle(cfqq))
1776                 return 1;
1777
1778         /*
1779          * if the new request is sync, but the currently running queue is
1780          * not, let the sync request have priority.
1781          */
1782         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
1783                 return 1;
1784
1785         /*
1786          * So both queues are sync. Let the new request get disk time if
1787          * it's a metadata request and the current queue is doing regular IO.
1788          */
1789         if (rq_is_meta(rq) && !cfqq->meta_pending)
1790                 return 1;
1791
1792         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
1793                 return 0;
1794
1795         /*
1796          * if this request is as good as one we would expect from the
1797          * current cfqq, let it preempt
1798          */
1799         if (cfq_rq_close(cfqd, rq))
1800                 return 1;
1801
1802         return 0;
1803 }
1804
1805 /*
1806  * cfqq preempts the active queue. If we allowed preempt with no slice left,
1807  * let it have half of its nominal slice.
1808  */
1809 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1810 {
1811         cfq_log_cfqq(cfqd, cfqq, "preempt");
1812         cfq_slice_expired(cfqd, 1);
1813
1814         /*
1815          * Put the new queue at the front of the current list,
1816          * so we know that it will be selected next.
1817          */
1818         BUG_ON(!cfq_cfqq_on_rr(cfqq));
1819
1820         cfq_service_tree_add(cfqd, cfqq, 1);
1821
1822         cfqq->slice_end = 0;
1823         cfq_mark_cfqq_slice_new(cfqq);
1824 }
1825
1826 /*
1827  * Called when a new fs request (rq) is added (to cfqq). Check if there's
1828  * something we should do about it.
1829  */
1830 static void
1831 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1832                 struct request *rq)
1833 {
1834         struct cfq_io_context *cic = RQ_CIC(rq);
1835
1836         if (rq_is_meta(rq))
1837                 cfqq->meta_pending++;
1838
1839         cfq_update_io_thinktime(cfqd, cic);
1840         cfq_update_io_seektime(cfqd, cic, rq);
1841         cfq_update_idle_window(cfqd, cfqq, cic);
1842
1843         cic->last_request_pos = rq->sector + rq->nr_sectors;
1844
1845         if (cfqq == cfqd->active_queue) {
1846                 /*
1847                  * if we are waiting for a request for this queue, let it rip
1848                  * immediately and flag that we must not expire this queue
1849                  * just now
1850                  */
1851                 if (cfq_cfqq_wait_request(cfqq)) {
1852                         cfq_mark_cfqq_must_dispatch(cfqq);
1853                         del_timer(&cfqd->idle_slice_timer);
1854                         blk_start_queueing(cfqd->queue);
1855                 }
1856         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
1857                 /*
1858                  * not the active queue - expire current slice if it is
1859                  * idle and has expired its mean thinktime or this new queue
1860                  * has some old slice time left and is of higher priority
1861                  */
1862                 cfq_preempt_queue(cfqd, cfqq);
1863                 cfq_mark_cfqq_must_dispatch(cfqq);
1864                 blk_start_queueing(cfqd->queue);
1865         }
1866 }
1867
1868 static void cfq_insert_request(struct request_queue *q, struct request *rq)
1869 {
1870         struct cfq_data *cfqd = q->elevator->elevator_data;
1871         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1872
1873         cfq_log_cfqq(cfqd, cfqq, "insert_request");
1874         cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
1875
1876         cfq_add_rq_rb(rq);
1877
1878         list_add_tail(&rq->queuelist, &cfqq->fifo);
1879
1880         cfq_rq_enqueued(cfqd, cfqq, rq);
1881 }
1882
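/*
 * Request completion: drop the in-flight accounting and, if this was the
 * active queue, either expire its slice or arm the idle timer to wait a
 * little while for more sync IO from the same queue.
 */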
1883 static void cfq_completed_request(struct request_queue *q, struct request *rq)
1884 {
1885         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1886         struct cfq_data *cfqd = cfqq->cfqd;
1887         const int sync = rq_is_sync(rq);
1888         unsigned long now;
1889
1890         now = jiffies;
1891         cfq_log_cfqq(cfqd, cfqq, "complete");
1892
1893         WARN_ON(!cfqd->rq_in_driver);
1894         WARN_ON(!cfqq->dispatched);
1895         cfqd->rq_in_driver--;
1896         cfqq->dispatched--;
1897
1898         if (cfq_cfqq_sync(cfqq))
1899                 cfqd->sync_flight--;
1900
1901         if (!cfq_class_idle(cfqq))
1902                 cfqd->last_end_request = now;
1903
1904         if (sync)
1905                 RQ_CIC(rq)->last_end_request = now;
1906
1907         /*
1908          * If this is the active queue, check if it needs to be expired,
1909          * or if we want to idle in case it has no pending requests.
1910          */
1911         if (cfqd->active_queue == cfqq) {
1912                 if (cfq_cfqq_slice_new(cfqq)) {
1913                         cfq_set_prio_slice(cfqd, cfqq);
1914                         cfq_clear_cfqq_slice_new(cfqq);
1915                 }
1916                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
1917                         cfq_slice_expired(cfqd, 1);
1918                 else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
1919                         cfq_arm_slice_timer(cfqd);
1920         }
1921
1922         if (!cfqd->rq_in_driver)
1923                 cfq_schedule_dispatch(cfqd);
1924 }
1925
1926 /*
1927  * We temporarily boost lower-priority queues if they are holding fs exclusive
1928  * resources. They are boosted to normal prio (CLASS_BE/4).
1929  */
1930 static void cfq_prio_boost(struct cfq_queue *cfqq)
1931 {
1932         if (has_fs_excl()) {
1933                 /*
1934                  * boost idle prio on transactions that would lock out other
1935                  * users of the filesystem
1936                  */
1937                 if (cfq_class_idle(cfqq))
1938                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1939                 if (cfqq->ioprio > IOPRIO_NORM)
1940                         cfqq->ioprio = IOPRIO_NORM;
1941         } else {
1942                 /*
1943                  * check if we need to unboost the queue
1944                  */
1945                 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
1946                         cfqq->ioprio_class = cfqq->org_ioprio_class;
1947                 if (cfqq->ioprio != cfqq->org_ioprio)
1948                         cfqq->ioprio = cfqq->org_ioprio;
1949         }
1950 }
1951
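/*
 * A queue that is waiting for a request, or has been flagged must_alloc, is
 * told it MUST be allowed to allocate; the must_alloc_slice flag ensures we
 * only hand out that guarantee once per slice.
 */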
1952 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
1953 {
1954         if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
1955             !cfq_cfqq_must_alloc_slice(cfqq)) {
1956                 cfq_mark_cfqq_must_alloc_slice(cfqq);
1957                 return ELV_MQUEUE_MUST;
1958         }
1959
1960         return ELV_MQUEUE_MAY;
1961 }
1962
1963 static int cfq_may_queue(struct request_queue *q, int rw)
1964 {
1965         struct cfq_data *cfqd = q->elevator->elevator_data;
1966         struct task_struct *tsk = current;
1967         struct cfq_io_context *cic;
1968         struct cfq_queue *cfqq;
1969
1970         /*
1971          * don't force setup of a queue from here, as a call to may_queue
1972          * does not necessarily imply that a request actually will be queued.
1973          * so just look up a possibly existing queue, or return 'may queue'
1974          * if that fails.
1975          */
1976         cic = cfq_cic_lookup(cfqd, tsk->io_context);
1977         if (!cic)
1978                 return ELV_MQUEUE_MAY;
1979
1980         cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
1981         if (cfqq) {
1982                 cfq_init_prio_data(cfqq, cic->ioc);
1983                 cfq_prio_boost(cfqq);
1984
1985                 return __cfq_may_queue(cfqq);
1986         }
1987
1988         return ELV_MQUEUE_MAY;
1989 }
1990
1991 /*
1992  * queue lock held here
1993  */
1994 static void cfq_put_request(struct request *rq)
1995 {
1996         struct cfq_queue *cfqq = RQ_CFQQ(rq);
1997
1998         if (cfqq) {
1999                 const int rw = rq_data_dir(rq);
2000
2001                 BUG_ON(!cfqq->allocated[rw]);
2002                 cfqq->allocated[rw]--;
2003
2004                 put_io_context(RQ_CIC(rq)->ioc);
2005
2006                 rq->elevator_private = NULL;
2007                 rq->elevator_private2 = NULL;
2008
2009                 cfq_put_queue(cfqq);
2010         }
2011 }
2012
2013 /*
2014  * Allocate cfq data structures associated with this request.
2015  */
2016 static int
2017 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
2018 {
2019         struct cfq_data *cfqd = q->elevator->elevator_data;
2020         struct cfq_io_context *cic;
2021         const int rw = rq_data_dir(rq);
2022         const int is_sync = rq_is_sync(rq);
2023         struct cfq_queue *cfqq;
2024         unsigned long flags;
2025
2026         might_sleep_if(gfp_mask & __GFP_WAIT);
2027
2028         cic = cfq_get_io_context(cfqd, gfp_mask);
2029
2030         spin_lock_irqsave(q->queue_lock, flags);
2031
2032         if (!cic)
2033                 goto queue_fail;
2034
2035         cfqq = cic_to_cfqq(cic, is_sync);
2036         if (!cfqq) {
2037                 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
2038
2039                 if (!cfqq)
2040                         goto queue_fail;
2041
2042                 cic_set_cfqq(cic, cfqq, is_sync);
2043         }
2044
2045         cfqq->allocated[rw]++;
2046         cfq_clear_cfqq_must_alloc(cfqq);
2047         atomic_inc(&cfqq->ref);
2048
2049         spin_unlock_irqrestore(q->queue_lock, flags);
2050
2051         rq->elevator_private = cic;
2052         rq->elevator_private2 = cfqq;
2053         return 0;
2054
2055 queue_fail:
2056         if (cic)
2057                 put_io_context(cic->ioc);
2058
2059         cfq_schedule_dispatch(cfqd);
2060         spin_unlock_irqrestore(q->queue_lock, flags);
2061         cfq_log(cfqd, "set_request fail");
2062         return 1;
2063 }
2064
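/*
 * Work handler for the unplug work scheduled via cfq_schedule_dispatch();
 * it runs from kblockd and restarts the request queue with the queue lock
 * held.
 */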
2065 static void cfq_kick_queue(struct work_struct *work)
2066 {
2067         struct cfq_data *cfqd =
2068                 container_of(work, struct cfq_data, unplug_work);
2069         struct request_queue *q = cfqd->queue;
2070         unsigned long flags;
2071
2072         spin_lock_irqsave(q->queue_lock, flags);
2073         blk_start_queueing(q);
2074         spin_unlock_irqrestore(q->queue_lock, flags);
2075 }
2076
2077 /*
2078  * Timer running if the active_queue is currently idling inside its time slice
2079  */
2080 static void cfq_idle_slice_timer(unsigned long data)
2081 {
2082         struct cfq_data *cfqd = (struct cfq_data *) data;
2083         struct cfq_queue *cfqq;
2084         unsigned long flags;
2085         int timed_out = 1;
2086
2087         cfq_log(cfqd, "idle timer fired");
2088
2089         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2090
2091         cfqq = cfqd->active_queue;
2092         if (cfqq) {
2093                 timed_out = 0;
2094
2095                 /*
2096                  * expired
2097                  */
2098                 if (cfq_slice_used(cfqq))
2099                         goto expire;
2100
2101                 /*
2102                  * only expire and reinvoke the request handler if there are
2103                  * other queues with pending requests
2104                  */
2105                 if (!cfqd->busy_queues)
2106                         goto out_cont;
2107
2108                 /*
2109                  * not expired and it has a request pending, let it dispatch
2110                  */
2111                 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
2112                         cfq_mark_cfqq_must_dispatch(cfqq);
2113                         goto out_kick;
2114                 }
2115         }
2116 expire:
2117         cfq_slice_expired(cfqd, timed_out);
2118 out_kick:
2119         cfq_schedule_dispatch(cfqd);
2120 out_cont:
2121         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2122 }
2123
2124 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2125 {
2126         del_timer_sync(&cfqd->idle_slice_timer);
2127         kblockd_flush_work(&cfqd->unplug_work);
2128 }
2129
2130 static void cfq_put_async_queues(struct cfq_data *cfqd)
2131 {
2132         int i;
2133
2134         for (i = 0; i < IOPRIO_BE_NR; i++) {
2135                 if (cfqd->async_cfqq[0][i])
2136                         cfq_put_queue(cfqd->async_cfqq[0][i]);
2137                 if (cfqd->async_cfqq[1][i])
2138                         cfq_put_queue(cfqd->async_cfqq[1][i]);
2139         }
2140
2141         if (cfqd->async_idle_cfqq)
2142                 cfq_put_queue(cfqd->async_idle_cfqq);
2143 }
2144
2145 static void cfq_exit_queue(elevator_t *e)
2146 {
2147         struct cfq_data *cfqd = e->elevator_data;
2148         struct request_queue *q = cfqd->queue;
2149
2150         cfq_shutdown_timer_wq(cfqd);
2151
2152         spin_lock_irq(q->queue_lock);
2153
2154         if (cfqd->active_queue)
2155                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2156
2157         while (!list_empty(&cfqd->cic_list)) {
2158                 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2159                                                         struct cfq_io_context,
2160                                                         queue_list);
2161
2162                 __cfq_exit_single_io_context(cfqd, cic);
2163         }
2164
2165         cfq_put_async_queues(cfqd);
2166
2167         spin_unlock_irq(q->queue_lock);
2168
2169         cfq_shutdown_timer_wq(cfqd);
2170
2171         kfree(cfqd);
2172 }
2173
2174 static void *cfq_init_queue(struct request_queue *q)
2175 {
2176         struct cfq_data *cfqd;
2177
2178         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
2179         if (!cfqd)
2180                 return NULL;
2181
2182         cfqd->service_tree = CFQ_RB_ROOT;
2183         INIT_LIST_HEAD(&cfqd->cic_list);
2184
2185         cfqd->queue = q;
2186
2187         init_timer(&cfqd->idle_slice_timer);
2188         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2189         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2190
2191         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
2192
2193         cfqd->last_end_request = jiffies;
2194         cfqd->cfq_quantum = cfq_quantum;
2195         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2196         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2197         cfqd->cfq_back_max = cfq_back_max;
2198         cfqd->cfq_back_penalty = cfq_back_penalty;
2199         cfqd->cfq_slice[0] = cfq_slice_async;
2200         cfqd->cfq_slice[1] = cfq_slice_sync;
2201         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2202         cfqd->cfq_slice_idle = cfq_slice_idle;
2203
2204         return cfqd;
2205 }
2206
2207 static void cfq_slab_kill(void)
2208 {
2209         /*
2210          * Caller already ensured that pending RCU callbacks are completed,
2211          * so we should have no busy allocations at this point.
2212          */
2213         if (cfq_pool)
2214                 kmem_cache_destroy(cfq_pool);
2215         if (cfq_ioc_pool)
2216                 kmem_cache_destroy(cfq_ioc_pool);
2217 }
2218
2219 static int __init cfq_slab_setup(void)
2220 {
2221         cfq_pool = KMEM_CACHE(cfq_queue, 0);
2222         if (!cfq_pool)
2223                 goto fail;
2224
2225         cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
2226         if (!cfq_ioc_pool)
2227                 goto fail;
2228
2229         return 0;
2230 fail:
2231         cfq_slab_kill();
2232         return -ENOMEM;
2233 }
2234
2235 /*
2236  * sysfs parts below -->
2237  */
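/*
 * Each attribute below becomes a file under the per-queue iosched directory,
 * typically /sys/block/<dev>/queue/iosched/.  Values kept in jiffies
 * internally (the __CONV case) are shown and stored in milliseconds.
 */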
2238 static ssize_t
2239 cfq_var_show(unsigned int var, char *page)
2240 {
2241         return sprintf(page, "%d\n", var);
2242 }
2243
2244 static ssize_t
2245 cfq_var_store(unsigned int *var, const char *page, size_t count)
2246 {
2247         char *p = (char *) page;
2248
2249         *var = simple_strtoul(p, &p, 10);
2250         return count;
2251 }
2252
2253 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2254 static ssize_t __FUNC(elevator_t *e, char *page)                        \
2255 {                                                                       \
2256         struct cfq_data *cfqd = e->elevator_data;                       \
2257         unsigned int __data = __VAR;                                    \
2258         if (__CONV)                                                     \
2259                 __data = jiffies_to_msecs(__data);                      \
2260         return cfq_var_show(__data, (page));                            \
2261 }
2262 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2263 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2264 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2265 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2266 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2267 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2268 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2269 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2270 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2271 #undef SHOW_FUNCTION
2272
2273 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2274 static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
2275 {                                                                       \
2276         struct cfq_data *cfqd = e->elevator_data;                       \
2277         unsigned int __data;                                            \
2278         int ret = cfq_var_store(&__data, (page), count);                \
2279         if (__data < (MIN))                                             \
2280                 __data = (MIN);                                         \
2281         else if (__data > (MAX))                                        \
2282                 __data = (MAX);                                         \
2283         if (__CONV)                                                     \
2284                 *(__PTR) = msecs_to_jiffies(__data);                    \
2285         else                                                            \
2286                 *(__PTR) = __data;                                      \
2287         return ret;                                                     \
2288 }
2289 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2290 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
2291                 UINT_MAX, 1);
2292 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
2293                 UINT_MAX, 1);
2294 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2295 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
2296                 UINT_MAX, 0);
2297 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2298 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2299 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2300 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
2301                 UINT_MAX, 0);
2302 #undef STORE_FUNCTION
2303
2304 #define CFQ_ATTR(name) \
2305         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
2306
2307 static struct elv_fs_entry cfq_attrs[] = {
2308         CFQ_ATTR(quantum),
2309         CFQ_ATTR(fifo_expire_sync),
2310         CFQ_ATTR(fifo_expire_async),
2311         CFQ_ATTR(back_seek_max),
2312         CFQ_ATTR(back_seek_penalty),
2313         CFQ_ATTR(slice_sync),
2314         CFQ_ATTR(slice_async),
2315         CFQ_ATTR(slice_async_rq),
2316         CFQ_ATTR(slice_idle),
2317         __ATTR_NULL
2318 };
2319
2320 static struct elevator_type iosched_cfq = {
2321         .ops = {
2322                 .elevator_merge_fn =            cfq_merge,
2323                 .elevator_merged_fn =           cfq_merged_request,
2324                 .elevator_merge_req_fn =        cfq_merged_requests,
2325                 .elevator_allow_merge_fn =      cfq_allow_merge,
2326                 .elevator_dispatch_fn =         cfq_dispatch_requests,
2327                 .elevator_add_req_fn =          cfq_insert_request,
2328                 .elevator_activate_req_fn =     cfq_activate_request,
2329                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
2330                 .elevator_queue_empty_fn =      cfq_queue_empty,
2331                 .elevator_completed_req_fn =    cfq_completed_request,
2332                 .elevator_former_req_fn =       elv_rb_former_request,
2333                 .elevator_latter_req_fn =       elv_rb_latter_request,
2334                 .elevator_set_req_fn =          cfq_set_request,
2335                 .elevator_put_req_fn =          cfq_put_request,
2336                 .elevator_may_queue_fn =        cfq_may_queue,
2337                 .elevator_init_fn =             cfq_init_queue,
2338                 .elevator_exit_fn =             cfq_exit_queue,
2339                 .trim =                         cfq_free_io_context,
2340         },
2341         .elevator_attrs =       cfq_attrs,
2342         .elevator_name =        "cfq",
2343         .elevator_owner =       THIS_MODULE,
2344 };
2345
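/*
 * Module init/exit.  Once registered, CFQ can on a typical setup be selected
 * at runtime by writing "cfq" to /sys/block/<dev>/queue/scheduler, or made
 * the default with the "elevator=cfq" boot parameter.
 */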
2346 static int __init cfq_init(void)
2347 {
2348         /*
2349          * could be 0 on HZ < 1000 setups
2350          */
2351         if (!cfq_slice_async)
2352                 cfq_slice_async = 1;
2353         if (!cfq_slice_idle)
2354                 cfq_slice_idle = 1;
2355
2356         if (cfq_slab_setup())
2357                 return -ENOMEM;
2358
2359         elv_register(&iosched_cfq);
2360
2361         return 0;
2362 }
2363
2364 static void __exit cfq_exit(void)
2365 {
2366         DECLARE_COMPLETION_ONSTACK(all_gone);
2367         elv_unregister(&iosched_cfq);
2368         ioc_gone = &all_gone;
2369         /* ioc_gone's update must be visible before reading ioc_count */
2370         smp_wmb();
2371
2372         /*
2373          * this also protects us from entering cfq_slab_kill() with
2374          * pending RCU callbacks
2375          */
2376         if (elv_ioc_count_read(ioc_count))
2377                 wait_for_completion(&all_gone);
2378         cfq_slab_kill();
2379 }
2380
2381 module_init(cfq_init);
2382 module_exit(cfq_exit);
2383
2384 MODULE_AUTHOR("Jens Axboe");
2385 MODULE_LICENSE("GPL");
2386 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");