net: Use queue aware tests throughout.
net/sched/sch_generic.c
/*
 * net/sched/sch_generic.c      Generic packet scheduler routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * queue->lock spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via top level device
 *   spinlock queue->lock.
 * - ingress filtering is serialized via top level device
 *   spinlock dev->rx_queue.lock.
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

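/*
 * qdisc_lock_tree - grab the qdisc tree locks for a device
 *
 * Takes every TX queue lock of the device plus the RX queue lock, with BH
 * disabled, so the whole qdisc tree can be modified safely.  A sketch of
 * the typical calling pattern is:
 *
 *      qdisc_lock_tree(dev);
 *      ...update txq->qdisc / txq->qdisc_sleeping pointers...
 *      qdisc_unlock_tree(dev);
 *
 * qdisc_unlock_tree() below drops all of these locks and re-enables BH.
 */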
void qdisc_lock_tree(struct net_device *dev)
        __acquires(dev->rx_queue.lock)
{
        unsigned int i;

        local_bh_disable();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                spin_lock(&txq->lock);
        }
        spin_lock(&dev->rx_queue.lock);
}
EXPORT_SYMBOL(qdisc_lock_tree);

void qdisc_unlock_tree(struct net_device *dev)
        __releases(dev->rx_queue.lock)
{
        unsigned int i;

        spin_unlock(&dev->rx_queue.lock);
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                spin_unlock(&txq->lock);
        }
        local_bh_enable();
}
EXPORT_SYMBOL(qdisc_unlock_tree);

static inline int qdisc_qlen(struct Qdisc *q)
{
        return q->q.qlen;
}

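/*
 * dev_requeue_skb - put a packet back for a later transmission attempt
 *
 * If skb->next is set, the skb is the head of a partially transmitted list
 * of GSO segments; it is stashed in dev_queue->gso_skb so that dequeue_skb()
 * below hands it back first and segment order is preserved.  Otherwise the
 * packet goes back through the qdisc's ->requeue() hook.  The queue is then
 * rescheduled, and 0 is returned so qdisc_restart() reports an empty or
 * throttled queue and the current run ends.
 */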
static inline int dev_requeue_skb(struct sk_buff *skb,
                                  struct netdev_queue *dev_queue,
                                  struct Qdisc *q)
{
        if (unlikely(skb->next))
                dev_queue->gso_skb = skb;
        else
                q->ops->requeue(skb, q);

        netif_schedule_queue(dev_queue);
        return 0;
}

static inline struct sk_buff *dequeue_skb(struct netdev_queue *dev_queue,
                                          struct Qdisc *q)
{
        struct sk_buff *skb;

        if ((skb = dev_queue->gso_skb))
                dev_queue->gso_skb = NULL;
        else
                skb = q->dequeue(q);

        return skb;
}

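/*
 * handle_dev_cpu_collision - the driver's TX lock was contended
 *
 * Called when dev_hard_start_xmit() returned NETDEV_TX_LOCKED.  If this CPU
 * already owns the driver's xmit lock we are recursing (a dead loop), so the
 * packet is dropped and the current queue length is returned to keep
 * qdisc_restart() going.  Otherwise another CPU holds the lock and the
 * packet is simply requeued for a later attempt.
 */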
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                                           struct netdev_queue *dev_queue,
                                           struct Qdisc *q)
{
        int ret;

        if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
                /*
                 * The same CPU is already holding the lock, which means
                 * hard_start_xmit() has recursed - usually a transient
                 * configuration error.  We detect the dead loop by checking
                 * the xmit lock owner and drop the packet.  Return the
                 * current queue length so the caller tries the next skb.
                 */
                kfree_skb(skb);
                if (net_ratelimit())
                        printk(KERN_WARNING "Dead loop on netdevice %s, "
                               "fix it urgently!\n", dev_queue->dev->name);
                ret = qdisc_qlen(q);
        } else {
                /*
                 * Another CPU is holding the lock; requeue the packet and
                 * delay further xmits for a while.
                 */
                __get_cpu_var(netdev_rx_stat).cpu_collision++;
                ret = dev_requeue_skb(skb, dev_queue, q);
        }

        return ret;
}

/*
 * NOTE: Called under queue->lock with locally disabled BH.
 *
 * __QUEUE_STATE_QDISC_RUNNING guarantees that only one CPU can process
 * this queue at a time.  queue->lock serializes accesses to this queue
 * AND to the txq->qdisc pointer itself.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * queue->lock and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can also be called by a watchdog timer.
 *
 * Returns to the caller:
 *                              0  - queue is empty or throttled.
 *                              >0 - queue is not empty.
 */
static inline int qdisc_restart(struct netdev_queue *txq)
{
        struct Qdisc *q = txq->qdisc;
        int ret = NETDEV_TX_BUSY;
        struct net_device *dev;
        struct sk_buff *skb;

        /* Dequeue packet */
        if (unlikely((skb = dequeue_skb(txq, q)) == NULL))
                return 0;

        /* And release queue */
        spin_unlock(&txq->lock);

        dev = txq->dev;

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_subqueue_stopped(dev, skb))
                ret = dev_hard_start_xmit(skb, dev, txq);
        HARD_TX_UNLOCK(dev, txq);

        spin_lock(&txq->lock);
        q = txq->qdisc;

        switch (ret) {
        case NETDEV_TX_OK:
                /* Driver sent out skb successfully */
                ret = qdisc_qlen(q);
                break;

        case NETDEV_TX_LOCKED:
                /* Driver try lock failed */
                ret = handle_dev_cpu_collision(skb, txq, q);
                break;

        default:
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
                        printk(KERN_WARNING "BUG %s code %d qlen %d\n",
                               dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, txq, q);
                break;
        }

        return ret;
}

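/*
 * __qdisc_run - transmit packets from this queue until it is empty, the
 * hardware queue is stopped, or we have held the CPU long enough.
 *
 * The caller holds queue->lock with BH disabled and owns the
 * __QUEUE_STATE_QDISC_RUNNING bit, which is cleared here when we give up
 * the queue.  A rough sketch of the usual calling pattern (not necessarily
 * the exact inline wrapper, which lives in a header) is:
 *
 *      if (!netif_tx_queue_stopped(txq) &&
 *          !test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state))
 *              __qdisc_run(txq);
 *
 * so only one CPU at a time runs a given queue.
 */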
void __qdisc_run(struct netdev_queue *txq)
{
        unsigned long start_time = jiffies;

        while (qdisc_restart(txq)) {
                if (netif_tx_queue_stopped(txq))
                        break;

                /*
                 * Postpone processing if
                 * 1. another process needs the CPU;
                 * 2. we've been doing it for too long.
                 */
                if (need_resched() || jiffies != start_time) {
                        netif_schedule_queue(txq);
                        break;
                }
        }

        clear_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state);
}

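/*
 * dev_watchdog - per-device TX watchdog timer
 *
 * While the device is up, has carrier and a real (non-noop) qdisc attached,
 * this timer periodically checks whether any TX queue has been stopped for
 * longer than dev->watchdog_timeo since the last transmit (dev->trans_start).
 * If so, the driver's ->tx_timeout() handler is invoked so it can recover
 * the hardware, and the timer is rearmed.
 */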
static void dev_watchdog(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;

        netif_tx_lock(dev);
        if (!qdisc_tx_is_noop(dev)) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        int some_queue_stopped = 0;
                        unsigned int i;

                        for (i = 0; i < dev->num_tx_queues; i++) {
                                struct netdev_queue *txq;

                                txq = netdev_get_tx_queue(dev, i);
                                if (netif_tx_queue_stopped(txq)) {
                                        some_queue_stopped = 1;
                                        break;
                                }
                        }

                        if (some_queue_stopped &&
                            time_after(jiffies, (dev->trans_start +
                                                 dev->watchdog_timeo))) {
                                printk(KERN_INFO "NETDEV WATCHDOG: %s: "
                                       "transmit timed out\n",
                                       dev->name);
                                dev->tx_timeout(dev);
                                WARN_ON_ONCE(1);
                        }
                        if (!mod_timer(&dev->watchdog_timer,
                                       round_jiffies(jiffies +
                                                     dev->watchdog_timeo)))
                                dev_hold(dev);
                }
        }
        netif_tx_unlock(dev);

        dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
        if (dev->tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
                if (!mod_timer(&dev->watchdog_timer,
                               round_jiffies(jiffies + dev->watchdog_timeo)))
                        dev_hold(dev);
        }
}

static void dev_watchdog_up(struct net_device *dev)
{
        __netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
        netif_tx_lock_bh(dev);
        if (del_timer(&dev->watchdog_timer))
                dev_put(dev);
        netif_tx_unlock_bh(dev);
}

/**
 *      netif_carrier_on - set carrier
 *      @dev: network device
 *
 * Device has detected that carrier is present.
 */
void netif_carrier_on(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                linkwatch_fire_event(dev);
                if (netif_running(dev))
                        __netdev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *      netif_carrier_off - clear carrier
 *      @dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
                linkwatch_fire_event(dev);
}
EXPORT_SYMBOL(netif_carrier_off);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        kfree_skb(skb);
        return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
        return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        if (net_ratelimit())
                printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
                       skb->dev->name);
        kfree_skb(skb);
        return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
        .id             =       "noop",
        .priv_size      =       0,
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .requeue        =       noop_requeue,
        .owner          =       THIS_MODULE,
};

struct Qdisc noop_qdisc = {
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .flags          =       TCQ_F_BUILTIN,
        .ops            =       &noop_qdisc_ops,
        .list           =       LIST_HEAD_INIT(noop_qdisc.list),
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
        .id             =       "noqueue",
        .priv_size      =       0,
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .requeue        =       noop_requeue,
        .owner          =       THIS_MODULE,
};

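/*
 * noqueue_qdisc is attached (as qdisc_sleeping) to devices with a zero
 * tx_queue_len, typically virtual interfaces.  Its NULL ->enqueue is the
 * signal to the transmit path that packets should bypass queueing and be
 * handed straight to the driver.
 */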
static struct Qdisc noqueue_qdisc = {
        .enqueue        =       NULL,
        .dequeue        =       noop_dequeue,
        .flags          =       TCQ_F_BUILTIN,
        .ops            =       &noqueue_qdisc_ops,
        .list           =       LIST_HEAD_INIT(noqueue_qdisc.list),
};

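/*
 * prio2band[] maps skb->priority (masked with TC_PRIO_MAX) to one of the
 * three pfifo_fast bands; band 0 is dequeued first.  For example, with the
 * standard TC_PRIO_* values, TC_PRIO_INTERACTIVE (6) and TC_PRIO_CONTROL (7)
 * land in band 0, best-effort traffic in band 1 and bulk traffic in band 2.
 */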
static const u8 prio2band[TC_PRIO_MAX+1] =
        { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   the generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

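/*
 * The private data of a pfifo_fast qdisc is simply an array of
 * PFIFO_FAST_BANDS sk_buff_head lists, one per band; prio2list() picks
 * the list that matches a packet's priority.
 */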
static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
                                             struct Qdisc *qdisc)
{
        struct sk_buff_head *list = qdisc_priv(qdisc);
        return list + prio2band[skb->priority & TC_PRIO_MAX];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        struct sk_buff_head *list = prio2list(skb, qdisc);

        if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
                qdisc->q.qlen++;
                return __qdisc_enqueue_tail(skb, qdisc, list);
        }

        return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
        int prio;
        struct sk_buff_head *list = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
                if (!skb_queue_empty(list + prio)) {
                        qdisc->q.qlen--;
                        return __qdisc_dequeue_head(qdisc, list + prio);
                }
        }

        return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        qdisc->q.qlen++;
        return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
        int prio;
        struct sk_buff_head *list = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                __qdisc_reset_queue(qdisc, list + prio);

        qdisc->qstats.backlog = 0;
        qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
        return skb->len;

nla_put_failure:
        return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
        int prio;
        struct sk_buff_head *list = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                skb_queue_head_init(list + prio);

        return 0;
}

static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
        .id             =       "pfifo_fast",
        .priv_size      =       PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
        .enqueue        =       pfifo_fast_enqueue,
        .dequeue        =       pfifo_fast_dequeue,
        .requeue        =       pfifo_fast_requeue,
        .init           =       pfifo_fast_init,
        .reset          =       pfifo_fast_reset,
        .dump           =       pfifo_fast_dump,
        .owner          =       THIS_MODULE,
};

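/*
 * qdisc_alloc - allocate a qdisc plus its private area
 *
 * The allocation is padded so that both the struct Qdisc and the ops'
 * private data are QDISC_ALIGNTO-byte aligned; sch->padded records the
 * offset back to the start of the allocation for the final kfree().
 * Returns an ERR_PTR() on failure, so callers must test with IS_ERR()
 * rather than for NULL.
 */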
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          struct Qdisc_ops *ops)
{
        void *p;
        struct Qdisc *sch;
        unsigned int size;
        int err = -ENOBUFS;

        /* ensure that the Qdisc and the private data are 32-byte aligned */
        size = QDISC_ALIGN(sizeof(*sch));
        size += ops->priv_size + (QDISC_ALIGNTO - 1);

        p = kzalloc(size, GFP_KERNEL);
        if (!p)
                goto errout;
        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
        sch->padded = (char *) sch - (char *) p;

        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
        sch->ops = ops;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
        dev_hold(qdisc_dev(sch));
        atomic_set(&sch->refcnt, 1);

        return sch;
errout:
        return ERR_PTR(err);
}

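/*
 * qdisc_create_dflt - allocate and initialize a default qdisc
 *
 * Convenience wrapper around qdisc_alloc() that also runs the ops'
 * ->init() hook.  Unlike qdisc_alloc() it returns NULL (not an ERR_PTR)
 * on failure; attach_one_default_qdisc() below is a typical caller.
 */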
struct Qdisc *qdisc_create_dflt(struct net_device *dev,
                                struct netdev_queue *dev_queue,
                                struct Qdisc_ops *ops,
                                unsigned int parentid)
{
        struct Qdisc *sch;

        sch = qdisc_alloc(dev_queue, ops);
        if (IS_ERR(sch))
                goto errout;
        sch->parent = parentid;

        if (!ops->init || ops->init(sch, NULL) == 0)
                return sch;

        qdisc_destroy(sch);
errout:
        return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under queue->lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (ops->reset)
                ops->reset(qdisc);
}
EXPORT_SYMBOL(qdisc_reset);

/* This is the RCU callback that cleans up a qdisc once there
 * are no further references to it */

static void __qdisc_destroy(struct rcu_head *head)
{
        struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
        kfree((char *) qdisc - qdisc->padded);
}

/* Under queue->lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (qdisc->flags & TCQ_F_BUILTIN ||
            !atomic_dec_and_test(&qdisc->refcnt))
                return;

        list_del(&qdisc->list);
        gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
        if (ops->reset)
                ops->reset(qdisc);
        if (ops->destroy)
                ops->destroy(qdisc);

        module_put(ops->owner);
        dev_put(qdisc_dev(qdisc));
        call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
EXPORT_SYMBOL(qdisc_destroy);

static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (txq->qdisc_sleeping != &noop_qdisc)
                        return false;
        }
        return true;
}

static void attach_one_default_qdisc(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_unused)
{
        struct Qdisc *qdisc;

        if (dev->tx_queue_len) {
                qdisc = qdisc_create_dflt(dev, dev_queue,
                                          &pfifo_fast_ops, TC_H_ROOT);
                if (!qdisc) {
                        printk(KERN_INFO "%s: activation failed\n", dev->name);
                        return;
                }
                list_add_tail(&qdisc->list, &dev_queue->qdisc_list);
        } else {
                qdisc = &noqueue_qdisc;
        }
        dev_queue->qdisc_sleeping = qdisc;
}

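/*
 * transition_one_qdisc - make the sleeping qdisc the active one
 *
 * Publishes dev_queue->qdisc_sleeping as dev_queue->qdisc under the queue
 * lock, and tells the caller whether a watchdog is needed, i.e. whether the
 * queue got anything other than noqueue_qdisc.
 */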
static void transition_one_qdisc(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_need_watchdog)
{
        int *need_watchdog_p = _need_watchdog;

        spin_lock_bh(&dev_queue->lock);
        rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping);
        if (dev_queue->qdisc != &noqueue_qdisc)
                *need_watchdog_p = 1;
        spin_unlock_bh(&dev_queue->lock);
}

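/*
 * dev_activate - attach default qdiscs (if needed) and start transmission
 *
 * Called when a device is brought up.  If the carrier is not yet present,
 * activation of the qdiscs is deferred to the next carrier-on event;
 * otherwise the sleeping qdiscs become active and the TX watchdog is
 * started for queues that actually queue packets.
 */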
void dev_activate(struct net_device *dev)
{
        int need_watchdog;

        /* If no queueing discipline is attached to the device, create a
           default one: pfifo_fast for devices that need queueing, and
           noqueue_qdisc for virtual interfaces.
         */

        if (dev_all_qdisc_sleeping_noop(dev))
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);

        if (!netif_carrier_ok(dev))
                /* Delay activation until next carrier-on event */
                return;

        need_watchdog = 0;
        netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);

        if (need_watchdog) {
                dev->trans_start = jiffies;
                dev_watchdog_up(dev);
        }
}

static void dev_deactivate_queue(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_qdisc_default)
{
        struct Qdisc *qdisc_default = _qdisc_default;
        struct Qdisc *qdisc;
        struct sk_buff *skb;

        spin_lock_bh(&dev_queue->lock);

        qdisc = dev_queue->qdisc;
        if (qdisc) {
                dev_queue->qdisc = qdisc_default;
                qdisc_reset(qdisc);
        }
        skb = dev_queue->gso_skb;
        dev_queue->gso_skb = NULL;

        spin_unlock_bh(&dev_queue->lock);

        kfree_skb(skb);
}

static bool some_qdisc_is_running(struct net_device *dev, int lock)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *dev_queue;
                int val;

                dev_queue = netdev_get_tx_queue(dev, i);

                if (lock)
                        spin_lock_bh(&dev_queue->lock);

                val = test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state);

                if (lock)
                        spin_unlock_bh(&dev_queue->lock);

                if (val)
                        return true;
        }
        return false;
}

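/*
 * dev_deactivate - stop the qdiscs on a device
 *
 * Swaps noop_qdisc into every TX queue, stops the watchdog, waits for
 * in-flight qdisc-less dev_queue_xmit calls via synchronize_rcu(), and
 * then spins until no queue still has __QUEUE_STATE_QDISC_RUNNING set.
 */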
void dev_deactivate(struct net_device *dev)
{
        bool running;

        netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);

        dev_watchdog_down(dev);

        /* Wait for outstanding qdisc-less dev_queue_xmit calls. */
        synchronize_rcu();

        /* Wait for outstanding qdisc_run calls. */
        do {
                while (some_qdisc_is_running(dev, 0))
                        yield();

                /*
                 * Double-check inside queue lock to ensure that all effects
                 * of the queue run are visible when we return.
                 */
                running = some_qdisc_is_running(dev, 1);

                /*
                 * The running flag should never be set at this point because
                 * we've already set dev->qdisc to noop_qdisc *inside* the same
                 * pair of spin locks.  That is, if any qdisc_run starts after
                 * our initial test it should see the noop_qdisc and then
                 * clear the RUNNING bit before dropping the queue lock.  So
                 * if it is set here then we've found a bug.
                 */
        } while (WARN_ON_ONCE(running));
}

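/*
 * dev_init_scheduler_queue - set up a queue's scheduler state at
 * registration time: both the active and the sleeping qdisc pointer are
 * set to the qdisc passed in (noop_qdisc for TX queues, NULL for the RX
 * queue), and the per-queue qdisc list is initialized.
 */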
static void dev_init_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc)
{
        struct Qdisc *qdisc = _qdisc;

        dev_queue->qdisc = qdisc;
        dev_queue->qdisc_sleeping = qdisc;
        INIT_LIST_HEAD(&dev_queue->qdisc_list);
}

void dev_init_scheduler(struct net_device *dev)
{
        qdisc_lock_tree(dev);
        netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
        dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
        qdisc_unlock_tree(dev);

        setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc_default)
{
        struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
        struct Qdisc *qdisc_default = _qdisc_default;

        if (qdisc) {
                dev_queue->qdisc = qdisc_default;
                dev_queue->qdisc_sleeping = qdisc_default;

                qdisc_destroy(qdisc);
        }
}

void dev_shutdown(struct net_device *dev)
{
        qdisc_lock_tree(dev);
        netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
        shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
        BUG_TRAP(!timer_pending(&dev->watchdog_timer));
        qdisc_unlock_tree(dev);
}