/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
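
/* Multiqueue scheduler: one band per hardware transmit queue of the
 * device.  Packets are placed on the band matching their queue mapping
 * and bands are serviced round-robin, skipping any band whose hardware
 * subqueue is currently stopped.
 */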
struct multiq_sched_data {
	u16 bands;
	u16 max_bands;
	u16 curband;
	struct tcf_proto *filter_list;
	struct Qdisc **queues;
};
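
/* Pick the child qdisc for skb: let any attached classifiers run first
 * (under CONFIG_NET_CLS_ACT their verdict may steal or drop the packet),
 * then use the skb's queue mapping to select the band, falling back to
 * band 0 if the mapping is out of range.
 */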
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	u32 band;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);
	if (band >= q->bands)
		return q->queues[0];
	return q->queues[band];
}
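
/* Enqueue to the band chosen by multiq_classify() and account the
 * packet in the qdisc's byte/packet counters on success.
 */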
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
#endif
	ret = qdisc_enqueue(skb, qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		sch->qstats.drops++;
	return ret;
}
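
/* Push a packet back onto its band after a failed transmit and rewind
 * the round-robin cursor so that band is revisited first.
 */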
static int
multiq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	struct multiq_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
#endif
	ret = qdisc->ops->requeue(skb, qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		sch->qstats.requeues++;
		if (q->curband)
			q->curband--;
		else
			q->curband = q->bands - 1;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		sch->qstats.drops++;
	return ret;
}
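
/* Round-robin over the bands, starting one past the last band served,
 * and return the first packet found on a band whose hardware subqueue
 * is active.
 */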
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid excessive requeues
		 */
		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;
}
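
/* Reclaim one queued packet, preferring the highest-numbered band. */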
static unsigned int multiq_drop(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	unsigned int len;
	int band;

	for (band = q->bands - 1; band >= 0; band--) {
		qdisc = q->queues[band];
		if (qdisc->ops->drop) {
			len = qdisc->ops->drop(qdisc);
			if (len != 0) {
				sch->q.qlen--;
				return len;
			}
		}
	}
	return 0;
}
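
/* Flush every band and restart the round-robin scan at band 0. */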
static void
multiq_reset(struct Qdisc *sch)
{
	u16 band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	for (band = 0; band < q->bands; band++)
		qdisc_reset(q->queues[band]);
	sch->q.qlen = 0;
	q->curband = 0;
}
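
/* Tear down the classifier chain, the child qdiscs and the queue array. */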
static void
multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (band = 0; band < q->bands; band++)
		qdisc_destroy(q->queues[band]);

	kfree(q->queues);
}
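
/* (Re)size the band array to the device's real TX queue count: surplus
 * bands are torn down, missing ones get a default pfifo child.
 */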
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	/* the band count always tracks the device's active TX queues */
	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}
	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child;
			child = qdisc_create_dflt(qdisc_dev(sch),
						  sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				child = xchg(&q->queues[i], child);
				if (child != &noop_qdisc) {
					qdisc_tree_decrease_qlen(child,
								 child->q.qlen);
					qdisc_destroy(child);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
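
/* Allocate one queue slot per device TX queue, point them all at
 * noop_qdisc, then let multiq_tune() attach the real children.
 */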
static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (opt == NULL)
		return -EINVAL;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	err = multiq_tune(sch, opt);
	if (err)
		kfree(q->queues);

	return err;
}
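
/* Report the active and maximum band counts via TCA_OPTIONS. */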
static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_multiq_qopt opt;

	opt.bands = q->bands;
	opt.max_bands = q->max_bands;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
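
/* Replace the child qdisc of one band, returning the old child. */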
static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (band >= q->bands)
		return -EINVAL;

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->queues[band];
	q->queues[band] = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}
static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (band >= q->bands)
		return NULL;

	return q->queues[band];
}
static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}
static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_get(sch, classid);
}
static void multiq_put(struct Qdisc *q, unsigned long cl)
{
}
static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	unsigned long cl = *arg;
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl - 1 > q->bands)
		return -ENOENT;
	return 0;
}
static int multiq_delete(struct Qdisc *sch, unsigned long cl)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl - 1 > q->bands)
		return -ENOENT;
	return 0;
}
static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl - 1 > q->bands)
		return -ENOENT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	if (q->queues[cl - 1])
		tcm->tcm_info = q->queues[cl - 1]->handle;
	return 0;
}
static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
		return -1;

	return 0;
}
static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, band + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}
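
/* Each band is exposed as a class with minor number band + 1, so
 * filters and child qdiscs can be attached per band.
 */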
static const struct Qdisc_class_ops multiq_class_ops = {
	.graft = multiq_graft,
	.leaf = multiq_leaf,
	.get = multiq_get,
	.put = multiq_put,
	.change = multiq_change,
	.delete = multiq_delete,
	.walk = multiq_walk,
	.tcf_chain = multiq_find_tcf,
	.bind_tcf = multiq_bind,
	.unbind_tcf = multiq_put,
	.dump = multiq_dump_class,
	.dump_stats = multiq_dump_class_stats,
};
static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next = NULL,
	.cl_ops = &multiq_class_ops,
	.id = "multiq",
	.priv_size = sizeof(struct multiq_sched_data),
	.enqueue = multiq_enqueue,
	.dequeue = multiq_dequeue,
	.requeue = multiq_requeue,
	.drop = multiq_drop,
	.init = multiq_init,
	.reset = multiq_reset,
	.destroy = multiq_destroy,
	.change = multiq_tune,
	.dump = multiq_dump,
	.owner = THIS_MODULE,
};
static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");
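
/* Illustrative usage sketch (not part of the module): on a multiqueue
 * capable device, the qdisc would typically be attached from userspace
 * with something like
 *
 *	tc qdisc add dev eth0 root handle 1: multiq
 *
 * after which per-band children and filters can be grafted.  The device
 * name here is an assumed example only.
 */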