/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

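/* Per-qdisc state: the active band count, the number of hardware transmit
 * queues seen at init time (max_bands), the round-robin dequeue cursor,
 * the attached classifier chain and one child qdisc per band.
 */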
struct multiq_sched_data {
	u16 bands;
	u16 max_bands;
	u16 curband;
	struct tcf_proto *filter_list;
	struct Qdisc **queues;
};

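/* Pick the child qdisc for an skb.  Classifier actions may consume the
 * packet (STOLEN/QUEUED fall through to return NULL, as does SHOT);
 * otherwise the band comes from the skb's queue mapping, falling back
 * to band 0 if the mapping exceeds the configured band count.
 */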
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}

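/* Enqueue onto the band selected by multiq_classify() and, on success,
 * account the packet in the qdisc's byte, packet and queue-length counters.
 */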
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		sch->qstats.drops++;
	return ret;
}

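/* Dequeue round-robin across the bands, skipping any band whose hardware
 * subqueue is stopped so a stalled ring cannot starve the others.
 */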
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;
}

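/* Non-destructive counterpart of multiq_dequeue(): scan the bands from the
 * current position without advancing the cursor or touching queue lengths.
 */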
static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;
}

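/* Reclaim one packet, starting with the highest-numbered (lowest-priority)
 * band and working downwards until a child qdisc surrenders something.
 */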
static unsigned int multiq_drop(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;
	unsigned int len;
	struct Qdisc *qdisc;

	for (band = q->bands - 1; band >= 0; band--) {
		qdisc = q->queues[band];
		if (qdisc->ops->drop) {
			len = qdisc->ops->drop(qdisc);
			if (len != 0) {
				sch->q.qlen--;
				return len;
			}
		}
	}
	return 0;
}


static void
multiq_reset(struct Qdisc *sch)
{
	u16 band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	for (band = 0; band < q->bands; band++)
		qdisc_reset(q->queues[band]);
	sch->q.qlen = 0;
	q->curband = 0;
}

static void
multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (band = 0; band < q->bands; band++)
		qdisc_destroy(q->queues[band]);

	kfree(q->queues);
}

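/* Apply or update the configuration.  The band count always follows the
 * device's real_num_tx_queues: surplus children are replaced by noop_qdisc
 * and destroyed, while bands still backed by noop_qdisc receive a default
 * pfifo child keyed to the band's class handle.
 */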
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EINVAL;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];
			q->queues[i] = &noop_qdisc;
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}

	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			child = qdisc_create_dflt(qdisc_dev(sch),
						  sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (old != &noop_qdisc) {
					qdisc_tree_decrease_qlen(old,
								 old->q.qlen);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}

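/* Allocate one child slot per hardware transmit queue, initialise each to
 * noop_qdisc and hand off to multiq_tune() to create the real children.
 */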
static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (opt == NULL)
		return -EINVAL;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	err = multiq_tune(sch, opt);

	if (err)
		kfree(q->queues);

	return err;
}

static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_multiq_qopt opt;

	opt.bands = q->bands;
	opt.max_bands = q->max_bands;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

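/* Swap in a new child qdisc for one band and hand the old one back to the
 * caller; grafting NULL installs noop_qdisc.
 */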
static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (band >= q->bands)
		return -EINVAL;

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->queues[band];
	q->queues[band] = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (band >= q->bands)
		return NULL;

	return q->queues[band];
}

static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_get(sch, classid);
}


static void multiq_put(struct Qdisc *q, unsigned long cl)
{
	return;
}

static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	unsigned long cl = *arg;
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl - 1 > q->bands)
		return -ENOENT;
	return 0;
}

static int multiq_delete(struct Qdisc *sch, unsigned long cl)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl - 1 > q->bands)
		return -ENOENT;
	return 0;
}


static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl - 1 > q->bands)
		return -ENOENT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	if (q->queues[cl-1])
		tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}

static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
		return -1;

	return 0;
}

static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, band + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		= multiq_graft,
	.leaf		= multiq_leaf,
	.get		= multiq_get,
	.put		= multiq_put,
	.change		= multiq_change,
	.delete		= multiq_delete,
	.walk		= multiq_walk,
	.tcf_chain	= multiq_find_tcf,
	.bind_tcf	= multiq_bind,
	.unbind_tcf	= multiq_put,
	.dump		= multiq_dump_class,
	.dump_stats	= multiq_dump_class_stats,
};

static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &multiq_class_ops,
	.id		= "multiq",
	.priv_size	= sizeof(struct multiq_sched_data),
	.enqueue	= multiq_enqueue,
	.dequeue	= multiq_dequeue,
	.peek		= multiq_peek,
	.drop		= multiq_drop,
	.init		= multiq_init,
	.reset		= multiq_reset,
	.destroy	= multiq_destroy,
	.change		= multiq_tune,
	.dump		= multiq_dump,
	.owner		= THIS_MODULE,
};

static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");
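
/*
 * Example usage (illustrative): multiq is meant to sit as the root qdisc of
 * a multiqueue device, with one band per hardware transmit queue selected
 * via skb->queue_mapping, e.g.:
 *
 *	tc qdisc add dev eth0 root handle 1: multiq
 *
 * A classifier with the skbedit action can then steer traffic to a band:
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *		match ip dst 192.168.0.3 action skbedit queue_mapping 3
 */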