/*
 * net/sched/sch_api.c  Packet scheduler API.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
                        struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
                         struct Qdisc *q, unsigned long cl, int event);

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. The queueing discipline manager frontend.
   2. The traffic class manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box
   that is able to enqueue packets and to dequeue them (when
   the device is ready to send something), in an order and at times
   determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from the outside.
   - "schedulers", which split all packets into "traffic classes"
     using "packet classifiers" (see cls_api.c).

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some sanity
   checks and the part of the work that is common to all qdiscs,
   and to provide rtnetlink notifications.

   All the really intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it only means that the
   discipline does not want to send anything this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP    - this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN      - this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore.
   NET_XMIT_POLICED - dropped by the policer.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---requeue

   requeues a packet that was dequeued earlier. It is used for non-standard
   or just buggy devices, which can defer output even if netif_queue_stopped() == 0.

   ---reset

   returns the qdisc to its initial state: purges all buffers and clears all
   timers and counters (except for statistics).

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys the resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */

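/*
 * Illustrative sketch, not part of this file: how a hypothetical classful
 * qdisc's ->enqueue() typically propagates a child's return code according
 * to the conventions above. my_sched_data and my_classify() are made-up
 * names; compare sch_prio.c for a real example.
 *
 *      static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *      {
 *              struct my_sched_data *q = qdisc_priv(sch);
 *              struct Qdisc *child = my_classify(skb, q);   // pick a class's child qdisc
 *              int ret = child->enqueue(skb, child);
 *
 *              if (ret == NET_XMIT_SUCCESS) {
 *                      sch->bstats.bytes += skb->len;
 *                      sch->bstats.packets++;
 *                      sch->q.qlen++;                        // keep q.qlen valid, see above
 *                      return NET_XMIT_SUCCESS;
 *              }
 *              sch->qstats.drops++;                          // DROP/CN/POLICED: count the drop
 *              return ret;                                   // and hand the code to our parent
 *      }
 */
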
/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *      Queueing disciplines manipulation.      *
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
        struct Qdisc_ops *q, **qp;
        int rc = -EEXIST;

        write_lock(&qdisc_mod_lock);
        for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
                if (!strcmp(qops->id, q->id))
                        goto out;

        if (qops->enqueue == NULL)
                qops->enqueue = noop_qdisc_ops.enqueue;
        if (qops->requeue == NULL)
                qops->requeue = noop_qdisc_ops.requeue;
        if (qops->dequeue == NULL)
                qops->dequeue = noop_qdisc_ops.dequeue;

        qops->next = NULL;
        *qp = qops;
        rc = 0;
out:
        write_unlock(&qdisc_mod_lock);
        return rc;
}
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
{
        struct Qdisc_ops *q, **qp;
        int err = -ENOENT;

        write_lock(&qdisc_mod_lock);
        for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
                if (q == qops)
                        break;
        if (q) {
                *qp = q->next;
                q->next = NULL;
                err = 0;
        }
        write_unlock(&qdisc_mod_lock);
        return err;
}
EXPORT_SYMBOL(unregister_qdisc);
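
/*
 * Illustrative sketch, not from this file: a module registering a
 * hypothetical queueing discipline through the API above. The "example"
 * name and callbacks are made up; a real qdisc also sets .priv_size and
 * whichever other Qdisc_ops callbacks it needs.
 *
 *      static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *              .id      = "example",
 *              .enqueue = example_enqueue,      // hypothetical callbacks
 *              .dequeue = example_dequeue,
 *              .owner   = THIS_MODULE,
 *      };
 *
 *      static int __init example_module_init(void)
 *      {
 *              return register_qdisc(&example_qdisc_ops);
 *      }
 *
 *      static void __exit example_module_exit(void)
 *      {
 *              unregister_qdisc(&example_qdisc_ops);
 *      }
 *      module_init(example_module_init);
 *      module_exit(example_module_exit);
 */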

/* We know the handle. Find the qdisc among all qdiscs attached to the device
   (the root qdisc, all its children, children of children, etc.)
 */

static struct Qdisc *__qdisc_lookup(struct netdev_queue *dev_queue, u32 handle)
{
        struct Qdisc *q;

        list_for_each_entry(q, &dev_queue->qdisc_list, list) {
                if (q->handle == handle)
                        return q;
        }
        return NULL;
}

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                struct Qdisc *q = __qdisc_lookup(txq, handle);
                if (q)
                        return q;
        }
        return NULL;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
        unsigned long cl;
        struct Qdisc *leaf;
        const struct Qdisc_class_ops *cops = p->ops->cl_ops;

        if (cops == NULL)
                return NULL;
        cl = cops->get(p, classid);

        if (cl == 0)
                return NULL;
        leaf = cops->leaf(p, cl);
        cops->put(p, cl);
        return leaf;
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
        struct Qdisc_ops *q = NULL;

        if (kind) {
                read_lock(&qdisc_mod_lock);
                for (q = qdisc_base; q; q = q->next) {
                        if (nla_strcmp(kind, q->id) == 0) {
                                if (!try_module_get(q->owner))
                                        q = NULL;
                                break;
                        }
                }
                read_unlock(&qdisc_mod_lock);
        }
        return q;
}

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
        struct qdisc_rate_table *rtab;

        for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
                if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
                        rtab->refcnt++;
                        return rtab;
                }
        }

        if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
            nla_len(tab) != TC_RTAB_SIZE)
                return NULL;

        rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
        if (rtab) {
                rtab->rate = *r;
                rtab->refcnt = 1;
                memcpy(rtab->data, nla_data(tab), 1024);
                rtab->next = qdisc_rtab_list;
                qdisc_rtab_list = rtab;
        }
        return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
        struct qdisc_rate_table *rtab, **rtabp;

        if (!tab || --tab->refcnt)
                return;

        for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) {
                if (rtab == tab) {
                        *rtabp = rtab->next;
                        kfree(rtab);
                        return;
                }
        }
}
EXPORT_SYMBOL(qdisc_put_rtab);
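
/*
 * Illustrative sketch, not from this file: how a rate-limiting qdisc
 * typically pairs qdisc_get_rtab() with qdisc_put_rtab(). The qopt and
 * TCA_EXAMPLE_RTAB names are made up; see sch_tbf.c for a real user.
 *
 *      // in ->init()/->change():
 *      struct qdisc_rate_table *rtab;
 *
 *      rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_EXAMPLE_RTAB]);  // takes a reference
 *      if (rtab == NULL)
 *              return -EINVAL;
 *
 *      // in ->destroy() (and when replacing an old table):
 *      qdisc_put_rtab(rtab);
 */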

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
        struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
                                                 timer);
        struct netdev_queue *txq = wd->qdisc->dev_queue;

        wd->qdisc->flags &= ~TCQ_F_THROTTLED;
        smp_wmb();
        netif_schedule_queue(txq);

        return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
        hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        wd->timer.function = qdisc_watchdog;
        wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
{
        ktime_t time;

        wd->qdisc->flags |= TCQ_F_THROTTLED;
        time = ktime_set(0, 0);
        time = ktime_add_ns(time, PSCHED_US2NS(expires));
        hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
        hrtimer_cancel(&wd->timer);
        wd->qdisc->flags &= ~TCQ_F_THROTTLED;
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
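
/*
 * Illustrative sketch, not from this file: the usual watchdog pattern in a
 * shaping qdisc. A struct qdisc_watchdog is embedded in the private data,
 * armed from ->dequeue() when the head packet may not be sent yet, and
 * cancelled on ->reset()/->destroy(). The names q->watchdog and
 * next_xmit_time are made up; see sch_tbf.c or sch_netem.c for real users.
 *
 *      // in ->init():
 *      qdisc_watchdog_init(&q->watchdog, sch);
 *
 *      // in ->dequeue(), when the head packet is not yet eligible:
 *      qdisc_watchdog_schedule(&q->watchdog, next_xmit_time);
 *      return NULL;            // the qdisc is now marked TCQ_F_THROTTLED
 *
 *      // in ->reset() or ->destroy():
 *      qdisc_watchdog_cancel(&q->watchdog);
 */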

struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
        unsigned int size = n * sizeof(struct hlist_head), i;
        struct hlist_head *h;

        if (size <= PAGE_SIZE)
                h = kmalloc(size, GFP_KERNEL);
        else
                h = (struct hlist_head *)
                        __get_free_pages(GFP_KERNEL, get_order(size));

        if (h != NULL) {
                for (i = 0; i < n; i++)
                        INIT_HLIST_HEAD(&h[i]);
        }
        return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
        unsigned int size = n * sizeof(struct hlist_head);

        if (size <= PAGE_SIZE)
                kfree(h);
        else
                free_pages((unsigned long)h, get_order(size));
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
        struct Qdisc_class_common *cl;
        struct hlist_node *n, *next;
        struct hlist_head *nhash, *ohash;
        unsigned int nsize, nmask, osize;
        unsigned int i, h;

        /* Rehash when load factor exceeds 0.75 */
        if (clhash->hashelems * 4 <= clhash->hashsize * 3)
                return;
        nsize = clhash->hashsize * 2;
        nmask = nsize - 1;
        nhash = qdisc_class_hash_alloc(nsize);
        if (nhash == NULL)
                return;

        ohash = clhash->hash;
        osize = clhash->hashsize;

        sch_tree_lock(sch);
        for (i = 0; i < osize; i++) {
                hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
                        h = qdisc_class_hash(cl->classid, nmask);
                        hlist_add_head(&cl->hnode, &nhash[h]);
                }
        }
        clhash->hash = nhash;
        clhash->hashsize = nsize;
        clhash->hashmask = nmask;
        sch_tree_unlock(sch);

        qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
        unsigned int size = 4;

        clhash->hash = qdisc_class_hash_alloc(size);
        if (clhash->hash == NULL)
                return -ENOMEM;
        clhash->hashsize = size;
        clhash->hashmask = size - 1;
        clhash->hashelems = 0;
        return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
        qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
                             struct Qdisc_class_common *cl)
{
        unsigned int h;

        INIT_HLIST_NODE(&cl->hnode);
        h = qdisc_class_hash(cl->classid, clhash->hashmask);
        hlist_add_head(&cl->hnode, &clhash->hash[h]);
        clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
                             struct Qdisc_class_common *cl)
{
        hlist_del(&cl->hnode);
        clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
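
/*
 * Illustrative sketch, not from this file: how a classful qdisc typically
 * uses the class hash helpers above. The class struct embeds a
 * struct Qdisc_class_common (here as cl->common); q->clhash and the
 * surrounding names are made up, see sch_htb.c for a real user.
 *
 *      // in ->init():
 *      err = qdisc_class_hash_init(&q->clhash);
 *
 *      // when a new class is created in ->change():
 *      cl->common.classid = classid;
 *      sch_tree_lock(sch);
 *      qdisc_class_hash_insert(&q->clhash, &cl->common);
 *      sch_tree_unlock(sch);
 *      qdisc_class_hash_grow(sch, &q->clhash);  // may rehash; takes the tree lock itself
 *
 *      // in ->destroy(), after all classes are gone:
 *      qdisc_class_hash_destroy(&q->clhash);
 */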

/* Allocate a unique handle from the space managed by the kernel */

static u32 qdisc_alloc_handle(struct net_device *dev)
{
        int i = 0x10000;
        static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

        do {
                autohandle += TC_H_MAKE(0x10000U, 0);
                if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
                        autohandle = TC_H_MAKE(0x80000000U, 0);
        } while (qdisc_lookup(dev, autohandle) && --i > 0);

        return i > 0 ? autohandle : 0;
}

/* Attach toplevel qdisc to device dev */

static struct Qdisc *
dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
{
        struct netdev_queue *dev_queue;
        struct Qdisc *oqdisc;

        if (dev->flags & IFF_UP)
                dev_deactivate(dev);

        qdisc_lock_tree(dev);
        if (qdisc && qdisc->flags & TCQ_F_INGRESS) {
                dev_queue = &dev->rx_queue;
                oqdisc = dev_queue->qdisc;
                /* Prune old scheduler */
                if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
                        /* delete */
                        qdisc_reset(oqdisc);
                        dev_queue->qdisc = NULL;
                } else {  /* new */
                        dev_queue->qdisc = qdisc;
                }

        } else {
                dev_queue = netdev_get_tx_queue(dev, 0);
                oqdisc = dev_queue->qdisc_sleeping;

                /* Prune old scheduler */
                if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
                        qdisc_reset(oqdisc);

                /* ... and graft new one */
                if (qdisc == NULL)
                        qdisc = &noop_qdisc;
                dev_queue->qdisc_sleeping = qdisc;
                dev_queue->qdisc = &noop_qdisc;
        }

        qdisc_unlock_tree(dev);

        if (dev->flags & IFF_UP)
                dev_activate(dev);

        return oqdisc;
}

void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
        const struct Qdisc_class_ops *cops;
        unsigned long cl;
        u32 parentid;

        if (n == 0)
                return;
        while ((parentid = sch->parent)) {
                if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
                        return;

                sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
                if (sch == NULL) {
                        WARN_ON(parentid != TC_H_ROOT);
                        return;
                }
                cops = sch->ops->cl_ops;
                if (cops->qlen_notify) {
                        cl = cops->get(sch, parentid);
                        cops->qlen_notify(sch, cl);
                        cops->put(sch, cl);
                }
                sch->q.qlen -= n;
        }
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
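
/*
 * Illustrative sketch, not from this file: a qdisc that drops packets
 * outside the normal dequeue path (for example when ->change() shrinks its
 * limit) keeps the q.qlen counters of all ancestors consistent by calling
 * qdisc_tree_decrease_qlen(). "dropped" is a made-up variable name.
 *
 *      // in ->change(), after removing "dropped" packets from the queue:
 *      qdisc_tree_decrease_qlen(sch, dropped);
 */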

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
   to device "dev".

   The old qdisc is not destroyed but returned in *old.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                       u32 classid,
                       struct Qdisc *new, struct Qdisc **old)
{
        int err = 0;
        struct Qdisc *q = *old;


        if (parent == NULL) {
                if (q && q->flags & TCQ_F_INGRESS) {
                        *old = dev_graft_qdisc(dev, q);
                } else {
                        *old = dev_graft_qdisc(dev, new);
                }
        } else {
                const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

                err = -EINVAL;

                if (cops) {
                        unsigned long cl = cops->get(parent, classid);
                        if (cl) {
                                err = cops->graft(parent, cl, new, old);
                                cops->put(parent, cl);
                        }
                }
        }
        return err;
}

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
             u32 parent, u32 handle, struct nlattr **tca, int *errp)
{
        int err;
        struct nlattr *kind = tca[TCA_KIND];
        struct Qdisc *sch;
        struct Qdisc_ops *ops;

        ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_KMOD
        if (ops == NULL && kind != NULL) {
                char name[IFNAMSIZ];
                if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
                        /* We dropped the RTNL semaphore in order to
                         * perform the module load.  So, even if we
                         * succeeded in loading the module we have to
                         * tell the caller to replay the request.  We
                         * indicate this using -EAGAIN.
                         * We replay the request because the device may
                         * go away in the mean time.
                         */
                        rtnl_unlock();
                        request_module("sch_%s", name);
                        rtnl_lock();
                        ops = qdisc_lookup_ops(kind);
                        if (ops != NULL) {
                                /* We will call qdisc_lookup_ops again on
                                 * replay, so don't keep a reference.
                                 */
                                module_put(ops->owner);
                                err = -EAGAIN;
                                goto err_out;
                        }
                }
        }
#endif

        err = -ENOENT;
        if (ops == NULL)
                goto err_out;

        sch = qdisc_alloc(dev_queue, ops);
        if (IS_ERR(sch)) {
                err = PTR_ERR(sch);
                goto err_out2;
        }

        sch->parent = parent;

        if (handle == TC_H_INGRESS) {
                sch->flags |= TCQ_F_INGRESS;
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
        } else {
                if (handle == 0) {
                        handle = qdisc_alloc_handle(dev);
                        err = -ENOMEM;
                        if (handle == 0)
                                goto err_out3;
                }
        }

        sch->handle = handle;

        if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
                if (tca[TCA_RATE]) {
                        err = gen_new_estimator(&sch->bstats, &sch->rate_est,
                                                &sch->dev_queue->lock,
                                                tca[TCA_RATE]);
                        if (err) {
                                /*
                                 * Any broken qdiscs that would require
                                 * a ops->reset() here? The qdisc was never
                                 * in action so it shouldn't be necessary.
                                 */
                                if (ops->destroy)
                                        ops->destroy(sch);
                                goto err_out3;
                        }
                }
                qdisc_lock_tree(dev);
                list_add_tail(&sch->list, &dev_queue->qdisc_list);
                qdisc_unlock_tree(dev);

                return sch;
        }
err_out3:
        dev_put(dev);
        kfree((char *) sch - sch->padded);
err_out2:
        module_put(ops->owner);
err_out:
        *errp = err;
        return NULL;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
        if (tca[TCA_OPTIONS]) {
                int err;

                if (sch->ops->change == NULL)
                        return -EINVAL;
                err = sch->ops->change(sch, tca[TCA_OPTIONS]);
                if (err)
                        return err;
        }
        if (tca[TCA_RATE])
                gen_replace_estimator(&sch->bstats, &sch->rate_est,
                                      &sch->dev_queue->lock, tca[TCA_RATE]);
        return 0;
}

struct check_loop_arg
{
        struct qdisc_walker w;
        struct Qdisc *p;
        int depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
        struct check_loop_arg arg;

        if (q->ops->cl_ops == NULL)
                return 0;

        arg.w.stop = arg.w.skip = arg.w.count = 0;
        arg.w.fn = check_loop_fn;
        arg.depth = depth;
        arg.p = p;
        q->ops->cl_ops->walk(q, &arg.w);
        return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
        struct Qdisc *leaf;
        const struct Qdisc_class_ops *cops = q->ops->cl_ops;
        struct check_loop_arg *arg = (struct check_loop_arg *)w;

        leaf = cops->leaf(q, cl);
        if (leaf) {
                if (leaf == arg->p || arg->depth > 7)
                        return -ELOOP;
                return check_loop(leaf, arg->p, arg->depth + 1);
        }
        return 0;
}

/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
        struct net *net = sock_net(skb->sk);
        struct tcmsg *tcm = NLMSG_DATA(n);
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
        u32 clid = tcm->tcm_parent;
        struct Qdisc *q = NULL;
        struct Qdisc *p = NULL;
        int err;

        if (net != &init_net)
                return -EINVAL;

        if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
                return -ENODEV;

        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
        if (err < 0)
                return err;

        if (clid) {
                if (clid != TC_H_ROOT) {
                        if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
                                if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
                                        return -ENOENT;
                                q = qdisc_leaf(p, clid);
                        } else { /* ingress */
                                q = dev->rx_queue.qdisc;
                        }
                } else {
                        struct netdev_queue *dev_queue;
                        dev_queue = netdev_get_tx_queue(dev, 0);
                        q = dev_queue->qdisc_sleeping;
                }
                if (!q)
                        return -ENOENT;

                if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
                        return -EINVAL;
        } else {
                if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
                        return -ENOENT;
        }

        if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
                return -EINVAL;

        if (n->nlmsg_type == RTM_DELQDISC) {
                if (!clid)
                        return -EINVAL;
                if (q->handle == 0)
                        return -ENOENT;
                if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
                        return err;
                if (q) {
                        qdisc_notify(skb, n, clid, q, NULL);
                        qdisc_lock_tree(dev);
                        qdisc_destroy(q);
                        qdisc_unlock_tree(dev);
                }
        } else {
                qdisc_notify(skb, n, clid, NULL, q);
        }
        return 0;
}

/*
   Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
        struct net *net = sock_net(skb->sk);
        struct tcmsg *tcm;
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
        u32 clid;
        struct Qdisc *q, *p;
        int err;

        if (net != &init_net)
                return -EINVAL;

replay:
        /* Reinit, just in case something touches this. */
        tcm = NLMSG_DATA(n);
        clid = tcm->tcm_parent;
        q = p = NULL;

        if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
                return -ENODEV;

        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
        if (err < 0)
                return err;

        if (clid) {
                if (clid != TC_H_ROOT) {
                        if (clid != TC_H_INGRESS) {
                                if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
                                        return -ENOENT;
                                q = qdisc_leaf(p, clid);
                        } else { /* ingress */
                                q = dev->rx_queue.qdisc;
                        }
                } else {
                        struct netdev_queue *dev_queue;
                        dev_queue = netdev_get_tx_queue(dev, 0);
                        q = dev_queue->qdisc_sleeping;
                }

                /* It may be the default qdisc, ignore it */
                if (q && q->handle == 0)
                        q = NULL;

                if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
                        if (tcm->tcm_handle) {
                                if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
                                        return -EEXIST;
                                if (TC_H_MIN(tcm->tcm_handle))
                                        return -EINVAL;
                                if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
                                        goto create_n_graft;
                                if (n->nlmsg_flags & NLM_F_EXCL)
                                        return -EEXIST;
                                if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
                                        return -EINVAL;
                                if (q == p ||
                                    (p && check_loop(q, p, 0)))
                                        return -ELOOP;
                                atomic_inc(&q->refcnt);
                                goto graft;
                        } else {
                                if (q == NULL)
                                        goto create_n_graft;

                                /* This magic test requires explanation.
                                 *
                                 * We know that some child q is already
                                 * attached to this parent and have a choice:
                                 * either to change it or to create/graft a new one.
                                 *
                                 * 1. We are allowed to create/graft only
                                 * if both CREATE and REPLACE flags are set.
                                 *
                                 * 2. If EXCL is set, the requestor wanted to say
                                 * that the qdisc tcm_handle is not expected
                                 * to exist, so we choose create/graft too.
                                 *
                                 * 3. The last case is when no flags are set.
                                 * Alas, it is sort of a hole in the API; we
                                 * cannot decide what to do unambiguously.
                                 * For now we select create/graft if the
                                 * user gave a KIND that does not match the existing one.
                                 */
                                if ((n->nlmsg_flags & NLM_F_CREATE) &&
                                    (n->nlmsg_flags & NLM_F_REPLACE) &&
                                    ((n->nlmsg_flags & NLM_F_EXCL) ||
                                     (tca[TCA_KIND] &&
                                      nla_strcmp(tca[TCA_KIND], q->ops->id))))
                                        goto create_n_graft;
                        }
                }
        } else {
                if (!tcm->tcm_handle)
                        return -EINVAL;
                q = qdisc_lookup(dev, tcm->tcm_handle);
        }

        /* Change qdisc parameters */
        if (q == NULL)
                return -ENOENT;
        if (n->nlmsg_flags & NLM_F_EXCL)
                return -EEXIST;
        if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
                return -EINVAL;
        err = qdisc_change(q, tca);
        if (err == 0)
                qdisc_notify(skb, n, clid, NULL, q);
        return err;

create_n_graft:
        if (!(n->nlmsg_flags & NLM_F_CREATE))
                return -ENOENT;
        if (clid == TC_H_INGRESS)
                q = qdisc_create(dev, &dev->rx_queue,
                                 tcm->tcm_parent, tcm->tcm_parent,
                                 tca, &err);
        else
                q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
                                 tcm->tcm_parent, tcm->tcm_handle,
                                 tca, &err);
        if (q == NULL) {
                if (err == -EAGAIN)
                        goto replay;
                return err;
        }

graft:
        if (1) {
                struct Qdisc *old_q = NULL;
                err = qdisc_graft(dev, p, clid, q, &old_q);
                if (err) {
                        if (q) {
                                qdisc_lock_tree(dev);
                                qdisc_destroy(q);
                                qdisc_unlock_tree(dev);
                        }
                        return err;
                }
                qdisc_notify(skb, n, clid, old_q, q);
                if (old_q) {
                        qdisc_lock_tree(dev);
                        qdisc_destroy(old_q);
                        qdisc_unlock_tree(dev);
                }
        }
        return 0;
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                         u32 pid, u32 seq, u16 flags, int event)
{
        struct tcmsg *tcm;
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct gnet_dump d;

        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
        tcm = NLMSG_DATA(nlh);
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm__pad1 = 0;
        tcm->tcm__pad2 = 0;
        tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = clid;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = atomic_read(&q->refcnt);
        NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
        if (q->ops->dump && q->ops->dump(q, skb) < 0)
                goto nla_put_failure;
        q->qstats.qlen = q->q.qlen;

        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
                                         TCA_XSTATS, &q->dev_queue->lock, &d) < 0)
                goto nla_put_failure;

        if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
                goto nla_put_failure;

        if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
            gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
            gnet_stats_copy_queue(&d, &q->qstats) < 0)
                goto nla_put_failure;

        if (gnet_stats_finish_copy(&d) < 0)
                goto nla_put_failure;

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;

nlmsg_failure:
nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
                        u32 clid, struct Qdisc *old, struct Qdisc *new)
{
        struct sk_buff *skb;
        u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (old && old->handle) {
                if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
                        goto err_out;
        }
        if (new) {
                if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
                        goto err_out;
        }

        if (skb->len)
                return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);

err_out:
        kfree_skb(skb);
        return -EINVAL;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        int idx, q_idx;
        int s_idx, s_q_idx;
        struct net_device *dev;
        struct Qdisc *q;

        if (net != &init_net)
                return 0;

        s_idx = cb->args[0];
        s_q_idx = q_idx = cb->args[1];
        read_lock(&dev_base_lock);
        idx = 0;
        for_each_netdev(&init_net, dev) {
                struct netdev_queue *dev_queue;
                if (idx < s_idx)
                        goto cont;
                if (idx > s_idx)
                        s_q_idx = 0;
                q_idx = 0;
                dev_queue = netdev_get_tx_queue(dev, 0);
                list_for_each_entry(q, &dev_queue->qdisc_list, list) {
                        if (q_idx < s_q_idx) {
                                q_idx++;
                                continue;
                        }
                        if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
                                          cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
                                goto done;
                        q_idx++;
                }
cont:
                idx++;
        }

done:
        read_unlock(&dev_base_lock);

        cb->args[0] = idx;
        cb->args[1] = q_idx;

        return skb->len;
}



/************************************************
 *      Traffic classes manipulation.           *
 ************************************************/



static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
        struct net *net = sock_net(skb->sk);
        struct netdev_queue *dev_queue;
        struct tcmsg *tcm = NLMSG_DATA(n);
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
        struct Qdisc *q = NULL;
        const struct Qdisc_class_ops *cops;
        unsigned long cl = 0;
        unsigned long new_cl;
        u32 pid = tcm->tcm_parent;
        u32 clid = tcm->tcm_handle;
        u32 qid = TC_H_MAJ(clid);
        int err;

        if (net != &init_net)
                return -EINVAL;

        if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
                return -ENODEV;

        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
        if (err < 0)
                return err;

        /*
           parent == TC_H_UNSPEC - unspecified parent.
           parent == TC_H_ROOT   - class is root, which has no parent.
           parent == X:0         - parent is root class.
           parent == X:Y         - parent is a node in hierarchy.
           parent == 0:Y         - parent is X:Y, where X:0 is qdisc.

           handle == 0:0         - generate handle from kernel pool.
           handle == 0:Y         - class is X:Y, where X:0 is qdisc.
           handle == X:Y         - clear.
           handle == X:0         - root class.
         */

        /* Step 1. Determine qdisc handle X:0 */

        dev_queue = netdev_get_tx_queue(dev, 0);
        if (pid != TC_H_ROOT) {
                u32 qid1 = TC_H_MAJ(pid);

                if (qid && qid1) {
                        /* If both majors are known, they must be identical. */
                        if (qid != qid1)
                                return -EINVAL;
                } else if (qid1) {
                        qid = qid1;
                } else if (qid == 0)
                        qid = dev_queue->qdisc_sleeping->handle;

                /* Now qid is a genuine qdisc handle consistent
                   with both parent and child.

                   TC_H_MAJ(pid) may still be unspecified, complete it now.
                 */
                if (pid)
                        pid = TC_H_MAKE(qid, pid);
        } else {
                if (qid == 0)
                        qid = dev_queue->qdisc_sleeping->handle;
        }

        /* OK. Locate qdisc */
        if ((q = qdisc_lookup(dev, qid)) == NULL)
                return -ENOENT;

        /* And check that it supports classes */
        cops = q->ops->cl_ops;
        if (cops == NULL)
                return -EINVAL;

        /* Now try to get class */
        if (clid == 0) {
                if (pid == TC_H_ROOT)
                        clid = qid;
        } else
                clid = TC_H_MAKE(qid, clid);

        if (clid)
                cl = cops->get(q, clid);

        if (cl == 0) {
                err = -ENOENT;
                if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags & NLM_F_CREATE))
                        goto out;
        } else {
                switch (n->nlmsg_type) {
                case RTM_NEWTCLASS:
                        err = -EEXIST;
                        if (n->nlmsg_flags & NLM_F_EXCL)
                                goto out;
                        break;
                case RTM_DELTCLASS:
                        err = cops->delete(q, cl);
                        if (err == 0)
                                tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
                        goto out;
                case RTM_GETTCLASS:
                        err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
                        goto out;
                default:
                        err = -EINVAL;
                        goto out;
                }
        }

        new_cl = cl;
        err = cops->change(q, clid, pid, tca, &new_cl);
        if (err == 0)
                tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);

out:
        if (cl)
                cops->put(q, cl);

        return err;
}


static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
                          unsigned long cl,
                          u32 pid, u32 seq, u16 flags, int event)
{
        struct tcmsg *tcm;
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct gnet_dump d;
        const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
        tcm = NLMSG_DATA(nlh);
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = q->handle;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = 0;
        NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
        if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
                goto nla_put_failure;

        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
                                         TCA_XSTATS, &q->dev_queue->lock, &d) < 0)
                goto nla_put_failure;

        if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
                goto nla_put_failure;

        if (gnet_stats_finish_copy(&d) < 0)
                goto nla_put_failure;

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;

nlmsg_failure:
nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
                         struct Qdisc *q, unsigned long cl, int event)
{
        struct sk_buff *skb;
        u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
                kfree_skb(skb);
                return -EINVAL;
        }

        return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);
}

struct qdisc_dump_args
{
        struct qdisc_walker w;
        struct sk_buff *skb;
        struct netlink_callback *cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
        struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

        return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct netdev_queue *dev_queue;
        int t;
        int s_t;
        struct net_device *dev;
        struct Qdisc *q;
        struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
        struct qdisc_dump_args arg;

        if (net != &init_net)
                return 0;

        if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
                return 0;
        if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
                return 0;

        s_t = cb->args[0];
        t = 0;

        dev_queue = netdev_get_tx_queue(dev, 0);
        list_for_each_entry(q, &dev_queue->qdisc_list, list) {
                if (t < s_t || !q->ops->cl_ops ||
                    (tcm->tcm_parent &&
                     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
                        t++;
                        continue;
                }
                if (t > s_t)
                        memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0]));
                arg.w.fn = qdisc_class_dump;
                arg.skb = skb;
                arg.cb = cb;
                arg.w.stop = 0;
                arg.w.skip = cb->args[1];
                arg.w.count = 0;
                q->ops->cl_ops->walk(q, &arg.w);
                cb->args[1] = arg.w.count;
                if (arg.w.stop)
                        break;
                t++;
        }

        cb->args[0] = t;

        dev_put(dev);
        return skb->len;
}

/* Main classifier routine: scans the classifier chain attached
   to this qdisc, (optionally) tests for protocol and asks
   specific classifiers.
 */
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
                       struct tcf_result *res)
{
        __be16 protocol = skb->protocol;
        int err = 0;

        for (; tp; tp = tp->next) {
                if ((tp->protocol == protocol ||
                     tp->protocol == htons(ETH_P_ALL)) &&
                    (err = tp->classify(skb, tp, res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                        if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
                                skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
#endif
                        return err;
                }
        }
        return -1;
}
EXPORT_SYMBOL(tc_classify_compat);

int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
                struct tcf_result *res)
{
        int err = 0;
        __be16 protocol;
#ifdef CONFIG_NET_CLS_ACT
        struct tcf_proto *otp = tp;
reclassify:
#endif
        protocol = skb->protocol;

        err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
        if (err == TC_ACT_RECLASSIFY) {
                u32 verd = G_TC_VERD(skb->tc_verd);
                tp = otp;

                if (verd++ >= MAX_REC_LOOP) {
                        printk("rule prio %u protocol %02x reclassify loop, "
                               "packet dropped\n",
                               tp->prio & 0xffff, ntohs(tp->protocol));
                        return TC_ACT_SHOT;
                }
                skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
                goto reclassify;
        }
#endif
        return err;
}
EXPORT_SYMBOL(tc_classify);
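
/*
 * Illustrative sketch, not from this file: how a classful qdisc's
 * ->enqueue() path typically uses tc_classify() with its filter list to
 * pick a class. q->filter_list, q->default_class and my_find_class() are
 * made-up names; see sch_prio.c for a real caller.
 *
 *      struct tcf_result res;
 *      int result = tc_classify(skb, q->filter_list, &res);
 *
 *      if (result < 0)
 *              return q->default_class;         // no filter matched
 *      // With CONFIG_NET_CLS_ACT, result may also be an action verdict
 *      // such as TC_ACT_SHOT, which the caller maps to a drop.
 *      return my_find_class(sch, res.classid);
 */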

void tcf_destroy(struct tcf_proto *tp)
{
        tp->ops->destroy(tp);
        module_put(tp->ops->owner);
        kfree(tp);
}

void tcf_destroy_chain(struct tcf_proto **fl)
{
        struct tcf_proto *tp;

        while ((tp = *fl) != NULL) {
                *fl = tp->next;
                tcf_destroy(tp);
        }
}
EXPORT_SYMBOL(tcf_destroy_chain);

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
        struct timespec ts;

        hrtimer_get_res(CLOCK_MONOTONIC, &ts);
        seq_printf(seq, "%08x %08x %08x %08x\n",
                   (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
                   1000000,
                   (u32)NSEC_PER_SEC / (u32)ktime_to_ns(timespec_to_ktime(ts)));

        return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
        return single_open(file, psched_show, PDE(inode)->data);
}

static const struct file_operations psched_fops = {
        .owner   = THIS_MODULE,
        .open    = psched_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};
#endif

static int __init pktsched_init(void)
{
        register_qdisc(&pfifo_qdisc_ops);
        register_qdisc(&bfifo_qdisc_ops);
        proc_net_fops_create(&init_net, "psched", 0, &psched_fops);

        rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
        rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
        rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
        rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);

        return 0;
}

subsys_initcall(pktsched_init);