// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */
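
/* Background sketch, for readers new to RED (not a line-by-line account of
 * the fixed-point code in include/net/red.h): the average queue size is an
 * exponentially weighted moving average of the backlog,
 *
 *	qavg <- (1 - W) * qavg + W * backlog,	where W = 2^(-Wlog),
 *
 * and packets are marked/dropped with a probability that rises from 0 at
 * qth_min to max_P at qth_max; red_calc_qavg() and red_action() implement
 * this.
 */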

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

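/* Enqueue path: recompute qavg against the child qdisc's byte backlog and
 * let red_action() choose. RED_DONT_MARK queues the packet unchanged;
 * RED_PROB_MARK and RED_HARD_MARK try to set the ECN CE codepoint, and the
 * packet is dropped instead when ECN is disabled, the packet is not
 * ECN-capable, or (for hard marks only) the harddrop flag is set.
 */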
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

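/* Dequeue pulls from the child qdisc. When the child runs empty, RED enters
 * an "idle period" so that qavg can later be decayed for the time the link
 * sat idle (see red_end_of_idle_period() in the enqueue path).
 */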
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

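/* Push the current configuration down to hardware, if the device supports
 * qdisc offload. TC_RED_REPLACE installs or updates the parameters (with
 * the thresholds converted back from their Wlog-scaled fixed-point form);
 * TC_RED_DESTROY tears the offloaded instance down.
 */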
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
};

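/* Parse and apply a new configuration. TCA_RED_PARMS and TCA_RED_STAB are
 * mandatory; TCA_RED_MAX_P is optional (0 when absent, in which case the
 * legacy Plog field determines the marking probability). A non-zero
 * ctl->limit creates a bfifo child sized to that limit; any old child is
 * swapped out under the tree lock and released only after unlocking.
 */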
static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	int err;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;
}

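/* Adaptive ("adaptative") RED: every 500ms, nudge max_P so that qavg stays
 * between the two thresholds, in the spirit of Floyd's Adaptive RED. The
 * timer takes the root qdisc lock because it touches live RED state.
 */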
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
	return red_change(sch, opt, extack);
}

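/* For an offloaded qdisc, fold the hardware-maintained bstats/qstats into
 * the software counters before they are reported to userspace.
 */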
static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

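/* Dump the active configuration as TCA_RED_PARMS plus TCA_RED_MAX_P,
 * converting the Wlog-scaled thresholds back to user-visible units.
 */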
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

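/* Extended stats: if the qdisc is offloaded, ask the driver to refresh
 * q->stats first, then report the aggregated early-drop/mark counters.
 */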
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.other = q->stats.other;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

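/* Notify the offloading driver, if any, that a different child qdisc has
 * been grafted under this RED instance.
 */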
static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

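/* RED exposes exactly one pseudo-class (the child slot), so the walker
 * visits a single element.
 */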
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");