linux/net/sched/sch_codel.c
52bf272636 ("net/sched: Fix backlog accounting in qdisc_dequeue_internal") by William Liu
This issue applies to the following qdiscs: hhf, fq, fq_codel, and
fq_pie, and occurs in their change handlers when they adjust the queue
to a new limit. Given a tbf parent, the values they then pass to the
subsequent qdisc_tree_reduce_backlog call are wrong:

   When the tbf parent runs out of tokens, skbs of these qdiscs are
   placed in gso_skb by their shared peek handler, qdisc_peek_dequeued,
   which accounts for both qlen and backlog (sketched after the trace
   below). However, qdisc_dequeue_internal accounts ONLY for qlen when
   pulling from gso_skb. These qdiscs are therefore missing a
   qdisc_qstats_backlog_dec when dropping packets to satisfy the new
   limit in their change handlers.

   One can observe this issue with the following (with tc patched to
   support a limit of 0):

   export TARGET=fq
   tc qdisc del dev lo root
   tc qdisc add dev lo root handle 1: tbf rate 8bit burst 100b latency 1ms
   tc qdisc replace dev lo handle 3: parent 1:1 $TARGET limit 1000
   echo ''; echo 'add child'; tc -s -d qdisc show dev lo
   ping -I lo -f -c2 -s32 -W0.001 127.0.0.1 2>&1 >/dev/null
   echo ''; echo 'after ping'; tc -s -d qdisc show dev lo
   tc qdisc change dev lo handle 3: parent 1:1 $TARGET limit 0
   echo ''; echo 'after limit drop'; tc -s -d qdisc show dev lo
   tc qdisc replace dev lo handle 2: parent 1:1 sfq
   echo ''; echo 'post graft'; tc -s -d qdisc show dev lo

   The second to last show command shows 0 packets but a positive
   number (74) of backlog bytes. The problem becomes clearer in the
   last show command, where qdisc_purge_queue triggers
   qdisc_tree_reduce_backlog with the positive backlog and causes an
   underflow in the tbf parent's backlog (4096 Mb instead of 0).
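
For context, qdisc_peek_dequeued "peeks" by dequeuing once and parking
the skb in gso_skb. A minimal sketch of its accounting, paraphrasing the
helper in include/net/sch_generic.h rather than quoting it verbatim:

/* Sketch: the parked skb is logically still queued, so BOTH qlen
 * and backlog are incremented when it lands in gso_skb.
 */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (!skb) {
		skb = sch->dequeue(sch);
		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}
	return skb;
}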

To fix this issue, the codepath for all clients of qdisc_dequeue_internal
(codel, pie, hhf, fq, fq_pie, and fq_codel) has been simplified:
qdisc_dequeue_internal now handles the backlog adjustment itself in every
case that does not go directly through the dequeue handler.
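
Concretely, the helper must decrement backlog on each path that bypasses
the dequeue handler. A minimal sketch of that shape, an assumption about
the repaired helper rather than the verbatim upstream diff:

static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch,
						     bool direct)
{
	struct sk_buff *skb;

	skb = __skb_dequeue(&sch->gso_skb);
	if (skb) {
		sch->q.qlen--;
		/* the missing piece: undo qdisc_peek_dequeued's
		 * backlog accounting as well as its qlen bump
		 */
		qdisc_qstats_backlog_dec(sch, skb);
		return skb;
	}
	if (direct) {
		skb = __qdisc_dequeue_head(&sch->q);
		if (skb)
			qdisc_qstats_backlog_dec(sch, skb);
		return skb;
	}
	return sch->dequeue(sch);
}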

The old fq_codel_change limit adjustment loop accumulated the arguments
for the subsequent qdisc_tree_reduce_backlog call in the cstats field.
However, this was confusing and error prone, since fq_codel_dequeue
(which qdisc_dequeue_internal calls in the non-gso_skb case) can also
mutate that field, so the loop has been unified with the pattern the
other qdiscs use (sketched below).
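
The unified pattern is the one visible in codel_change in the file below:
dequeue through qdisc_dequeue_internal, count drops locally, and make a
single qdisc_tree_reduce_backlog call at the end:

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);

		if (!skb)
			break;

		dropped_pkts++;
		dropped_bytes += qdisc_pkt_len(skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);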

Fixes: 2d3cbfd6d5 ("net_sched: Flush gso_skb list too during ->change()")
Fixes: 4b549a2ef4 ("fq_codel: Fair Queue Codel AQM")
Fixes: 10239edf86 ("net-qdisc-hhf: Heavy-Hitter Filter (HHF) qdisc")
Signed-off-by: William Liu <will@willsroot.io>
Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
Link: https://patch.msgid.link/20250812235725.45243-1-will@willsroot.io
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2025-08-14 17:52:29 -07:00

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 * Implemented on linux by :
 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

#define DEFAULT_CODEL_LIMIT 1000

struct codel_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
	u32			drop_overlimit;
};

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED);
	qdisc_qstats_drop(sch);
}

static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	if (q->stats.drop_count) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop_reason(skb, sch, to_free,
				 SKB_DROP_REASON_QDISC_OVERLIMIT);
}

static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};

static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	unsigned int dropped_pkts = 0, dropped_bytes = 0;
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		WRITE_ONCE(q->params.target,
			   ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		WRITE_ONCE(q->params.ce_threshold,
			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		WRITE_ONCE(q->params.interval,
			   ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_LIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_CODEL_LIMIT]));

	if (tb[TCA_CODEL_ECN])
		WRITE_ONCE(q->params.ecn,
			   !!nla_get_u32(tb[TCA_CODEL_ECN]));

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);

		if (!skb)
			break;

		dropped_pkts++;
		dropped_bytes += qdisc_pkt_len(skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);

	sch_tree_unlock(sch);
	return 0;
}

static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;
	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}

static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	codel_time_t ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(READ_ONCE(q->params.target))) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(READ_ONCE(q->params.interval))) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			READ_ONCE(q->params.ecn)))
		goto nla_put_failure;
	ce_threshold = READ_ONCE(q->params.ce_threshold);
	if (ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit = q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		= "codel",
	.priv_size	= sizeof(struct codel_sched_data),
	.enqueue	= codel_qdisc_enqueue,
	.dequeue	= codel_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= codel_init,
	.reset		= codel_reset,
	.change		= codel_change,
	.dump		= codel_dump,
	.dump_stats	= codel_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("codel");

static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");