net_sched: update hierarchical backlog too
author	WANG Cong <xiyou.wangcong@gmail.com>
Thu, 25 Feb 2016 22:55:01 +0000 (14:55 -0800)
committer	Sasha Levin <sasha.levin@oracle.com>
Tue, 12 Jul 2016 12:48:11 +0000 (08:48 -0400)
[ Upstream commit 2ccccf5fb43ff62b2b96cc58d95fc0b3596516e4 ]

When the bottom qdisc decides to, for example, drop some packet,
it calls qdisc_tree_decrease_qlen() to update the queue length
for all of its ancestors. We need to update the backlog too, to
keep the stats on the root qdisc accurate.
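
The conversion below follows one pattern: capture both the packet count
and the byte backlog before they are wiped, then hand both to the
renamed helper. A minimal sketch of that caller pattern (hypothetical
helper name; it mirrors drr_purge_queue() in this patch):

    static void example_purge_queue(struct Qdisc *q)
    {
            unsigned int qlen = q->q.qlen;             /* packets queued */
            unsigned int backlog = q->qstats.backlog;  /* bytes queued */

            qdisc_reset(q);                            /* wipes both counters */
            qdisc_tree_reduce_backlog(q, qlen, backlog);
    }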

Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
21 files changed:
include/net/codel.h
include/net/sch_generic.h
net/sched/sch_api.c
net/sched/sch_cbq.c
net/sched/sch_choke.c
net/sched/sch_codel.c
net/sched/sch_drr.c
net/sched/sch_fq.c
net/sched/sch_fq_codel.c
net/sched/sch_hfsc.c
net/sched/sch_hhf.c
net/sched/sch_htb.c
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_pie.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sched/sch_tbf.c

include/net/codel.h
index aeee28081245c9215f10badd611f58ba0124fcd0..7302a4df618c507ab50991d891ca9636aa86b9cf 100644
@@ -158,11 +158,13 @@ struct codel_vars {
  * struct codel_stats - contains codel shared variables and stats
  * @maxpacket: largest packet we've seen so far
  * @drop_count:        temp count of dropped packets in dequeue()
+ * @drop_len:  bytes of dropped packets in dequeue()
  * ecn_mark:   number of packets we ECN marked instead of dropping
  */
 struct codel_stats {
        u32             maxpacket;
        u32             drop_count;
+       u32             drop_len;
        u32             ecn_mark;
 };
 
@@ -297,6 +299,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
                                                                  vars->rec_inv_sqrt);
                                        goto end;
                                }
+                               stats->drop_len += qdisc_pkt_len(skb);
                                qdisc_drop(skb, sch);
                                stats->drop_count++;
                                skb = dequeue_func(vars, sch);
@@ -319,6 +322,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
                if (params->ecn && INET_ECN_set_ce(skb)) {
                        stats->ecn_mark++;
                } else {
+                       stats->drop_len += qdisc_pkt_len(skb);
                        qdisc_drop(skb, sch);
                        stats->drop_count++;
 
include/net/sch_generic.h
index d8635b3082719e71de8172fdbc84cf2563147401..f10b01467f079d233c76875ecc16e366a1dca3a7 100644
@@ -393,7 +393,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc);
 void qdisc_reset(struct Qdisc *qdisc);
 void qdisc_destroy(struct Qdisc *qdisc);
-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
+                              unsigned int len);
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops);
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
@@ -698,7 +699,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
        old = *pold;
        *pold = new;
        if (old != NULL) {
-               qdisc_tree_decrease_qlen(old, old->q.qlen);
+               qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
                qdisc_reset(old);
        }
        sch_tree_unlock(sch);
net/sched/sch_api.c
index a25fae3c8ad6901bb7ae408be4fdb87e0755185a..a2a7a81b2b0b01d956f4e48036fbc601eed5ad23 100644
@@ -740,14 +740,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
        return 0;
 }
 
-void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
+                              unsigned int len)
 {
        const struct Qdisc_class_ops *cops;
        unsigned long cl;
        u32 parentid;
        int drops;
 
-       if (n == 0)
+       if (n == 0 && len == 0)
                return;
        drops = max_t(int, n, 0);
        while ((parentid = sch->parent)) {
@@ -766,10 +767,11 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
                        cops->put(sch, cl);
                }
                sch->q.qlen -= n;
+               sch->qstats.backlog -= len;
                __qdisc_qstats_drop(sch, drops);
        }
 }
-EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
+EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
 
 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
                               struct nlmsghdr *n, u32 clid,
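
The loop above subtracts the same packet and byte deltas from every
ancestor up to the root. As an illustration (numbers made up), a leaf
qdisc two levels below the root that dropped 3 packets totaling 4500
bytes reports:

    qdisc_tree_reduce_backlog(leaf, 3, 4500);
    /* on each ancestor: q.qlen -= 3; qstats.backlog -= 4500;
     * so the root's backlog now tracks the drop, not just its qlen.
     */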
net/sched/sch_cbq.c
index 17ad79ebd720bcb2700e3843acd78926ca7b421f..f6e7a60012b1713e53d3d01d2cda29eb87105b9b 100644
@@ -1909,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
-       unsigned int qlen;
+       unsigned int qlen, backlog;
 
        if (cl->filters || cl->children || cl == &q->link)
                return -EBUSY;
@@ -1917,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
        sch_tree_lock(sch);
 
        qlen = cl->q->q.qlen;
+       backlog = cl->q->qstats.backlog;
        qdisc_reset(cl->q);
-       qdisc_tree_decrease_qlen(cl->q, qlen);
+       qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
 
        if (cl->next_alive)
                cbq_deactivate_class(cl);
net/sched/sch_choke.c
index c009eb9045cef48adbe974e9d7d505e242213974..3f6437db9b0fe2638b2ee0b621884509d8b2c242 100644
@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
                choke_zap_tail_holes(q);
 
        qdisc_qstats_backlog_dec(sch, skb);
+       qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
        qdisc_drop(skb, sch);
-       qdisc_tree_decrease_qlen(sch, 1);
        --sch->q.qlen;
 }
 
@@ -449,6 +449,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
                old = q->tab;
                if (old) {
                        unsigned int oqlen = sch->q.qlen, tail = 0;
+                       unsigned dropped = 0;
 
                        while (q->head != q->tail) {
                                struct sk_buff *skb = q->tab[q->head];
@@ -460,11 +461,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
                                        ntab[tail++] = skb;
                                        continue;
                                }
+                               dropped += qdisc_pkt_len(skb);
                                qdisc_qstats_backlog_dec(sch, skb);
                                --sch->q.qlen;
                                qdisc_drop(skb, sch);
                        }
-                       qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
+                       qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
                        q->head = 0;
                        q->tail = tail;
                }
net/sched/sch_codel.c
index de28f8e968e8176ac7630a1e6fcccb45ad295f5d..0d60ea5a5bb628954d1346b0ae90e81d63393203 100644
@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
 
        skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
 
-       /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+       /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
        if (q->stats.drop_count && sch->q.qlen) {
-               qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+               qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
                q->stats.drop_count = 0;
+               q->stats.drop_len = 0;
        }
        if (skb)
                qdisc_bstats_update(sch, skb);
@@ -115,7 +116,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
 {
        struct codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CODEL_MAX + 1];
-       unsigned int qlen;
+       unsigned int qlen, dropped = 0;
        int err;
 
        if (!opt)
@@ -149,10 +150,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = __skb_dequeue(&sch->q);
 
+               dropped += qdisc_pkt_len(skb);
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_drop(skb, sch);
        }
-       qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+       qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
 
        sch_tree_unlock(sch);
        return 0;
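
As the comment in the hunk above warns, the tree update must be
deferred while this qdisc's own qlen is 0, so codel accumulates both
counters and flushes them on a later dequeue. A condensed sketch of
that deferral (taken from the dequeue path above):

    if (q->stats.drop_count && sch->q.qlen) {
            qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
                                      q->stats.drop_len);
            q->stats.drop_count = 0;   /* both counters reset together */
            q->stats.drop_len = 0;
    }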
net/sched/sch_drr.c
index d4b3f82758b475c2881b0528293593d363b62b77..e599803caa1efc6b285f13f2f91c6ef912f4e16b 100644
@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
 static void drr_purge_queue(struct drr_class *cl)
 {
        unsigned int len = cl->qdisc->q.qlen;
+       unsigned int backlog = cl->qdisc->qstats.backlog;
 
        qdisc_reset(cl->qdisc);
-       qdisc_tree_decrease_qlen(cl->qdisc, len);
+       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
 }
 
 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
net/sched/sch_fq.c
index cbd7e1fd23b41bb7ba0bb348c3a1a287652cca93..01fe9d532075e162a8e328a15515d18d060a9580 100644
@@ -644,6 +644,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
+       unsigned drop_len = 0;
        u32 fq_log;
 
        if (!opt)
@@ -709,10 +710,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
 
                if (!skb)
                        break;
+               drop_len += qdisc_pkt_len(skb);
                kfree_skb(skb);
                drop_count++;
        }
-       qdisc_tree_decrease_qlen(sch, drop_count);
+       qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
 
        sch_tree_unlock(sch);
        return err;
net/sched/sch_fq_codel.c
index 1e52decb7b59cf0b4173d0f17efdab8fefee5f26..8d21f1b5d5b9dbd483a54ec471a72c3303a78383 100644
@@ -173,7 +173,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
-       unsigned int idx;
+       unsigned int idx, prev_backlog;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);
 
@@ -201,6 +201,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;
 
+       prev_backlog = sch->qstats.backlog;
        q->drop_overlimit++;
        /* Return Congestion Notification only if we dropped a packet
         * from this flow.
@@ -209,7 +210,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                return NET_XMIT_CN;
 
        /* As we dropped a packet, better let upper stack know this */
-       qdisc_tree_decrease_qlen(sch, 1);
+       qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
        return NET_XMIT_SUCCESS;
 }
 
@@ -239,6 +240,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
        struct fq_codel_flow *flow;
        struct list_head *head;
        u32 prev_drop_count, prev_ecn_mark;
+       unsigned int prev_backlog;
 
 begin:
        head = &q->new_flows;
@@ -257,6 +259,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
 
        prev_drop_count = q->cstats.drop_count;
        prev_ecn_mark = q->cstats.ecn_mark;
+       prev_backlog = sch->qstats.backlog;
 
        skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
                            dequeue);
@@ -274,12 +277,14 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
        }
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
-       /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+       /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
        if (q->cstats.drop_count && sch->q.qlen) {
-               qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+               qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+                                         q->cstats.drop_len);
                q->cstats.drop_count = 0;
+               q->cstats.drop_len = 0;
        }
        return skb;
 }
@@ -347,11 +352,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);
 
+               q->cstats.drop_len += qdisc_pkt_len(skb);
                kfree_skb(skb);
                q->cstats.drop_count++;
        }
-       qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+       qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
        q->cstats.drop_count = 0;
+       q->cstats.drop_len = 0;
 
        sch_tree_unlock(sch);
        return 0;
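
In the enqueue path above (and in hhf below), the byte size of the
packet that fq_codel_drop() discards is not returned to the caller, so
the patch snapshots the backlog first and passes the difference. A
condensed sketch of that snapshot pattern:

    prev_backlog = sch->qstats.backlog;
    if (fq_codel_drop(sch) == idx)      /* dropped from our own flow */
            return NET_XMIT_CN;
    /* bytes freed = backlog before the drop minus backlog after */
    qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);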
net/sched/sch_hfsc.c
index 134f7d2116c961fd0d2d8495ab834bb329765b02..d3e21dac8b40e9cd1a75e5fff65c7585141d5888 100644
@@ -895,9 +895,10 @@ static void
 hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
 {
        unsigned int len = cl->qdisc->q.qlen;
+       unsigned int backlog = cl->qdisc->qstats.backlog;
 
        qdisc_reset(cl->qdisc);
-       qdisc_tree_decrease_qlen(cl->qdisc, len);
+       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
 }
 
 static void
net/sched/sch_hhf.c
index 15d3aabfe2506c1483a2cdba0fe8425d152d2736..792c6f330f779c199bd505fe9d822d797599a94c 100644
@@ -390,6 +390,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        struct hhf_sched_data *q = qdisc_priv(sch);
        enum wdrr_bucket_idx idx;
        struct wdrr_bucket *bucket;
+       unsigned int prev_backlog;
 
        idx = hhf_classify(skb, sch);
 
@@ -417,6 +418,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;
 
+       prev_backlog = sch->qstats.backlog;
        q->drop_overlimit++;
        /* Return Congestion Notification only if we dropped a packet from this
         * bucket.
@@ -425,7 +427,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                return NET_XMIT_CN;
 
        /* As we dropped a packet, better let upper stack know this. */
-       qdisc_tree_decrease_qlen(sch, 1);
+       qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
        return NET_XMIT_SUCCESS;
 }
 
@@ -535,7 +537,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
 {
        struct hhf_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_HHF_MAX + 1];
-       unsigned int qlen;
+       unsigned int qlen, prev_backlog;
        int err;
        u64 non_hh_quantum;
        u32 new_quantum = q->quantum;
@@ -585,12 +587,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
        }
 
        qlen = sch->q.qlen;
+       prev_backlog = sch->qstats.backlog;
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = hhf_dequeue(sch);
 
                kfree_skb(skb);
        }
-       qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+       qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
+                                 prev_backlog - sch->qstats.backlog);
 
        sch_tree_unlock(sch);
        return 0;
net/sched/sch_htb.c
index 520ffe9eaddbc053fce403a70efc8f14c2999f0f..6b118b1587cabfb786b4428750e6afd02fa09250 100644
@@ -1267,7 +1267,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 {
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)arg;
-       unsigned int qlen;
        struct Qdisc *new_q = NULL;
        int last_child = 0;
 
@@ -1287,9 +1286,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
        sch_tree_lock(sch);
 
        if (!cl->level) {
-               qlen = cl->un.leaf.q->q.qlen;
+               unsigned int qlen = cl->un.leaf.q->q.qlen;
+               unsigned int backlog = cl->un.leaf.q->qstats.backlog;
+
                qdisc_reset(cl->un.leaf.q);
-               qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
+               qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
        }
 
        /* delete from hash and active; remainder in destroy_class */
@@ -1423,10 +1424,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                sch_tree_lock(sch);
                if (parent && !parent->level) {
                        unsigned int qlen = parent->un.leaf.q->q.qlen;
+                       unsigned int backlog = parent->un.leaf.q->qstats.backlog;
 
                        /* turn parent into inner node */
                        qdisc_reset(parent->un.leaf.q);
-                       qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
+                       qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
                        qdisc_destroy(parent->un.leaf.q);
                        if (parent->prio_activity)
                                htb_deactivate(q, parent);
net/sched/sch_multiq.c
index f36ff83fac345d2ece85680580f996208ce951a7..23437d62a8dbf0f93eff242987f16bd3a89f5fe7 100644
@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
                if (q->queues[i] != &noop_qdisc) {
                        struct Qdisc *child = q->queues[i];
                        q->queues[i] = &noop_qdisc;
-                       qdisc_tree_decrease_qlen(child, child->q.qlen);
+                       qdisc_tree_reduce_backlog(child, child->q.qlen,
+                                                 child->qstats.backlog);
                        qdisc_destroy(child);
                }
        }
@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
                                q->queues[i] = child;
 
                                if (old != &noop_qdisc) {
-                                       qdisc_tree_decrease_qlen(old,
-                                                                old->q.qlen);
+                                       qdisc_tree_reduce_backlog(old,
+                                                                 old->q.qlen,
+                                                                 old->qstats.backlog);
                                        qdisc_destroy(old);
                                }
                                sch_tree_unlock(sch);
net/sched/sch_netem.c
index d730ea0f2fab4664774e2c34e58d976fe2aebc8f..fac07d534fb7b2864d73e67aa736df9e956e3187 100644
@@ -610,7 +610,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
                                if (unlikely(err != NET_XMIT_SUCCESS)) {
                                        if (net_xmit_drop_count(err)) {
                                                qdisc_qstats_drop(sch);
-                                               qdisc_tree_decrease_qlen(sch, 1);
+                                               qdisc_tree_reduce_backlog(sch, 1,
+                                                                         qdisc_pkt_len(skb));
                                        }
                                }
                                goto tfifo_dequeue;
net/sched/sch_pie.c
index b783a446d884d85af054c9a139f91a63a162bc9f..71ae3b9629f94f8520e557f21962fc9ab34366fb 100644
@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
 {
        struct pie_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_PIE_MAX + 1];
-       unsigned int qlen;
+       unsigned int qlen, dropped = 0;
        int err;
 
        if (!opt)
@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = __skb_dequeue(&sch->q);
 
+               dropped += qdisc_pkt_len(skb);
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_drop(skb, sch);
        }
-       qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+       qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
 
        sch_tree_unlock(sch);
        return 0;
net/sched/sch_prio.c
index a677f543a25c927c2f5a3848b301a473f222530e..e671b1a4e815a4dc521a22f9defa418617451f33 100644
@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
                struct Qdisc *child = q->queues[i];
                q->queues[i] = &noop_qdisc;
                if (child != &noop_qdisc) {
-                       qdisc_tree_decrease_qlen(child, child->q.qlen);
+                       qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
                        qdisc_destroy(child);
                }
        }
@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
                                q->queues[i] = child;
 
                                if (old != &noop_qdisc) {
-                                       qdisc_tree_decrease_qlen(old,
-                                                                old->q.qlen);
+                                       qdisc_tree_reduce_backlog(old,
+                                                                 old->q.qlen,
+                                                                 old->qstats.backlog);
                                        qdisc_destroy(old);
                                }
                                sch_tree_unlock(sch);
net/sched/sch_qfq.c
index 6fea3209a7c6cbc803eb0a0e11ed8ca302703bf7..e2b8fd47008bba731c2e45393c52c969eb31ae8f 100644
@@ -221,9 +221,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
 static void qfq_purge_queue(struct qfq_class *cl)
 {
        unsigned int len = cl->qdisc->q.qlen;
+       unsigned int backlog = cl->qdisc->qstats.backlog;
 
        qdisc_reset(cl->qdisc);
-       qdisc_tree_decrease_qlen(cl->qdisc, len);
+       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
 }
 
 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
net/sched/sch_red.c
index d5abcee454d8c12ee960853668695550f5b458ba..8c0508c0e287742a8fbbcbf49134e76e9e52b807 100644
@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        q->flags = ctl->flags;
        q->limit = ctl->limit;
        if (child) {
-               qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+               qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+                                         q->qdisc->qstats.backlog);
                qdisc_destroy(q->qdisc);
                q->qdisc = child;
        }
net/sched/sch_sfb.c
index fa7f0a36cc31b7f63eb434f914255567f48a654d..e1d634e3c25568e3af8de404a489ab47e5622919 100644
@@ -518,7 +518,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
 
        sch_tree_lock(sch);
 
-       qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+       qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+                                 q->qdisc->qstats.backlog);
        qdisc_destroy(q->qdisc);
        q->qdisc = child;
 
net/sched/sch_sfq.c
index b877140beda5573b8d2dbe4a701bf24355ba7229..4417fb25166f22763ea6fa86d8645cbc7837d806 100644
@@ -369,7 +369,7 @@ static int
 sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
-       unsigned int hash;
+       unsigned int hash, dropped;
        sfq_index x, qlen;
        struct sfq_slot *slot;
        int uninitialized_var(ret);
@@ -484,7 +484,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                return NET_XMIT_SUCCESS;
 
        qlen = slot->qlen;
-       sfq_drop(sch);
+       dropped = sfq_drop(sch);
        /* Return Congestion Notification only if we dropped a packet
         * from this flow.
         */
@@ -492,7 +492,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                return NET_XMIT_CN;
 
        /* As we dropped a packet, better let upper stack know this */
-       qdisc_tree_decrease_qlen(sch, 1);
+       qdisc_tree_reduce_backlog(sch, 1, dropped);
        return NET_XMIT_SUCCESS;
 }
 
@@ -560,6 +560,7 @@ static void sfq_rehash(struct Qdisc *sch)
        struct sfq_slot *slot;
        struct sk_buff_head list;
        int dropped = 0;
+       unsigned int drop_len = 0;
 
        __skb_queue_head_init(&list);
 
@@ -588,6 +589,7 @@ static void sfq_rehash(struct Qdisc *sch)
                        if (x >= SFQ_MAX_FLOWS) {
 drop:
                                qdisc_qstats_backlog_dec(sch, skb);
+                               drop_len += qdisc_pkt_len(skb);
                                kfree_skb(skb);
                                dropped++;
                                continue;
@@ -617,7 +619,7 @@ static void sfq_rehash(struct Qdisc *sch)
                }
        }
        sch->q.qlen -= dropped;
-       qdisc_tree_decrease_qlen(sch, dropped);
+       qdisc_tree_reduce_backlog(sch, dropped, drop_len);
 }
 
 static void sfq_perturbation(unsigned long arg)
@@ -641,7 +643,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        struct sfq_sched_data *q = qdisc_priv(sch);
        struct tc_sfq_qopt *ctl = nla_data(opt);
        struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
-       unsigned int qlen;
+       unsigned int qlen, dropped = 0;
        struct red_parms *p = NULL;
 
        if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
@@ -690,8 +692,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
 
        qlen = sch->q.qlen;
        while (sch->q.qlen > q->limit)
-               sfq_drop(sch);
-       qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+               dropped += sfq_drop(sch);
+       qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
 
        del_timer(&q->perturb_timer);
        if (q->perturb_period) {
net/sched/sch_tbf.c
index 56a1aef3495f43aa0bada3e7d46e303e5d237562..c2fbde742f37347d6974ed87368fd3af4425de74 100644
@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
        struct tbf_sched_data *q = qdisc_priv(sch);
        struct sk_buff *segs, *nskb;
        netdev_features_t features = netif_skb_features(skb);
+       unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
        int ret, nb;
 
        segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
                nskb = segs->next;
                segs->next = NULL;
                qdisc_skb_cb(segs)->pkt_len = segs->len;
+               len += segs->len;
                ret = qdisc_enqueue(segs, q->qdisc);
                if (ret != NET_XMIT_SUCCESS) {
                        if (net_xmit_drop_count(ret))
@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
        }
        sch->q.qlen += nb;
        if (nb > 1)
-               qdisc_tree_decrease_qlen(sch, 1 - nb);
+               qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
        consume_skb(skb);
        return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
 }
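
Note the sign convention in tbf_segment() above: one GSO skb of
prev_len bytes becomes nb segments totaling len bytes, so both deltas
are negative and the subtraction inside qdisc_tree_reduce_backlog()
turns into an addition on every ancestor. With illustrative numbers,
nb = 3, prev_len = 4500 and len = 4542 (segment headers add bytes):

    qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
    /* n = -2, byte delta = -42: each ancestor's qlen grows by 2
     * and its backlog grows by 42 bytes.
     */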
@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 
        sch_tree_lock(sch);
        if (child) {
-               qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+               qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+                                         q->qdisc->qstats.backlog);
                qdisc_destroy(q->qdisc);
                q->qdisc = child;
        }