[Cake] [PATCH 2/4] sched: Fix detection of empty queues in child qdiscs

Dave Taht dave.taht at gmail.com
Mon Jan 7 14:57:45 EST 2019


Awesome. I'd (rarely) seen this bug in drr and qfq and never solved
it, which is part of why I went for the all-in-one fq_codel approach...

On Mon, Jan 7, 2019 at 11:48 AM Toke Høiland-Jørgensen <toke at toke.dk> wrote:
>
> From: Toke Høiland-Jørgensen <toke at redhat.com>
>
> Several qdiscs check on enqueue whether the packet was enqueued to a class
> with an empty queue, in which case the class is activated. This is done by
> checking if the qlen is exactly 1 after enqueue. However, if GSO splitting
> is enabled in the child qdisc, a single packet can result in a qlen longer
> than 1. This means the activation check fails, leading to a stalled queue.
>
> Fix this by checking if the queue is empty *before* enqueue, and running
> the activation logic if this was the case.
>
> Reported-by: Pete Heist <pete at heistp.net>
> Signed-off-by: Toke Høiland-Jørgensen <toke at redhat.com>
> ---
>  net/sched/sch_drr.c  | 4 +++-
>  net/sched/sch_hfsc.c | 4 +++-
>  net/sched/sch_qfq.c  | 4 +++-
>  3 files changed, 9 insertions(+), 3 deletions(-)
>
> diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
> index feaf47178653..09b800991065 100644
> --- a/net/sched/sch_drr.c
> +++ b/net/sched/sch_drr.c
> @@ -354,6 +354,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>         struct drr_sched *q = qdisc_priv(sch);
>         struct drr_class *cl;
>         int err = 0;
> +       bool first;
>
>         cl = drr_classify(skb, sch, &err);
>         if (cl == NULL) {
> @@ -363,6 +364,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>                 return err;
>         }
>
> +       first = !cl->qdisc->q.qlen;
>         err = qdisc_enqueue(skb, cl->qdisc, to_free);
>         if (unlikely(err != NET_XMIT_SUCCESS)) {
>                 if (net_xmit_drop_count(err)) {
> @@ -372,7 +374,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>                 return err;
>         }
>
> -       if (cl->qdisc->q.qlen == 1) {
> +       if (first) {
>                 list_add_tail(&cl->alist, &q->active);
>                 cl->deficit = cl->quantum;
>         }
> diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
> index 6bb8f73a8473..24cc220a3218 100644
> --- a/net/sched/sch_hfsc.c
> +++ b/net/sched/sch_hfsc.c
> @@ -1542,6 +1542,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
>         unsigned int len = qdisc_pkt_len(skb);
>         struct hfsc_class *cl;
>         int uninitialized_var(err);
> +       bool first;
>
>         cl = hfsc_classify(skb, sch, &err);
>         if (cl == NULL) {
> @@ -1551,6 +1552,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
>                 return err;
>         }
>
> +       first = !cl->qdisc->q.qlen;
>         err = qdisc_enqueue(skb, cl->qdisc, to_free);
>         if (unlikely(err != NET_XMIT_SUCCESS)) {
>                 if (net_xmit_drop_count(err)) {
> @@ -1560,7 +1562,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
>                 return err;
>         }
>
> -       if (cl->qdisc->q.qlen == 1) {
> +       if (first) {
>                 if (cl->cl_flags & HFSC_RSC)
>                         init_ed(cl, len);
>                 if (cl->cl_flags & HFSC_FSC)
> diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
> index 8d5e55d5bed2..29f5c4a24688 100644
> --- a/net/sched/sch_qfq.c
> +++ b/net/sched/sch_qfq.c
> @@ -1215,6 +1215,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>         struct qfq_class *cl;
>         struct qfq_aggregate *agg;
>         int err = 0;
> +       bool first;
>
>         cl = qfq_classify(skb, sch, &err);
>         if (cl == NULL) {
> @@ -1236,6 +1237,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>         }
>
>         gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
> +       first = !cl->qdisc->q.qlen;
>         err = qdisc_enqueue(skb, cl->qdisc, to_free);
>         if (unlikely(err != NET_XMIT_SUCCESS)) {
>                 pr_debug("qfq_enqueue: enqueue failed %d\n", err);
> @@ -1253,7 +1255,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>
>         agg = cl->agg;
>         /* if the queue was not empty, then done here */
> -       if (cl->qdisc->q.qlen != 1) {
> +       if (!first) {
>                 if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
>                     list_first_entry(&agg->active, struct qfq_class, alist)
>                     == cl && cl->deficit < len)
> --
> 2.20.1
>
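To make the failure mode concrete, here is a minimal user-space sketch
(not kernel code; toy_class, parent_enqueue_buggy and friends are
made-up names for illustration) of what happens when a child qdisc
splits one GSO super-packet into several segments on enqueue, as cake
can do with split-gso. It only models why the old qlen == 1 test misses
the activation while checking for an empty queue beforehand does not:

/* Toy model of the activation bug. A "class" is active when the parent
 * scheduler has it on its list of backlogged classes. The child enqueue
 * may turn one GSO packet into several segments, so checking qlen == 1
 * after enqueue can miss the activation; checking for an empty queue
 * before enqueue cannot.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_class {
	unsigned int qlen;	/* backlog of the child qdisc */
	bool active;		/* on the parent's active list? */
};

/* Child enqueue: a GSO packet with gso_segs > 1 is split into that many
 * segments, so qlen can grow by more than 1 for a single packet. */
static void child_enqueue(struct toy_class *cl, unsigned int gso_segs)
{
	cl->qlen += gso_segs ? gso_segs : 1;
}

/* Old behaviour: activate only if qlen is exactly 1 after enqueue. */
static void parent_enqueue_buggy(struct toy_class *cl, unsigned int gso_segs)
{
	child_enqueue(cl, gso_segs);
	if (cl->qlen == 1)
		cl->active = true;
}

/* Fixed behaviour: remember whether the queue was empty beforehand. */
static void parent_enqueue_fixed(struct toy_class *cl, unsigned int gso_segs)
{
	bool first = !cl->qlen;

	child_enqueue(cl, gso_segs);
	if (first)
		cl->active = true;
}

int main(void)
{
	struct toy_class a = { 0 }, b = { 0 };

	/* One GSO packet that the child splits into 3 segments. */
	parent_enqueue_buggy(&a, 3);
	parent_enqueue_fixed(&b, 3);

	printf("buggy: qlen=%u active=%d  -> backlogged but never scheduled\n",
	       a.qlen, a.active);
	printf("fixed: qlen=%u active=%d  -> class activated as expected\n",
	       b.qlen, b.active);
	return 0;
}

Built with gcc -Wall, the buggy path ends up with qlen=3 and the class
never marked active (the stall described in the commit message), while
the fixed path activates the class on the first enqueue.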



-- 

Dave Täht
CTO, TekLibre, LLC
http://www.teklibre.com
Tel: 1-831-205-9740

