Merge branch 'net-netem-fix-issues-with-corrupting-GSO-frames'

Jakub Kicinski says:

====================
net: netem: fix issues with corrupting GSO frames

Corrupting GSO frames currently leads to crashes, due to skb use
after free.  These stem from the skb list handling - the segmented
skbs come back on a list, and this list is not properly unlinked
before the segments are enqueued.  It turns out this condition is
made very likely to occur by another bug, in backlog accounting.
Segments are counted twice, which means the qdisc's limit gets
reached, leading to drops and making the use after free very likely
to happen.  (A user-space sketch of the failure mode follows the
cover letter.)

The bugs are fixed in the order in which they were added to the tree.
====================
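
For illustration, here is a minimal user-space model of the list-handling
bug described above (a sketch, not kernel code: struct seg, drop_all()
and main() below stand in for sk_buff, qdisc_drop_all() and the enqueue
path):

/*
 * Minimal user-space model of the double-ownership bug: the head of a
 * still-linked segment list is handed to a routine that frees whole
 * chains, while a separate pointer to the tail is kept for later use.
 * struct seg and drop_all() are illustrative stand-ins, not the kernel
 * API; only the pointer handling mirrors the netem code.
 */
#include <assert.h>
#include <stdlib.h>

struct seg {
	struct seg *next;
	int id;
};

/* Free the whole chain reachable from @head, like qdisc_drop_all(). */
static void drop_all(struct seg *head)
{
	while (head) {
		struct seg *next = head->next;

		free(head);
		head = next;
	}
}

int main(void)
{
	struct seg *head = NULL, **pp = &head;
	struct seg *skb, *segs;
	int i;

	/* Build a three-node list, standing in for skb_gso_segment()
	 * returning segments linked through ->next. */
	for (i = 0; i < 3; i++) {
		struct seg *s = calloc(1, sizeof(*s));

		if (!s)
			abort();
		s->id = i;
		*pp = s;
		pp = &s->next;
	}

	skb = head;		/* first segment, enqueued right away   */
	segs = head->next;	/* the rest, to be enqueued one by one  */

	/*
	 * Pre-fix behaviour: skb->next still pointed at segs, so freeing
	 * the chain starting at skb also freed everything segs points to,
	 * and the later per-segment enqueue became a use after free.
	 * The fix unlinks the head first:
	 */
	skb->next = NULL;	/* what skb_mark_not_on_list(skb) does  */

	drop_all(skb);		/* now frees only the first segment     */
	assert(segs->id == 1);	/* segs is still valid                  */
	drop_all(segs);
	return 0;
}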

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2019-06-18 21:30:39 -04:00
commit e11e1007a1
1 changed file with 14 additions and 12 deletions

net/sched/sch_netem.c

@@ -439,8 +439,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
 	struct sk_buff *segs = NULL;
-	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
-	int nb = 0;
+	unsigned int prev_len = qdisc_pkt_len(skb);
 	int count = 1;
 	int rc = NET_XMIT_SUCCESS;
 	int rc_drop = NET_XMIT_DROP;
@@ -494,16 +493,14 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	 */
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 		if (skb_is_gso(skb)) {
-			segs = netem_segment(skb, sch, to_free);
-			if (!segs)
+			skb = netem_segment(skb, sch, to_free);
+			if (!skb)
 				return rc_drop;
-		} else {
-			segs = skb;
+			segs = skb->next;
+			skb_mark_not_on_list(skb);
+			qdisc_skb_cb(skb)->pkt_len = skb->len;
 		}
 
-		skb = segs;
-		segs = segs->next;
-
 		skb = skb_unshare(skb, GFP_ATOMIC);
 		if (unlikely(!skb)) {
 			qdisc_qstats_drop(sch);
@@ -520,6 +517,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}
 
 	if (unlikely(sch->q.qlen >= sch->limit)) {
+		/* re-link segs, so that qdisc_drop_all() frees them all */
+		skb->next = segs;
 		qdisc_drop_all(skb, sch, to_free);
 		return rc_drop;
 	}
@@ -593,6 +592,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 finish_segs:
 	if (segs) {
+		unsigned int len, last_len;
+		int nb = 0;
+
+		len = skb->len;
+
 		while (segs) {
 			skb2 = segs->next;
 			skb_mark_not_on_list(segs);
@@ -608,9 +612,7 @@ finish_segs:
 			}
 			segs = skb2;
 		}
-		sch->q.qlen += nb;
-		if (nb > 1)
-			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+		qdisc_tree_reduce_backlog(sch, -nb, prev_len - len);
 	}
 	return NET_XMIT_SUCCESS;
 }
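
For a concrete view of the accounting change in the last hunk (the
numbers are made up for illustration): suppose a 4500 byte GSO skb,
which the parent qdiscs have already accounted as 1 packet / 4500
bytes (prev_len == 4500), is corrupted and segmented into three
1500 byte frames.  The first segment is enqueued by the normal path;
the other two are enqueued in the finish_segs loop, so nb == 2 and
len == 1500 + 1500 + 1500 == 4500.  The call
qdisc_tree_reduce_backlog(sch, -nb, prev_len - len) becomes
qdisc_tree_reduce_backlog(sch, -2, 0): the ancestors add 2 packets
and 0 bytes, correcting their view from 1 packet / 4500 bytes to
3 packets / 4500 bytes.  Had one of those segments been dropped,
nb == 1 and len == 3000, giving (-1, +1500): add one packet, subtract
the dropped 1500 bytes.  The old code additionally bumped sch->q.qlen
by nb on top of the increments already done by qdisc_enqueue() of each
segment, which is the double counting the cover letter describes.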