Merge branch 'skb_list_cleanups'

David Miller says:

====================
SKB list handling cleanups

This is a preparatory patch series which cleans up various forms of
sloppy SKB list handling, and makes certain semantics explicit.

We are trying to eliminate code that directly accesses the SKB
list and SKB queue head next/prev members in any way.  It is
impossible to convert the SKB queue head over to struct list_head
while such code exists.

This patch series does not eliminate all such code, only the simplest
cases.  A later series will tackle the complicated ones.

A helper is added to make the "skb->next == NULL means not on a list"
rule explicit, and another is added to combine this with list_del().
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit fd2b803fa8 by David S. Miller, 2018-09-10 10:07:01 -07:00
36 changed files with 97 additions and 96 deletions
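
The two helpers named in the message land in include/linux/skbuff.h (diff below). As a rough, compile-checked model of their semantics — the struct definitions here are userspace stand-ins for illustration, not the kernel's — the pairing they make explicit looks like this:

    #include <assert.h>
    #include <stddef.h>

    /* Stand-ins for the kernel types, only to model the helpers. */
    struct list_head { struct list_head *next, *prev; };
    struct sk_buff {
        struct sk_buff *next;     /* hand-rolled chain linkage */
        struct list_head list;    /* list_head-based linkage (e.g. GRO) */
    };

    static void __list_del_entry(struct list_head *e)
    {
        e->next->prev = e->prev;
        e->prev->next = e->next;
    }

    /* Makes the "skb->next == NULL means not on a list" rule explicit. */
    static void skb_mark_not_on_list(struct sk_buff *skb)
    {
        skb->next = NULL;
    }

    /* Pairs the unlink with the NULL marking, replacing the open-coded
     * list_del(&skb->list); skb->next = NULL; sequences (see the
     * net/core/dev.c GRO hunks below). */
    static void skb_list_del_init(struct sk_buff *skb)
    {
        __list_del_entry(&skb->list);
        skb_mark_not_on_list(skb);
    }

    int main(void)
    {
        struct list_head head = { &head, &head };
        struct sk_buff skb = { .next = NULL };

        /* Link skb into the list_head-based list, then delete it. */
        skb.list.next = &head; skb.list.prev = &head;
        head.next = &skb.list; head.prev = &skb.list;

        skb_list_del_init(&skb);
        assert(head.next == &head);  /* unlinked */
        assert(skb.next == NULL);    /* explicitly off-list */
        return 0;
    }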


@@ -198,9 +198,9 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp
     if (skb) {
         /* Continue processing fpdu */
-        if (skb->next == (struct sk_buff *)&nesqp->pau_list)
+        skb = skb_peek_next(skb, &nesqp->pau_list);
+        if (!skb)
             goto out;
-        skb = skb->next;
         processacks = false;
     } else {
         /* Starting a new one */
@@ -553,12 +553,10 @@ static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct ne
     if (skb_queue_len(&nesqp->pau_list) == 0) {
         skb_queue_head(&nesqp->pau_list, skb);
     } else {
-        tmpskb = nesqp->pau_list.next;
-        while (tmpskb != (struct sk_buff *)&nesqp->pau_list) {
+        skb_queue_walk(&nesqp->pau_list, tmpskb) {
             cb = (struct nes_rskb_cb *)&tmpskb->cb[0];
             if (before(seqnum, cb->seqnum))
                 break;
-            tmpskb = tmpskb->next;
         }
         skb_insert(tmpskb, skb, &nesqp->pau_list);
     }


@@ -79,7 +79,7 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
 static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
                                         int (*compare)(struct sk_buff *a, struct sk_buff *b))
 {
-    struct sk_buff *pos, *insert = (struct sk_buff *)head;
+    struct sk_buff *pos, *insert = NULL;

     skb_queue_reverse_walk(head, pos) {
         const struct can_rx_offload_cb *cb_pos, *cb_new;
@@ -99,8 +99,10 @@ static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buf
         insert = pos;
         break;
     }
-    __skb_queue_after(head, insert, new);
+    if (!insert)
+        __skb_queue_head(head, new);
+    else
+        __skb_queue_after(head, insert, new);
 }

 static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)


@@ -2400,7 +2400,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
     if (ppp->mrru == 0) /* do nothing until mrru is set */
         return NULL;
-    head = list->next;
+    head = __skb_peek(list);
     tail = NULL;
     skb_queue_walk_safe(list, p, tmp) {
     again:


@@ -3340,9 +3340,9 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
     count = 0;
     length = 0;
     spin_lock_irqsave(&tqp->lock, flags);
-    for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
+    skb_queue_walk(tqp, skb) {
         if (skb_is_gso(skb)) {
-            if (pkt_cnt) {
+            if (!skb_queue_is_first(tqp, skb)) {
                 /* handle previous packets first */
                 break;
             }


@@ -576,7 +576,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
     if (pktq->qlen == 1)
         err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
-                                      pktq->next);
+                                      __skb_peek(pktq));
     else if (!sdiodev->sg_support) {
         glom_skb = brcmu_pkt_buf_get_skb(totlen);
         if (!glom_skb)


@@ -2189,7 +2189,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
      * length of the chain (including padding)
      */
     if (bus->txglom)
-        brcmf_sdio_update_hwhdr(pktq->next->data, total_len);
+        brcmf_sdio_update_hwhdr(__skb_peek(pktq)->data, total_len);
     return 0;
 }


@@ -121,8 +121,8 @@ static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb)
     }
     if (unlikely(!target_skb)) {
         if (priv->rx_end - last_addr >= len) {
-            target_skb = priv->tx_queue.prev;
-            if (!skb_queue_empty(&priv->tx_queue)) {
+            target_skb = skb_peek_tail(&priv->tx_queue);
+            if (target_skb) {
                 info = IEEE80211_SKB_CB(target_skb);
                 range = (void *)info->rate_driver_data;
                 target_addr = range->end_addr;


@@ -499,7 +499,7 @@ static void rtl8187b_status_cb(struct urb *urb)
     if (cmd_type == 1) {
         unsigned int pkt_rc, seq_no;
         bool tok;
-        struct sk_buff *skb;
+        struct sk_buff *skb, *iter;
         struct ieee80211_hdr *ieee80211hdr;
         unsigned long flags;
@@ -508,8 +508,9 @@ static void rtl8187b_status_cb(struct urb *urb)
         seq_no = (val >> 16) & 0xFFF;
         spin_lock_irqsave(&priv->b_tx_status.queue.lock, flags);
-        skb_queue_reverse_walk(&priv->b_tx_status.queue, skb) {
-            ieee80211hdr = (struct ieee80211_hdr *)skb->data;
+        skb = NULL;
+        skb_queue_reverse_walk(&priv->b_tx_status.queue, iter) {
+            ieee80211hdr = (struct ieee80211_hdr *)iter->data;

            /*
             * While testing, it was discovered that the seq_no
@@ -522,10 +523,12 @@ static void rtl8187b_status_cb(struct urb *urb)
             * it's unlikely we wrongly ack some sent data
             */
            if ((le16_to_cpu(ieee80211hdr->seq_ctrl)
-                & 0xFFF) == seq_no)
+                & 0xFFF) == seq_no) {
+                skb = iter;
                 break;
+            }
         }
-        if (skb != (struct sk_buff *) &priv->b_tx_status.queue) {
+        if (skb) {
             struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

             __skb_unlink(skb, &priv->b_tx_status.queue);


@@ -150,15 +150,11 @@ static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
     struct fcoe_rcv_info *fr;
     struct sk_buff_head *list;
     struct sk_buff *skb, *next;
-    struct sk_buff *head;

     bg = &bnx2fc_global;
     spin_lock_bh(&bg->fcoe_rx_list.lock);
     list = &bg->fcoe_rx_list;
-    head = list->next;
-    for (skb = head; skb != (struct sk_buff *)list;
-         skb = next) {
-        next = skb->next;
+    skb_queue_walk_safe(list, skb, next) {
         fr = fcoe_dev_from_skb(skb);
         if (fr->fr_dev == lp) {
             __skb_unlink(skb, list);


@@ -1149,7 +1149,7 @@ static enum reset_type _rtl92e_tx_check_stuck(struct net_device *dev)
         if (skb_queue_len(&ring->queue) == 0) {
             continue;
         } else {
-            skb = (&ring->queue)->next;
+            skb = __skb_peek(&ring->queue);
             tcb_desc = (struct cb_desc *)(skb->cb +
                         MAX_DEV_ADDR_SIZE);
             tcb_desc->nStuckCount++;


@@ -1339,6 +1339,17 @@ static inline void skb_zcopy_abort(struct sk_buff *skb)
     }
 }

+static inline void skb_mark_not_on_list(struct sk_buff *skb)
+{
+    skb->next = NULL;
+}
+
+static inline void skb_list_del_init(struct sk_buff *skb)
+{
+    __list_del_entry(&skb->list);
+    skb_mark_not_on_list(skb);
+}
+
 /**
  *  skb_queue_empty - check if a queue is empty
  *  @list: queue head
@@ -1592,6 +1603,17 @@ static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
     return skb;
 }

+/**
+ *  __skb_peek - peek at the head of a non-empty &sk_buff_head
+ *  @list_: list to peek at
+ *
+ *  Like skb_peek(), but the caller knows that the list is not empty.
+ */
+static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
+{
+    return list_->next;
+}
+
 /**
  *  skb_peek_next - peek skb following the given one from a queue
  *  @skb: skb to start from

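__skb_peek() deliberately omits skb_peek()'s empty-queue check, so it is only for callers that have already established the queue is non-empty — which is exactly the situation in the rtl8192e, brcmfmac, and mac80211 hunks. A minimal sketch of that contract (kernel API; the function and variable names here are illustrative):

    static void inspect_head(struct sk_buff_head *queue)
    {
        struct sk_buff *skb;

        if (skb_queue_len(queue) == 0)
            return;

        /* Queue known non-empty: skip skb_peek()'s NULL check. */
        skb = __skb_peek(queue);
        /* ... examine skb without unlinking it ... */
    }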

@@ -828,8 +828,8 @@ static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
     qh->qlen = 0;
 }

-static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
-                                       struct qdisc_skb_head *qh)
+static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
+                                        struct qdisc_skb_head *qh)
 {
     struct sk_buff *last = qh->tail;
@@ -842,14 +842,24 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
         qh->head = skb;
     }
     qh->qlen++;
-    qdisc_qstats_backlog_inc(sch, skb);
-    return NET_XMIT_SUCCESS;
 }

 static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
 {
-    return __qdisc_enqueue_tail(skb, sch, &sch->q);
+    __qdisc_enqueue_tail(skb, &sch->q);
+    qdisc_qstats_backlog_inc(sch, skb);
+    return NET_XMIT_SUCCESS;
+}
+
+static inline void __qdisc_enqueue_head(struct sk_buff *skb,
+                                        struct qdisc_skb_head *qh)
+{
+    skb->next = qh->head;
+
+    if (!qh->head)
+        qh->tail = skb;
+    qh->head = skb;
+    qh->qlen++;
+}

 static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)

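This split is what lets later hunks drop their private copies: __qdisc_enqueue_tail() no longer takes a Qdisc or touches qstats, so HTB's direct queue (sch_htb hunk below) can use it on a queue that is not sch->q, while qdisc_enqueue_tail() keeps the old stats-updating behaviour; __qdisc_enqueue_head() is netem's former helper hoisted into the header. A sketch of the resulting call-site shapes (names as in the hunks):

    /* Plain qdisc_skb_head, no qdisc stats (HTB's direct queue): */
    __qdisc_enqueue_tail(skb, &q->direct_queue);

    /* The qdisc's own queue, with backlog accounting: */
    qdisc_enqueue_tail(skb, sch);       /* returns NET_XMIT_SUCCESS */

    /* Requeue at the head (netem): */
    __qdisc_enqueue_head(skb, &sch->q);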

@@ -3231,7 +3231,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
     while (skb) {
         struct sk_buff *next = skb->next;

-        skb->next = NULL;
+        skb_mark_not_on_list(skb);
         rc = xmit_one(skb, dev, txq, next != NULL);
         if (unlikely(!dev_xmit_complete(rc))) {
             skb->next = next;
@@ -3331,7 +3331,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
     for (; skb != NULL; skb = next) {
         next = skb->next;
-        skb->next = NULL;
+        skb_mark_not_on_list(skb);

         /* in case skb wont be segmented, point to itself */
         skb->prev = skb;
@@ -5295,8 +5295,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
     list_for_each_entry_safe_reverse(skb, p, head, list) {
         if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
             return;
-        list_del(&skb->list);
-        skb->next = NULL;
+        skb_list_del_init(skb);
         napi_gro_complete(skb);
         napi->gro_hash[index].count--;
     }
@@ -5481,8 +5480,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
     ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

     if (pp) {
-        list_del(&pp->list);
-        pp->next = NULL;
+        skb_list_del_init(pp);
         napi_gro_complete(pp);
         napi->gro_hash[hash].count--;
     }


@@ -2332,7 +2332,7 @@ static void __release_sock(struct sock *sk)
             next = skb->next;
             prefetch(next);
             WARN_ON_ONCE(skb_dst_is_noref(skb));
-            skb->next = NULL;
+            skb_mark_not_on_list(skb);
             sk_backlog_rcv(sk, skb);

             cond_resched();


@@ -260,7 +260,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
     }
     sub_frag_mem_limit(fq->q.net, sum_truesize);

-    head->next = NULL;
+    skb_mark_not_on_list(head);
     head->dev = ldev;
     head->tstamp = fq->q.stamp;


@@ -623,7 +623,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
     sub_frag_mem_limit(qp->q.net, head->truesize);

     *nextp = NULL;
-    head->next = NULL;
+    skb_mark_not_on_list(head);
     head->prev = NULL;
     head->dev = dev;
     head->tstamp = qp->q.stamp;


@@ -531,11 +531,7 @@ static void ip_sublist_rcv_finish(struct list_head *head)
     struct sk_buff *skb, *next;

     list_for_each_entry_safe(skb, next, head, list) {
-        list_del(&skb->list);
-        /* Handle ip{6}_forward case, as sch_direct_xmit have
-         * another kind of SKB-list usage (see validate_xmit_skb_list)
-         */
-        skb->next = NULL;
+        skb_list_del_init(skb);
         dst_input(skb);
     }
 }


@@ -278,7 +278,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
         struct sk_buff *nskb = segs->next;
         int err;

-        segs->next = NULL;
+        skb_mark_not_on_list(segs);
         err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

         if (err && ret == 0)
@@ -684,7 +684,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
             skb = frag;
             frag = skb->next;
-            skb->next = NULL;
+            skb_mark_not_on_list(skb);
         }

         if (err == 0) {


@@ -727,7 +727,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
             skb = frag;
             frag = skb->next;
-            skb->next = NULL;
+            skb_mark_not_on_list(skb);
         }

         kfree(tmp_hdr);


@@ -449,7 +449,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
     sub_frag_mem_limit(fq->q.net, head->truesize);

     head->ignore_df = 1;
-    head->next = NULL;
+    skb_mark_not_on_list(head);
     head->dev = dev;
     head->tstamp = fq->q.stamp;
     ipv6_hdr(head)->payload_len = htons(payload_len);


@@ -388,7 +388,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
     }
     sub_frag_mem_limit(fq->q.net, sum_truesize);

-    head->next = NULL;
+    skb_mark_not_on_list(head);
     head->dev = dev;
     head->tstamp = fq->q.stamp;
     ipv6_hdr(head)->payload_len = htons(payload_len);


@@ -2077,6 +2077,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
     idx = sdata->fragment_next;
     for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
         struct ieee80211_hdr *f_hdr;
+        struct sk_buff *f_skb;

         idx--;
         if (idx < 0)
@@ -2088,7 +2089,8 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
             entry->last_frag + 1 != frag)
             continue;

-        f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
+        f_skb = __skb_peek(&entry->skb_list);
+        f_hdr = (struct ieee80211_hdr *) f_skb->data;

         /*
          * Check ftype and addresses are equal, else check next fragment


@@ -764,7 +764,7 @@ __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
         return ret;
     }

-    skb->next = NULL;
+    skb_mark_not_on_list(skb);

     entry_seg = nf_queue_entry_dup(entry);
     if (entry_seg) {


@@ -259,7 +259,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
     while (list) {
         skb = list;
         list = skb->next;
-        skb->next = NULL;
+        skb_mark_not_on_list(skb);
         rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
     }
 }


@@ -812,7 +812,7 @@ static struct sk_buff *dequeue_head(struct cake_flow *flow)
     if (skb) {
         flow->head = skb->next;
-        skb->next = NULL;
+        skb_mark_not_on_list(skb);
     }

     return skb;
@@ -1252,7 +1252,7 @@ found:
         else
             flow->head = elig_ack->next;

-        elig_ack->next = NULL;
+        skb_mark_not_on_list(elig_ack);

         return elig_ack;
     }
@@ -1675,7 +1675,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
         while (segs) {
             nskb = segs->next;
-            segs->next = NULL;
+            skb_mark_not_on_list(segs);
             qdisc_skb_cb(segs)->pkt_len = segs->len;
             cobalt_set_enqueue_time(segs, now);
             get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,


@@ -319,7 +319,7 @@ static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
     if (skb) {
         flow->head = skb->next;
-        skb->next = NULL;
+        skb_mark_not_on_list(skb);
         flow->qlen--;
         qdisc_qstats_backlog_dec(sch, skb);
         sch->q.qlen--;


@@ -124,7 +124,7 @@ static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
     struct sk_buff *skb = flow->head;

     flow->head = skb->next;
-    skb->next = NULL;
+    skb_mark_not_on_list(skb);
     return skb;
 }


@@ -184,7 +184,7 @@ static void try_bulk_dequeue_skb(struct Qdisc *q,
         skb = nskb;
         (*packets)++; /* GSO counts as one pkt */
     }
-    skb->next = NULL;
+    skb_mark_not_on_list(skb);
 }

 /* This variant of try_bulk_dequeue_skb() makes sure
@@ -210,7 +210,7 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
         skb = nskb;
     } while (++cnt < 8);
     (*packets) += cnt;
-    skb->next = NULL;
+    skb_mark_not_on_list(skb);
 }

 /* Note that dequeue_skb can possibly return a SKB list (via skb->next).


@@ -330,7 +330,7 @@ static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
     struct sk_buff *skb = bucket->head;

     bucket->head = skb->next;
-    skb->next = NULL;
+    skb_mark_not_on_list(skb);
     return skb;
 }


@@ -577,22 +577,6 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
     cl->prio_activity = 0;
 }

-static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
-                             struct qdisc_skb_head *qh)
-{
-    struct sk_buff *last = qh->tail;
-
-    if (last) {
-        skb->next = NULL;
-        last->next = skb;
-        qh->tail = skb;
-    } else {
-        qh->tail = skb;
-        qh->head = skb;
-    }
-    qh->qlen++;
-}
-
 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        struct sk_buff **to_free)
 {
@@ -603,7 +587,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
     if (cl == HTB_DIRECT) {
         /* enqueue to helper queue */
         if (q->direct_queue.qlen < q->direct_qlen) {
-            htb_enqueue_tail(skb, sch, &q->direct_queue);
+            __qdisc_enqueue_tail(skb, &q->direct_queue);
             q->direct_pkts++;
         } else {
             return qdisc_drop(skb, sch, to_free);


@@ -412,16 +412,6 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
     return segs;
 }

-static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
-{
-    skb->next = qh->head;
-
-    if (!qh->head)
-        qh->tail = skb;
-    qh->head = skb;
-    qh->qlen++;
-}
-
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -570,7 +560,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
             cb->time_to_send = ktime_get_ns();
             q->counter = 0;

-            netem_enqueue_skb_head(&sch->q, skb);
+            __qdisc_enqueue_head(skb, &sch->q);
             sch->qstats.requeues++;
         }
@@ -578,7 +568,7 @@ finish_segs:
     if (segs) {
         while (segs) {
             skb2 = segs->next;
-            segs->next = NULL;
+            skb_mark_not_on_list(segs);
             qdisc_skb_cb(segs)->pkt_len = segs->len;
             last_len = segs->len;
             rc = qdisc_enqueue(segs, sch, to_free);


@@ -162,7 +162,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
     nb = 0;
     while (segs) {
         nskb = segs->next;
-        segs->next = NULL;
+        skb_mark_not_on_list(segs);
         qdisc_skb_cb(segs)->pkt_len = segs->len;
         len += segs->len;
         ret = qdisc_enqueue(segs, q->qdisc, to_free);


@@ -459,7 +459,7 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
              * element in the queue, then count it towards
              * possible PD.
              */
-            if (pos == ulpq->reasm.next) {
+            if (skb_queue_is_first(&ulpq->reasm, pos)) {
                 pd_first = pos;
                 pd_last = pos;
                 pd_len = pos->len;

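skb_queue_is_first() is the explicit form of the head test this hunk replaces: comparing the iterator against the queue head's ->next pointer reaches into sk_buff_head's internals and would break once the head is converted to struct list_head. A hedged sketch of the equivalence (kernel API; queue and pos are illustrative names):

    /* Old: tied to sk_buff_head's current layout. */
    if (pos == queue->next)
        first = true;

    /* New: same test, named for what it means. */
    if (skb_queue_is_first(queue, pos))
        first = true;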

@@ -577,7 +577,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
             rcu_dereference_rtnl(orig_dev->tipc_ptr);
     if (likely(b && test_bit(0, &b->up) &&
                (skb->pkt_type <= PACKET_MULTICAST))) {
-        skb->next = NULL;
+        skb_mark_not_on_list(skb);
         tipc_rcv(dev_net(b->pt.dev), skb, b);
         rcu_read_unlock();
         return NET_RX_SUCCESS;


@@ -99,7 +99,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
     do {
         struct sk_buff *nskb = skb2->next;

-        skb2->next = NULL;
+        skb_mark_not_on_list(skb2);
         xo = xfrm_offload(skb2);
         xo->flags |= XFRM_DEV_RESUME;


@@ -189,7 +189,7 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
         struct sk_buff *nskb = segs->next;
         int err;

-        segs->next = NULL;
+        skb_mark_not_on_list(segs);
         err = xfrm_output2(net, sk, segs);

         if (unlikely(err)) {