sched: act: ife: migrate to use per-cpu counters

This patch migrates the current counter handling, which is protected by a
spinlock, to per-cpu counter handling. This reduces the time during which the
spinlock is held.

Signed-off-by: Alexander Aring <aring@mojatatu.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Alexander Aring 2017-10-11 17:16:07 -04:00 committed by David S. Miller
parent 734534e9a8
commit ced273eacf
1 changed file with 11 additions and 18 deletions

View File

@ -477,7 +477,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
if (!exists) { if (!exists) {
ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops, ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
bind, false); bind, true);
if (ret) if (ret)
return ret; return ret;
ret = ACT_P_CREATED; ret = ACT_P_CREATED;
@ -638,19 +638,15 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
u8 *tlv_data; u8 *tlv_data;
u16 metalen; u16 metalen;
spin_lock(&ife->tcf_lock); bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
bstats_update(&ife->tcf_bstats, skb);
tcf_lastuse_update(&ife->tcf_tm); tcf_lastuse_update(&ife->tcf_tm);
spin_unlock(&ife->tcf_lock);
if (skb_at_tc_ingress(skb)) if (skb_at_tc_ingress(skb))
skb_push(skb, skb->dev->hard_header_len); skb_push(skb, skb->dev->hard_header_len);
tlv_data = ife_decode(skb, &metalen); tlv_data = ife_decode(skb, &metalen);
if (unlikely(!tlv_data)) { if (unlikely(!tlv_data)) {
spin_lock(&ife->tcf_lock); qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
ife->tcf_qstats.drops++;
spin_unlock(&ife->tcf_lock);
return TC_ACT_SHOT; return TC_ACT_SHOT;
} }
@ -668,14 +664,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
*/ */
pr_info_ratelimited("Unknown metaid %d dlen %d\n", pr_info_ratelimited("Unknown metaid %d dlen %d\n",
mtype, dlen); mtype, dlen);
ife->tcf_qstats.overlimits++; qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
} }
} }
if (WARN_ON(tlv_data != ifehdr_end)) { if (WARN_ON(tlv_data != ifehdr_end)) {
spin_lock(&ife->tcf_lock); qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
ife->tcf_qstats.drops++;
spin_unlock(&ife->tcf_lock);
return TC_ACT_SHOT; return TC_ACT_SHOT;
} }
@ -727,23 +721,20 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
exceed_mtu = true; exceed_mtu = true;
} }
spin_lock(&ife->tcf_lock); bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
bstats_update(&ife->tcf_bstats, skb);
tcf_lastuse_update(&ife->tcf_tm); tcf_lastuse_update(&ife->tcf_tm);
if (!metalen) { /* no metadata to send */ if (!metalen) { /* no metadata to send */
/* abuse overlimits to count when we allow packet /* abuse overlimits to count when we allow packet
* with no metadata * with no metadata
*/ */
ife->tcf_qstats.overlimits++; qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
spin_unlock(&ife->tcf_lock);
return action; return action;
} }
/* could be stupid policy setup or mtu config /* could be stupid policy setup or mtu config
* so lets be conservative.. */ * so lets be conservative.. */
if ((action == TC_ACT_SHOT) || exceed_mtu) { if ((action == TC_ACT_SHOT) || exceed_mtu) {
ife->tcf_qstats.drops++; qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
spin_unlock(&ife->tcf_lock);
return TC_ACT_SHOT; return TC_ACT_SHOT;
} }
@ -752,6 +743,8 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
ife_meta = ife_encode(skb, metalen); ife_meta = ife_encode(skb, metalen);
spin_lock(&ife->tcf_lock);
/* XXX: we dont have a clever way of telling encode to /* XXX: we dont have a clever way of telling encode to
* not repeat some of the computations that are done by * not repeat some of the computations that are done by
* ops->presence_check... * ops->presence_check...
@ -763,8 +756,8 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
} }
if (err < 0) { if (err < 0) {
/* too corrupt to keep around if overwritten */ /* too corrupt to keep around if overwritten */
ife->tcf_qstats.drops++;
spin_unlock(&ife->tcf_lock); spin_unlock(&ife->tcf_lock);
qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
return TC_ACT_SHOT; return TC_ACT_SHOT;
} }
skboff += err; skboff += err;