netfilter: ipset: move functions to ip_set_core.c.

Several inline functions in ip_set.h are only called in ip_set_core.c:
move them there and remove the inline function specifier.

Signed-off-by: Jeremy Sowden <jeremy@azazel.net>
Acked-by: Jozsef Kadlecsik <kadlec@netfilter.org>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Author: Jeremy Sowden, 2019-10-03 20:56:04 +01:00 (committed by Pablo Neira Ayuso)
commit 2398a97688, parent 94177f6e11
2 changed files with 102 additions and 102 deletions
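
As a primer on the pattern the commit applies, here is a minimal userspace sketch (the widget helper and its field are invented for illustration, not taken from the ipset sources): a helper that used to be declared static inline in a shared header becomes a plain static function in the one .c file that calls it, and the inline specifier goes away.

/* Illustration only: "widget" is a hypothetical example, not ipset code. */
#include <stdint.h>
#include <stdio.h>

struct widget {
	uint32_t count;
};

/* Formerly "static inline uint32_t widget_count(...)" in a shared header;
 * now a private, non-inline helper of its single caller, mirroring what
 * this commit does with the ipset helpers below.
 */
static uint32_t
widget_count(const struct widget *w)
{
	return w->count;
}

int main(void)
{
	struct widget w = { .count = 3 };

	printf("%u\n", widget_count(&w));	/* prints 3 */
	return 0;
}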

--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h

@@ -508,86 +508,9 @@ ip_set_timeout_set(unsigned long *timeout, u32 value)
 	*timeout = t;
 }
 
-static inline u32
-ip_set_timeout_get(const unsigned long *timeout)
-{
-	u32 t;
-
-	if (*timeout == IPSET_ELEM_PERMANENT)
-		return 0;
-
-	t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
-	/* Zero value in userspace means no timeout */
-	return t == 0 ? 1 : t;
-}
-
 void ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
 			 const struct ip_set_ext *ext);
 
-static inline void
-ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
-{
-	atomic64_add((long long)bytes, &(counter)->bytes);
-}
-
-static inline void
-ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
-{
-	atomic64_add((long long)packets, &(counter)->packets);
-}
-
-static inline u64
-ip_set_get_bytes(const struct ip_set_counter *counter)
-{
-	return (u64)atomic64_read(&(counter)->bytes);
-}
-
-static inline u64
-ip_set_get_packets(const struct ip_set_counter *counter)
-{
-	return (u64)atomic64_read(&(counter)->packets);
-}
-
-static inline bool
-ip_set_match_counter(u64 counter, u64 match, u8 op)
-{
-	switch (op) {
-	case IPSET_COUNTER_NONE:
-		return true;
-	case IPSET_COUNTER_EQ:
-		return counter == match;
-	case IPSET_COUNTER_NE:
-		return counter != match;
-	case IPSET_COUNTER_LT:
-		return counter < match;
-	case IPSET_COUNTER_GT:
-		return counter > match;
-	}
-	return false;
-}
-
-static inline void
-ip_set_update_counter(struct ip_set_counter *counter,
-		      const struct ip_set_ext *ext, u32 flags)
-{
-	if (ext->packets != ULLONG_MAX &&
-	    !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
-		ip_set_add_bytes(ext->bytes, counter);
-		ip_set_add_packets(ext->packets, counter);
-	}
-}
-
-static inline bool
-ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
-{
-	return nla_put_net64(skb, IPSET_ATTR_BYTES,
-			     cpu_to_be64(ip_set_get_bytes(counter)),
-			     IPSET_ATTR_PAD) ||
-	       nla_put_net64(skb, IPSET_ATTR_PACKETS,
-			     cpu_to_be64(ip_set_get_packets(counter)),
-			     IPSET_ATTR_PAD);
-}
-
 static inline void
 ip_set_init_counter(struct ip_set_counter *counter,
 		    const struct ip_set_ext *ext)
@@ -598,31 +521,6 @@ ip_set_init_counter(struct ip_set_counter *counter,
 	atomic64_set(&(counter)->packets, (long long)(ext->packets));
 }
 
-static inline void
-ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
-		   const struct ip_set_ext *ext,
-		   struct ip_set_ext *mext, u32 flags)
-{
-	mext->skbinfo = *skbinfo;
-}
-
-static inline bool
-ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
-{
-	/* Send nonzero parameters only */
-	return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
-		nla_put_net64(skb, IPSET_ATTR_SKBMARK,
-			      cpu_to_be64((u64)skbinfo->skbmark << 32 |
-					  skbinfo->skbmarkmask),
-			      IPSET_ATTR_PAD)) ||
-	       (skbinfo->skbprio &&
-		nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
-			      cpu_to_be32(skbinfo->skbprio))) ||
-	       (skbinfo->skbqueue &&
-		nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
-			      cpu_to_be16(skbinfo->skbqueue)));
-}
-
 static inline void
 ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
 		    const struct ip_set_ext *ext)
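
The ip_set_timeout_get() helper removed above reads a stored jiffies deadline: IPSET_ELEM_PERMANENT reports 0, otherwise the remaining time is converted to whole seconds, and a result of 0 is bumped to 1 because a zero value means "no timeout" to userspace. A rough standalone sketch of that conversion follows; jiffies and jiffies_to_msecs() are faked here purely for illustration.

/* Mirrors the logic of ip_set_timeout_get(); not kernel code. */
#include <stdio.h>

#define IPSET_ELEM_PERMANENT	0UL	/* permanent entries are stored as 0 */
#define MSEC_PER_SEC		1000UL

static unsigned long jiffies;		/* stand-in for the kernel's jiffies */

static unsigned long jiffies_to_msecs(unsigned long j)
{
	return j;			/* pretend HZ == 1000 */
}

static unsigned int timeout_get(const unsigned long *timeout)
{
	unsigned int t;

	if (*timeout == IPSET_ELEM_PERMANENT)
		return 0;		/* permanent entries report 0 */

	t = jiffies_to_msecs(*timeout - jiffies) / MSEC_PER_SEC;
	/* Zero value in userspace means no timeout, so report at least 1. */
	return t == 0 ? 1 : t;
}

int main(void)
{
	unsigned long deadline = jiffies + 2500;	/* ~2.5 s remaining */

	printf("%u\n", timeout_get(&deadline));		/* prints 2 */
	return 0;
}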

--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c

@@ -325,6 +325,19 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
 }
 EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
 
+static u32
+ip_set_timeout_get(const unsigned long *timeout)
+{
+	u32 t;
+
+	if (*timeout == IPSET_ELEM_PERMANENT)
+		return 0;
+
+	t = jiffies_to_msecs(*timeout - jiffies) / MSEC_PER_SEC;
+	/* Zero value in userspace means no timeout */
+	return t == 0 ? 1 : t;
+}
+
 static char *
 ip_set_comment_uget(struct nlattr *tb)
 {
@@ -510,6 +523,46 @@ ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
 }
 EXPORT_SYMBOL_GPL(ip_set_get_extensions);
 
+static u64
+ip_set_get_bytes(const struct ip_set_counter *counter)
+{
+	return (u64)atomic64_read(&(counter)->bytes);
+}
+
+static u64
+ip_set_get_packets(const struct ip_set_counter *counter)
+{
+	return (u64)atomic64_read(&(counter)->packets);
+}
+
+static bool
+ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
+{
+	return nla_put_net64(skb, IPSET_ATTR_BYTES,
+			     cpu_to_be64(ip_set_get_bytes(counter)),
+			     IPSET_ATTR_PAD) ||
+	       nla_put_net64(skb, IPSET_ATTR_PACKETS,
+			     cpu_to_be64(ip_set_get_packets(counter)),
+			     IPSET_ATTR_PAD);
+}
+
+static bool
+ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
+{
+	/* Send nonzero parameters only */
+	return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
+		nla_put_net64(skb, IPSET_ATTR_SKBMARK,
+			      cpu_to_be64((u64)skbinfo->skbmark << 32 |
+					  skbinfo->skbmarkmask),
+			      IPSET_ATTR_PAD)) ||
+	       (skbinfo->skbprio &&
+		nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
+			      cpu_to_be32(skbinfo->skbprio))) ||
+	       (skbinfo->skbqueue &&
+		nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
+			      cpu_to_be16(skbinfo->skbqueue)));
+}
+
 int
 ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
 		      const void *e, bool active)
@@ -535,6 +588,55 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
 }
 EXPORT_SYMBOL_GPL(ip_set_put_extensions);
 
+static bool
+ip_set_match_counter(u64 counter, u64 match, u8 op)
+{
+	switch (op) {
+	case IPSET_COUNTER_NONE:
+		return true;
+	case IPSET_COUNTER_EQ:
+		return counter == match;
+	case IPSET_COUNTER_NE:
+		return counter != match;
+	case IPSET_COUNTER_LT:
+		return counter < match;
+	case IPSET_COUNTER_GT:
+		return counter > match;
+	}
+	return false;
+}
+
+static void
+ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
+{
+	atomic64_add((long long)bytes, &(counter)->bytes);
+}
+
+static void
+ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
+{
+	atomic64_add((long long)packets, &(counter)->packets);
+}
+
+static void
+ip_set_update_counter(struct ip_set_counter *counter,
+		      const struct ip_set_ext *ext, u32 flags)
+{
+	if (ext->packets != ULLONG_MAX &&
+	    !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
+		ip_set_add_bytes(ext->bytes, counter);
+		ip_set_add_packets(ext->packets, counter);
+	}
+}
+
+static void
+ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
+		   const struct ip_set_ext *ext,
+		   struct ip_set_ext *mext, u32 flags)
+{
+	mext->skbinfo = *skbinfo;
+}
+
 bool
 ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,
 			struct ip_set_ext *mext, u32 flags, void *data)
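
To make the comparison semantics of the added ip_set_match_counter() concrete, here is a small standalone sketch mirroring its switch statement. The enum values below are local stand-ins, not the real IPSET_COUNTER_* uapi constants.

/* Mirrors the switch in ip_set_match_counter(); not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum counter_op { OP_NONE, OP_EQ, OP_NE, OP_LT, OP_GT };

static bool match_counter(uint64_t counter, uint64_t match, enum counter_op op)
{
	switch (op) {
	case OP_NONE:
		return true;			/* no comparison requested */
	case OP_EQ:
		return counter == match;
	case OP_NE:
		return counter != match;
	case OP_LT:
		return counter < match;
	case OP_GT:
		return counter > match;
	}
	return false;
}

int main(void)
{
	/* e.g. "match only entries that have seen more than 100 packets" */
	printf("%d\n", match_counter(250, 100, OP_GT));	/* prints 1 */
	printf("%d\n", match_counter(50, 100, OP_GT));	/* prints 0 */
	return 0;
}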