/*
 * connection tracking event cache.
 */

#ifndef _NF_CONNTRACK_ECACHE_H
#define _NF_CONNTRACK_ECACHE_H
#include <net/netfilter/nf_conntrack.h>

#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/netfilter/nf_conntrack_extend.h>

struct nf_conntrack_ecache {
	unsigned long cache;	/* bitops want long */
	unsigned long missed;	/* missed events */
	u16 ctmask;		/* bitmask of ct events to be delivered */
	u16 expmask;		/* bitmask of expect events to be delivered */
	u32 pid;		/* netlink pid of destroyer */
};

static inline struct nf_conntrack_ecache *
nf_ct_ecache_find(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE);
#else
	return NULL;
#endif
}

static inline struct nf_conntrack_ecache *
nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_ecache *e;

	/* No masks given but events are enabled via sysctl:
	 * subscribe to all conntrack and expectation events. */
	if (!ctmask && !expmask && net->ct.sysctl_events) {
		ctmask = ~0;
		expmask = ~0;
	}
	if (!ctmask && !expmask)
		return NULL;

	e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
	if (e) {
		e->ctmask = ctmask;
		e->expmask = expmask;
	}
	return e;
#else
	return NULL;
#endif
}
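
/* A minimal usage sketch (illustrative only, not a prescribed API): the masks
 * are indexed by the event enums from <linux/netfilter/nf_conntrack_common.h>,
 * so a hypothetical caller interested only in creation and destruction events
 * would attach the extension like this:
 *
 *	u16 ctmask = (1 << IPCT_NEW) | (1 << IPCT_DESTROY);
 *
 *	if (nf_ct_ecache_ext_add(ct, ctmask, 0, GFP_ATOMIC) == NULL)
 *		pr_debug("could not attach event cache extension\n");
 *
 * Passing 0 for both masks lets the sysctl_events setting decide, as
 * implemented above.
 */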

#ifdef CONFIG_NF_CONNTRACK_EVENTS
/* This structure is passed to event handler */
struct nf_ct_event {
	struct nf_conn *ct;
	u32 pid;
	int report;
};

struct nf_ct_event_notifier {
	int (*fcn)(unsigned int events, struct nf_ct_event *item);
};

extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
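
/* A minimal registration sketch (hypothetical subscriber, for illustration
 * only; ctnetlink is the in-tree user). The callback receives the pending
 * event bitmask and the conntrack item; returning a negative value signals
 * failed delivery, which keeps the events in the missed event cache for
 * retransmission:
 *
 *	static int my_ct_event(unsigned int events, struct nf_ct_event *item)
 *	{
 *		if (events & (1 << IPCT_DESTROY))
 *			pr_info("conntrack %p destroyed\n", item->ct);
 *		return 0;
 *	}
 *
 *	static struct nf_ct_event_notifier my_notifier = {
 *		.fcn	= my_ct_event,
 *	};
 *
 *	err = nf_conntrack_register_notifier(&init_net, &my_notifier);
 *	...
 *	nf_conntrack_unregister_notifier(&init_net, &my_notifier);
 */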

extern void nf_ct_deliver_cached_events(struct nf_conn *ct);

static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_ecache *e;

	if (net->ct.nf_conntrack_event_cb == NULL)
		return;

	e = nf_ct_ecache_find(ct);
	if (e == NULL)
		return;

	/* Defer delivery: mark the event bit and let
	 * nf_ct_deliver_cached_events() push it out later. */
	set_bit(event, &e->cache);
}

/* Optional reliable event delivery (used when a broadcast listener has set
 * the NETLINK_BROADCAST_ERROR socket option): if an event delivery fails,
 * the undelivered events are kept in the missed event cache. When the next
 * packet arrives, any new events are added to the missed events and delivery
 * is retried, and so on. State transitions may thus be lost, but the
 * userspace process gets back in sync at some point.
 *
 * In the worst case, when no events could be delivered at all, we still make
 * sure the destroy event reaches userspace: if ctnetlink fails to deliver it,
 * the conntrack entry is removed from the hashes and inserted into the dying
 * list, which holds inactive entries, and its timer is re-added with an extra
 * grace timeout of random32() % 15 seconds (tunable via /proc) to trigger the
 * event again. The limited random timeout spreads out the destroy resends and
 * avoids accumulating lots of destroy events at the same time. Deliveries may
 * be re-ordered, but events can be identified by the tuple plus the conntrack
 * ID.
 *
 * The maximum number of conntrack entries (active or inactive) is still
 * bounded by nf_conntrack_max, so packets may start being dropped if many
 * inactive entries accumulate that could not report their destroy event to
 * userspace.
 *
 * Expectations are not covered: events are only generated for new
 * expectations (none for expiration, removal or confirmation), and a
 * per-expectation event cache would be needed to apply the same scheme there.
 * Reliable delivery is also a building block for reliable flow accounting,
 * which additionally needs a conntrack extension storing the creation and
 * destroy times.
 */
static inline int
nf_conntrack_eventmask_report(unsigned int eventmask,
			      struct nf_conn *ct,
			      u32 pid,
			      int report)
{
	int ret = 0;
	struct net *net = nf_ct_net(ct);
	struct nf_ct_event_notifier *notify;
	struct nf_conntrack_ecache *e;

	rcu_read_lock();
	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
	if (notify == NULL)
		goto out_unlock;

	e = nf_ct_ecache_find(ct);
	if (e == NULL)
		goto out_unlock;

	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
		struct nf_ct_event item = {
			.ct	= ct,
			.pid	= e->pid ? e->pid : pid,
			.report = report
		};
		/* Is this a resend of a destroy event? If so, skip the
		 * missed events. */
		unsigned long missed = e->pid ? 0 : e->missed;

		if (!((eventmask | missed) & e->ctmask))
			goto out_unlock;

		ret = notify->fcn(eventmask | missed, &item);
		if (unlikely(ret < 0 || missed)) {
			spin_lock_bh(&ct->lock);
			if (ret < 0) {
				/* This is a destroy event that has been
				 * triggered by a process, we store the PID
				 * to include it in the retransmission. */
				if (eventmask & (1 << IPCT_DESTROY) &&
				    e->pid == 0 && pid != 0)
					e->pid = pid;
				else
					e->missed |= eventmask;
			} else
				e->missed &= ~missed;
			spin_unlock_bh(&ct->lock);
		}
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}

static inline int
nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
			  u32 pid, int report)
{
	return nf_conntrack_eventmask_report(1 << event, ct, pid, report);
}

static inline int
nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
{
	return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
}
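
/* Usage sketch (illustrative only): the conntrack core and protocol helpers
 * report state changes through these wrappers. nf_conntrack_event_cache()
 * above only marks the bit for later batch delivery via
 * nf_ct_deliver_cached_events(), while these wrappers deliver immediately:
 *
 *	nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
 *	nf_conntrack_event(IPCT_DESTROY, ct);
 *	nf_conntrack_event_report(IPCT_DESTROY, ct, pid, report);
 *
 * The _report variant carries the netlink pid and report flag of the process
 * that triggered the change, so ctnetlink can report back to it.
 */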

struct nf_exp_event {
	struct nf_conntrack_expect *exp;
	u32 pid;
	int report;
};

struct nf_exp_event_notifier {
	int (*fcn)(unsigned int events, struct nf_exp_event *item);
};

extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
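
/* Registration mirrors the conntrack notifier above; a hypothetical
 * subscriber for new expectations (for illustration only):
 *
 *	static int my_exp_event(unsigned int events, struct nf_exp_event *item)
 *	{
 *		if (events & (1 << IPEXP_NEW))
 *			pr_info("new expectation %p\n", item->exp);
 *		return 0;
 *	}
 *
 *	static struct nf_exp_event_notifier my_exp_notifier = {
 *		.fcn	= my_exp_event,
 *	};
 *
 *	err = nf_ct_expect_register_notifier(&init_net, &my_exp_notifier);
 */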

static inline void
nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
			  struct nf_conntrack_expect *exp,
			  u32 pid,
			  int report)
{
	struct net *net = nf_ct_exp_net(exp);
	struct nf_exp_event_notifier *notify;
	struct nf_conntrack_ecache *e;

	rcu_read_lock();
	notify = rcu_dereference(net->ct.nf_expect_event_cb);
	if (notify == NULL)
		goto out_unlock;

	e = nf_ct_ecache_find(exp->master);
	if (e == NULL)
		goto out_unlock;

	if (e->expmask & (1 << event)) {
		struct nf_exp_event item = {
			.exp	= exp,
			.pid	= pid,
			.report = report
		};
		notify->fcn(1 << event, &item);
	}
out_unlock:
	rcu_read_unlock();
}

static inline void
nf_ct_expect_event(enum ip_conntrack_expect_events event,
		   struct nf_conntrack_expect *exp)
{
	nf_ct_expect_event_report(event, exp, 0, 0);
}

extern int nf_conntrack_ecache_init(struct net *net);
extern void nf_conntrack_ecache_fini(struct net *net);

#else /* CONFIG_NF_CONNTRACK_EVENTS */

static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
					    struct nf_conn *ct) {}
static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
						struct nf_conn *ct,
						u32 pid,
						int report) { return 0; }
static inline int nf_conntrack_event(enum ip_conntrack_events event,
				     struct nf_conn *ct) { return 0; }
static inline int nf_conntrack_event_report(enum ip_conntrack_events event,
					    struct nf_conn *ct,
					    u32 pid,
					    int report) { return 0; }
static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event,
				      struct nf_conntrack_expect *exp) {}
static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
					     struct nf_conntrack_expect *exp,
					     u32 pid,
					     int report) {}

static inline int nf_conntrack_ecache_init(struct net *net)
{
	return 0;
}

static inline void nf_conntrack_ecache_fini(struct net *net)
{
}

#endif /* CONFIG_NF_CONNTRACK_EVENTS */

#endif /* _NF_CONNTRACK_ECACHE_H */