seqlock: Prevent rt starvation
If a low prio writer gets preempted while holding the seqlock write locked, a high prio reader spins forever on RT. To prevent this let the reader grab the spinlock, so it blocks and eventually boosts the writer. This way the writer can proceed and endless spinning is prevented. For seqcount writers we disable preemption over the update code path. Thanks to Al Viro for disentangling some VFS code to make that possible. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: stable-rt@vger.kernel.org
This commit is contained in:
parent
41d88b03da
commit
8ef1b9e61c
|
@ -203,20 +203,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
|
||||||
return __read_seqcount_retry(s, start);
|
return __read_seqcount_retry(s, start);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void __raw_write_seqcount_begin(seqcount_t *s)
|
||||||
|
|
||||||
static inline void raw_write_seqcount_begin(seqcount_t *s)
|
|
||||||
{
|
{
|
||||||
s->sequence++;
|
s->sequence++;
|
||||||
smp_wmb();
|
smp_wmb();
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void raw_write_seqcount_end(seqcount_t *s)
|
static inline void raw_write_seqcount_begin(seqcount_t *s)
|
||||||
|
{
|
||||||
|
preempt_disable_rt();
|
||||||
|
__raw_write_seqcount_begin(s);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void __raw_write_seqcount_end(seqcount_t *s)
|
||||||
{
|
{
|
||||||
smp_wmb();
|
smp_wmb();
|
||||||
s->sequence++;
|
s->sequence++;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void raw_write_seqcount_end(seqcount_t *s)
|
||||||
|
{
|
||||||
|
__raw_write_seqcount_end(s);
|
||||||
|
preempt_enable_rt();
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Sequence counter only version assumes that callers are using their
|
* Sequence counter only version assumes that callers are using their
|
||||||
* own mutexing.
|
* own mutexing.
|
||||||
|
@ -278,10 +288,33 @@ typedef struct {
|
||||||
/*
|
/*
|
||||||
* Read side functions for starting and finalizing a read side section.
|
* Read side functions for starting and finalizing a read side section.
|
||||||
*/
|
*/
|
||||||
|
#ifndef CONFIG_PREEMPT_RT_FULL
|
||||||
static inline unsigned read_seqbegin(const seqlock_t *sl)
|
static inline unsigned read_seqbegin(const seqlock_t *sl)
|
||||||
{
|
{
|
||||||
return read_seqcount_begin(&sl->seqcount);
|
return read_seqcount_begin(&sl->seqcount);
|
||||||
}
|
}
|
||||||
|
#else
|
||||||
|
/*
|
||||||
|
* Starvation safe read side for RT
|
||||||
|
*/
|
||||||
|
static inline unsigned read_seqbegin(seqlock_t *sl)
|
||||||
|
{
|
||||||
|
unsigned ret;
|
||||||
|
|
||||||
|
repeat:
|
||||||
|
ret = ACCESS_ONCE(sl->seqcount.sequence);
|
||||||
|
if (unlikely(ret & 1)) {
|
||||||
|
/*
|
||||||
|
* Take the lock and let the writer proceed (i.e. evtl
|
||||||
|
* boost it), otherwise we could loop here forever.
|
||||||
|
*/
|
||||||
|
spin_lock(&sl->lock);
|
||||||
|
spin_unlock(&sl->lock);
|
||||||
|
goto repeat;
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
|
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
|
||||||
{
|
{
|
||||||
|
@ -296,36 +329,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
|
||||||
static inline void write_seqlock(seqlock_t *sl)
|
static inline void write_seqlock(seqlock_t *sl)
|
||||||
{
|
{
|
||||||
spin_lock(&sl->lock);
|
spin_lock(&sl->lock);
|
||||||
write_seqcount_begin(&sl->seqcount);
|
__raw_write_seqcount_begin(&sl->seqcount);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void write_sequnlock(seqlock_t *sl)
|
static inline void write_sequnlock(seqlock_t *sl)
|
||||||
{
|
{
|
||||||
write_seqcount_end(&sl->seqcount);
|
__raw_write_seqcount_end(&sl->seqcount);
|
||||||
spin_unlock(&sl->lock);
|
spin_unlock(&sl->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void write_seqlock_bh(seqlock_t *sl)
|
static inline void write_seqlock_bh(seqlock_t *sl)
|
||||||
{
|
{
|
||||||
spin_lock_bh(&sl->lock);
|
spin_lock_bh(&sl->lock);
|
||||||
write_seqcount_begin(&sl->seqcount);
|
__raw_write_seqcount_begin(&sl->seqcount);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void write_sequnlock_bh(seqlock_t *sl)
|
static inline void write_sequnlock_bh(seqlock_t *sl)
|
||||||
{
|
{
|
||||||
write_seqcount_end(&sl->seqcount);
|
__raw_write_seqcount_end(&sl->seqcount);
|
||||||
spin_unlock_bh(&sl->lock);
|
spin_unlock_bh(&sl->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void write_seqlock_irq(seqlock_t *sl)
|
static inline void write_seqlock_irq(seqlock_t *sl)
|
||||||
{
|
{
|
||||||
spin_lock_irq(&sl->lock);
|
spin_lock_irq(&sl->lock);
|
||||||
write_seqcount_begin(&sl->seqcount);
|
__raw_write_seqcount_begin(&sl->seqcount);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void write_sequnlock_irq(seqlock_t *sl)
|
static inline void write_sequnlock_irq(seqlock_t *sl)
|
||||||
{
|
{
|
||||||
write_seqcount_end(&sl->seqcount);
|
__raw_write_seqcount_end(&sl->seqcount);
|
||||||
spin_unlock_irq(&sl->lock);
|
spin_unlock_irq(&sl->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -334,7 +367,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(&sl->lock, flags);
|
spin_lock_irqsave(&sl->lock, flags);
|
||||||
write_seqcount_begin(&sl->seqcount);
|
__raw_write_seqcount_begin(&sl->seqcount);
|
||||||
return flags;
|
return flags;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -344,7 +377,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
|
||||||
static inline void
|
static inline void
|
||||||
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
|
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
|
||||||
{
|
{
|
||||||
write_seqcount_end(&sl->seqcount);
|
__raw_write_seqcount_end(&sl->seqcount);
|
||||||
spin_unlock_irqrestore(&sl->lock, flags);
|
spin_unlock_irqrestore(&sl->lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -393,7 +393,7 @@ static inline void dst_confirm(struct dst_entry *dst)
|
||||||
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
|
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
|
||||||
struct sk_buff *skb)
|
struct sk_buff *skb)
|
||||||
{
|
{
|
||||||
const struct hh_cache *hh;
|
struct hh_cache *hh;
|
||||||
|
|
||||||
if (dst->pending_confirm) {
|
if (dst->pending_confirm) {
|
||||||
unsigned long now = jiffies;
|
unsigned long now = jiffies;
|
||||||
|
|
|
@ -388,7 +388,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
|
static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
|
||||||
{
|
{
|
||||||
unsigned int seq;
|
unsigned int seq;
|
||||||
int hh_len;
|
int hh_len;
|
||||||
|
@ -443,7 +443,7 @@ struct neighbour_cb {
|
||||||
|
|
||||||
#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
|
#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
|
||||||
|
|
||||||
static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
|
static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
|
||||||
const struct net_device *dev)
|
const struct net_device *dev)
|
||||||
{
|
{
|
||||||
unsigned int seq;
|
unsigned int seq;
|
||||||
|
|
Loading…
Reference in New Issue