Merge branch 'mac80211' into mac80211-next

Merge mac80211.git to get some changes that would otherwise
cause conflicts with new changes coming here.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Johannes Berg 2015-01-07 14:39:02 +01:00
commit 3a4b0c948d
28 changed files with 348 additions and 220 deletions


@ -1633,6 +1633,48 @@ There are some more advanced barrier functions:
operations" subsection for information on where to use these.
(*) dma_wmb();
(*) dma_rmb();
These are for use with consistent memory to guarantee the ordering
of writes or reads of shared memory accessible to both the CPU and a
DMA capable device.
For example, consider a device driver that shares memory with a device
and uses a descriptor status value to indicate if the descriptor belongs
to the device or the CPU, and a doorbell to notify it when new
descriptors are available:
if (desc->status != DEVICE_OWN) {
/* do not read data until we own descriptor */
dma_rmb();
/* read/modify data */
read_data = desc->data;
desc->data = write_data;
/* flush modifications before status update */
dma_wmb();
/* assign ownership */
desc->status = DEVICE_OWN;
/* force memory to sync before notifying device via MMIO */
wmb();
/* notify device of new descriptors */
writel(DESC_NOTIFY, doorbell);
}
The dma_rmb() allows us to guarantee the device has released ownership
before we read the data from the descriptor, and the dma_wmb() allows
us to guarantee the data is written to the descriptor before the device
can see it now has ownership. The wmb() is needed to guarantee that the
cache coherent memory writes have completed before attempting a write to
the cache incoherent MMIO region.
See Documentation/DMA-API.txt for more information on consistent memory.
MMIO WRITE BARRIER
------------------
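The receive paths converted later in this commit (fm10k, igb, ixgbe, r8169) apply this scheme from the consumer side. As a minimal sketch only, with purely illustrative names (ring, rx_desc, DESC_DONE and process_packet are not taken from any real driver):

	while (budget--) {
		struct rx_desc *desc = &ring->desc[ring->next_to_clean];

		/* descriptor still owned by the device? */
		if (!(desc->status & DESC_DONE))
			break;

		/* do not read the remaining descriptor fields until the
		 * status write-back has been observed */
		dma_rmb();

		process_packet(ring, desc);
		ring->next_to_clean = (ring->next_to_clean + 1) & ring->mask;
	}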


@ -7,6 +7,57 @@
#define rmb() __asm__ __volatile__("mb": : :"memory")
#define wmb() __asm__ __volatile__("wmb": : :"memory")
/**
* read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data returned by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
*/
#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
#ifdef CONFIG_SMP
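In compilable form, the data-dependency pattern that the listings above describe looks roughly like the sketch below; the names are made up, and in practice rcu_dereference() normally hides the read-side barrier:

	struct obj { int a; };
	struct obj *shared;			/* published to other CPUs */

	void publisher(struct obj *o)		/* CPU 0 */
	{
		o->a = 2;
		smp_wmb();			/* initialise before publishing */
		ACCESS_ONCE(shared) = o;
	}

	int consumer(void)			/* CPU 1 */
	{
		struct obj *q = ACCESS_ONCE(shared);

		smp_read_barrier_depends();	/* order the dependent load (Alpha) */
		return q ? q->a : 0;
	}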


@ -43,10 +43,14 @@
#define mb() do { dsb(); outer_sync(); } while (0)
#define rmb() dsb()
#define wmb() do { dsb(st); outer_sync(); } while (0)
#define dma_rmb() dmb(osh)
#define dma_wmb() dmb(oshst)
#else
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
#define dma_rmb() barrier()
#define dma_wmb() barrier()
#endif
#ifndef CONFIG_SMP


@ -32,6 +32,9 @@
#define rmb() dsb(ld)
#define wmb() dsb(st)
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
#ifndef CONFIG_SMP
#define smp_mb() barrier()
#define smp_rmb() barrier()


@ -22,6 +22,57 @@
# define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
# define rmb() do { barrier(); smp_check_barrier(); } while (0)
# define wmb() do { barrier(); smp_mark_barrier(); } while (0)
/*
* read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data returned by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
*/
# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
#endif


@ -35,26 +35,25 @@
* it's (presumably) much slower than mf and (b) mf.a is supported for
* sequential memory pages only.
*/
#define mb() ia64_mf()
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define mb() ia64_mf()
#define rmb() mb()
#define wmb() mb()
#define dma_rmb() mb()
#define dma_wmb() mb()
#ifdef CONFIG_SMP
# define smp_mb() mb()
# define smp_rmb() rmb()
# define smp_wmb() wmb()
# define smp_read_barrier_depends() read_barrier_depends()
#else
# define smp_mb() barrier()
# define smp_rmb() barrier()
# define smp_wmb() barrier()
# define smp_read_barrier_depends() do { } while(0)
#endif
#define smp_rmb() smp_mb()
#define smp_wmb() smp_mb()
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()


@ -4,8 +4,6 @@
#include <asm/metag_mem.h>
#define nop() asm volatile ("NOP")
#define mb() wmb()
#define rmb() barrier()
#ifdef CONFIG_METAG_META21
@ -41,13 +39,13 @@ static inline void wr_fence(void)
#endif /* !CONFIG_METAG_META21 */
static inline void wmb(void)
{
/* flush writes through the write combiner */
wr_fence();
}
/* flush writes through the write combiner */
#define mb() wr_fence()
#define rmb() barrier()
#define wmb() mb()
#define read_barrier_depends() do { } while (0)
#define dma_rmb() rmb()
#define dma_wmb() wmb()
#ifndef CONFIG_SMP
#define fence() do { } while (0)
@ -82,7 +80,10 @@ static inline void fence(void)
#define smp_wmb() barrier()
#endif
#endif
#define smp_read_barrier_depends() do { } while (0)
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
#define smp_store_release(p, v) \


@ -10,58 +10,6 @@
#include <asm/addrspace.h>
/*
* read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data returned by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
*/
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
@ -127,20 +75,21 @@
#include <asm/wbflush.h>
#define wmb() fast_wmb()
#define rmb() fast_rmb()
#define mb() wbflush()
#define iob() wbflush()
#else /* !CONFIG_CPU_HAS_WB */
#define wmb() fast_wmb()
#define rmb() fast_rmb()
#define mb() fast_mb()
#define iob() fast_iob()
#endif /* !CONFIG_CPU_HAS_WB */
#define wmb() fast_wmb()
#define rmb() fast_rmb()
#define dma_wmb() fast_wmb()
#define dma_rmb() fast_rmb()
#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
# ifdef CONFIG_CPU_CAVIUM_OCTEON
# define smp_mb() __sync()


@ -33,12 +33,9 @@
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
#ifdef CONFIG_SMP
#ifdef __SUBARCH_HAS_LWSYNC
# define SMPWMB LWSYNC
#else
@ -46,20 +43,26 @@
#endif
#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb() __lwsync()
#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#ifdef CONFIG_SMP
#define smp_lwsync() __lwsync()
#define smp_mb() mb()
#define smp_rmb() __lwsync()
#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define __lwsync() barrier()
#define smp_lwsync() barrier()
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#endif /* CONFIG_SMP */
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
/*
* This is a barrier which prevents following instructions from being
* started until the value of the argument x is known. For example, if
@ -72,7 +75,7 @@
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
__lwsync(); \
smp_lwsync(); \
ACCESS_ONCE(*p) = (v); \
} while (0)
@ -80,7 +83,7 @@ do { \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
__lwsync(); \
smp_lwsync(); \
___p1; \
})
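The smp_store_release()/smp_load_acquire() pair that smp_lwsync() now backs is normally used in the same publish/consume shape; a minimal sketch with illustrative names (slot, slot_ready):

	static struct { int payload; } slot;
	static int slot_ready;

	void post(int v)				/* producer */
	{
		slot.payload = v;
		smp_store_release(&slot_ready, 1);	/* payload before flag */
	}

	int try_get(void)				/* consumer */
	{
		if (!smp_load_acquire(&slot_ready))	/* flag before payload */
			return -1;
		return slot.payload;
	}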


@ -24,11 +24,14 @@
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define dma_rmb() rmb()
#define dma_wmb() wmb()
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()


@ -37,7 +37,9 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define rmb() __asm__ __volatile__("":::"memory")
#define wmb() __asm__ __volatile__("":::"memory")
#define read_barrier_depends() do { } while(0)
#define dma_rmb() rmb()
#define dma_wmb() wmb()
#define set_mb(__var, __value) \
do { __var = __value; membar_safe("#StoreLoad"); } while(0)
@ -51,7 +53,8 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define smp_wmb() __asm__ __volatile__("":::"memory")
#endif
#define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
#define smp_store_release(p, v) \
do { \


@ -24,78 +24,28 @@
#define wmb() asm volatile("sfence" ::: "memory")
#endif
/**
* read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data returned by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while (0)
#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else
#define dma_rmb() barrier()
#endif
#define dma_wmb() barrier()
#ifdef CONFIG_SMP
#define smp_mb() mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb() rmb()
#else
# define smp_rmb() barrier()
#endif
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif /* SMP */
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
#if defined(CONFIG_X86_PPRO_FENCE)
/*


@ -29,20 +29,18 @@
#endif /* CONFIG_X86_32 */
#define read_barrier_depends() do { } while (0)
#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else /* CONFIG_X86_PPRO_FENCE */
#define dma_rmb() barrier()
#endif /* CONFIG_X86_PPRO_FENCE */
#define dma_wmb() barrier()
#ifdef CONFIG_SMP
#define smp_mb() mb()
#ifdef CONFIG_X86_PPRO_FENCE
#define smp_rmb() rmb()
#else /* CONFIG_X86_PPRO_FENCE */
#define smp_rmb() barrier()
#endif /* CONFIG_X86_PPRO_FENCE */
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* CONFIG_SMP */
@ -50,11 +48,13 @@
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif /* CONFIG_SMP */
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined


@ -684,10 +684,9 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
struct fixed_phy_status *status)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
u32 link, duplex, pause, speed;
u32 duplex, pause, speed;
u32 reg;
link = core_readl(priv, CORE_LNKSTS);
duplex = core_readl(priv, CORE_DUPSTS);
pause = core_readl(priv, CORE_PAUSESTS);
speed = core_readl(priv, CORE_SPDSTS);
@ -701,22 +700,26 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
* which means that we need to force the link at the port override
* level to get the data to flow. We do use what the interrupt handler
* did determine before.
*
* For the other ports, we just force the link status, since this is
* a fixed PHY device.
*/
if (port == 7) {
status->link = priv->port_sts[port].link;
reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(7));
reg |= SW_OVERRIDE;
if (status->link)
reg |= LINK_STS;
else
reg &= ~LINK_STS;
core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(7));
status->duplex = 1;
} else {
status->link = !!(link & (1 << port));
status->link = 1;
status->duplex = !!(duplex & (1 << port));
}
reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
reg |= SW_OVERRIDE;
if (status->link)
reg |= LINK_STS;
else
reg &= ~LINK_STS;
core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
switch (speed) {
case SPDSTS_10:
status->speed = SPEED_10;


@ -615,14 +615,14 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);
if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_DD))
if (!rx_desc->d.staterr)
break;
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* RXD_STATUS_DD bit is set
* descriptor has been written back
*/
rmb();
dma_rmb();
/* retrieve a buffer from the ring */
skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);


@ -6910,14 +6910,14 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
if (!rx_desc->wb.upper.status_error)
break;
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* RXD_STAT_DD bit is set
* descriptor has been written back
*/
rmb();
dma_rmb();
/* retrieve a buffer from the ring */
skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);


@ -2009,15 +2009,14 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
if (!rx_desc->wb.upper.status_error)
break;
/*
* This memory barrier is needed to keep us from reading
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* RXD_STAT_DD bit is set
* descriptor has been written back
*/
rmb();
dma_rmb();
/* retrieve a buffer from the ring */
skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);


@ -5919,7 +5919,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
}
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
@ -5954,6 +5954,24 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168g_1[] = {
{ 0x00, 0x0000, 0x0008 },
{ 0x0c, 0x37d0, 0x0820 },
{ 0x1e, 0x0000, 0x0001 },
{ 0x19, 0x8000, 0x0000 }
};
rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */
RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
}
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@ -5964,7 +5982,7 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{ 0x1e, 0xffff, 0x20eb }
};
rtl_hw_start_8168g_1(tp);
rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */
RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
@ -5983,7 +6001,7 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{ 0x1e, 0x0000, 0x2000 }
};
rtl_hw_start_8168g_1(tp);
rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */
RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
@ -6605,6 +6623,9 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
@ -6612,7 +6633,6 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
u32 rx_buf_sz)
{
desc->addr = cpu_to_le64(mapping);
wmb();
rtl8169_mark_to_asic(desc, rx_buf_sz);
}
@ -7073,16 +7093,18 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb);
wmb();
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
/* Anti gcc 2.95.3 bugware (sic) */
status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
txd->opts1 = cpu_to_le32(status);
tp->cur_tx += frags + 1;
/* Force all memory writes to complete before notifying device */
wmb();
tp->cur_tx += frags + 1;
RTL_W8(TxPoll, NPQ);
mmiowb();
@ -7181,11 +7203,16 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
struct ring_info *tx_skb = tp->tx_skb + entry;
u32 status;
rmb();
status = le32_to_cpu(tp->TxDescArray[entry].opts1);
if (status & DescOwn)
break;
/* This barrier is needed to keep us from reading
* any other fields out of the Tx descriptor until
* we know the status of DescOwn
*/
dma_rmb();
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
@ -7280,11 +7307,16 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
rmb();
status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
if (status & DescOwn)
break;
/* This barrier is needed to keep us from reading
* any other fields out of the Rx descriptor until
* we know the status of DescOwn
*/
dma_rmb();
if (unlikely(status & RxRES)) {
netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
status);
@ -7346,7 +7378,6 @@ process_pkt:
}
release_descriptor:
desc->opts2 = 0;
wmb();
rtl8169_mark_to_asic(desc, rx_buf_sz);
}
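Taken together, the r8169 changes turn descriptor hand-off into the producer sequence described in the documentation hunk above; schematically (a sketch, not the driver's literal code, and mapping/opts/ring_end are illustrative):

	/* fill the descriptor while the CPU still owns it */
	txd->addr = cpu_to_le64(mapping);
	txd->opts2 = cpu_to_le32(opts[1]);

	/* coherent-memory writes must be visible before ownership transfer */
	dma_wmb();
	txd->opts1 = cpu_to_le32(DescOwn | len | ring_end);

	/* ... and everything must be visible before the uncached doorbell */
	wmb();
	RTL_W8(TxPoll, NPQ);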


@ -42,6 +42,14 @@
#define wmb() mb()
#endif
#ifndef dma_rmb
#define dma_rmb() rmb()
#endif
#ifndef dma_wmb
#define dma_wmb() wmb()
#endif
#ifndef read_barrier_depends
#define read_barrier_depends() do { } while (0)
#endif


@ -1281,8 +1281,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
*
* @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
* driver to indicate that it requires IV generation for this
* particular key. Setting this flag does not necessarily mean that SKBs
* will have sufficient tailroom for ICV or MIC.
* particular key.
* @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
* the driver for a TKIP key if it requires Michael MIC
* generation in software.
@ -1294,9 +1293,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
* @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
* if space should be prepared for the IV, but the IV
* itself should not be generated. Do not set together with
* @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
* not necessarily mean that SKBs will have sufficient tailroom for ICV or
* MIC.
* @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
* @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
* management frames. The flag can help drivers that have a hardware
* crypto implementation that doesn't deal with management frames


@ -512,7 +512,7 @@ static int dsa_slave_fixed_link_update(struct net_device *dev,
}
/* slave device setup *******************************************************/
static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
struct net_device *slave_dev)
{
struct dsa_switch *ds = p->parent;
@ -533,7 +533,7 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
ret = of_phy_register_fixed_link(port_dn);
if (ret) {
netdev_err(slave_dev, "failed to register fixed PHY\n");
return;
return ret;
}
phy_is_fixed = true;
phy_dn = port_dn;
@ -555,12 +555,17 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
*/
if (!p->phy) {
p->phy = ds->slave_mii_bus->phy_map[p->port];
if (!p->phy)
return -ENODEV;
phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
p->phy_interface);
} else {
netdev_info(slave_dev, "attached PHY at address %d [%s]\n",
p->phy->addr, p->phy->drv->name);
}
return 0;
}
int dsa_slave_suspend(struct net_device *slave_dev)
@ -653,12 +658,17 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
p->old_link = -1;
p->old_duplex = -1;
dsa_slave_phy_setup(p, slave_dev);
ret = dsa_slave_phy_setup(p, slave_dev);
if (ret) {
free_netdev(slave_dev);
return NULL;
}
ret = register_netdev(slave_dev);
if (ret) {
netdev_err(master, "error %d registering interface %s\n",
ret, slave_dev->name);
phy_disconnect(p->phy);
free_netdev(slave_dev);
return NULL;
}


@ -1011,6 +1011,10 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
ieee80211_recalc_smps_chanctx(local, new_ctx);
ieee80211_recalc_radar_chanctx(local, new_ctx);
ieee80211_recalc_chanctx_min_def(local, new_ctx);
if (changed)
ieee80211_bss_info_change_notify(sdata, changed);


@ -140,7 +140,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (!ret) {
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
(key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
sdata->crypto_tx_tailroom_needed_cnt--;
WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
@ -188,7 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sta = key->sta;
sdata = key->sdata;
if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
(key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
increment_tailroom_need_count(sdata);
ret = drv_set_key(key->local, DISABLE_KEY, sdata,
@ -656,7 +660,7 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
int i;
mutex_lock(&local->key_mtx);
for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) {
key = key_mtx_dereference(local, sta->gtk[i]);
if (!key)
continue;
@ -884,7 +888,9 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
(key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
increment_tailroom_need_count(key->sdata);
}
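The same three-flag test now appears in three places; a hypothetical helper (not part of this commit) would express the rule once:

	/* Illustrative only: true if crypto_tx_tailroom_needed_cnt must stay
	 * accounted for a hardware-accelerated key, i.e. mac80211 still has
	 * to touch the frame for this key. */
	static bool key_keeps_tailroom(const struct ieee80211_key *key)
	{
		return key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
					  IEEE80211_KEY_FLAG_GENERATE_IV |
					  IEEE80211_KEY_FLAG_PUT_IV_SPACE);
	}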


@ -178,6 +178,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
if (!(ht_cap->cap_info &
cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40))) {
ret = IEEE80211_STA_DISABLE_40MHZ;
vht_chandef = *chandef;
goto out;
}


@ -1761,14 +1761,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
sc = le16_to_cpu(hdr->seq_ctrl);
frag = sc & IEEE80211_SCTL_FRAG;
if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
goto out;
if (is_multicast_ether_addr(hdr->addr1)) {
rx->local->dot11MulticastReceivedFrameCount++;
goto out;
goto out_no_led;
}
if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
goto out;
I802_DEBUG_INC(rx->local->rx_handlers_fragments);
if (skb_linearize(rx->skb))
@ -1859,9 +1859,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
status->rx_flags |= IEEE80211_RX_FRAGMENTED;
out:
ieee80211_led_rx(rx->local);
out_no_led:
if (rx->sta)
rx->sta->rx_packets++;
ieee80211_led_rx(rx->local);
return RX_CONTINUE;
}


@ -603,7 +603,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
{
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
u32 width, control_freq;
u32 width, control_freq, cap;
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
return false;
@ -643,7 +643,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
return false;
break;
case NL80211_CHAN_WIDTH_80P80:
if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
return false;
case NL80211_CHAN_WIDTH_80:
if (!vht_cap->vht_supported)
@ -654,7 +655,9 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
case NL80211_CHAN_WIDTH_160:
if (!vht_cap->vht_supported)
return false;
if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
return false;
prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
width = 160;
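The reason for masking first is that the VHT "Supported Channel Width Set" is a two-bit enumerated field rather than a set of independent flag bits, so the whole subfield has to be compared; roughly (illustrative, using the in-tree capability defines):

	u32 cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;

	/* ..._160MHZ means 160 MHz only, ..._160_80PLUS80MHZ means both
	 * 160 and 80+80 MHz, and 0 means neither is supported. */
	bool supports_160 = cap == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
			    cap == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
	bool supports_80p80 = cap == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;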


@ -6128,7 +6128,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
}
/* there was no other matchset, so the RSSI one is alone */
if (i == 0)
if (i == 0 && n_match_sets)
request->match_sets[0].rssi_thold = default_match_rssi;
request->min_rssi_thold = INT_MAX;


@ -1549,12 +1549,18 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
if (!wdev->beacon_interval)
goto out;
ret = cfg80211_reg_can_beacon(wiphy,
&wdev->chandef, wdev->iftype);
break;
case NL80211_IFTYPE_ADHOC:
if (!wdev->ssid_len)
goto out;
ret = cfg80211_reg_can_beacon(wiphy,
&wdev->chandef, wdev->iftype);
break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_ADHOC:
if (!wdev->current_bss ||
!wdev->current_bss->pub.channel)
goto out;
@ -1920,7 +1926,7 @@ static enum reg_request_treatment
reg_process_hint_driver(struct wiphy *wiphy,
struct regulatory_request *driver_request)
{
const struct ieee80211_regdomain *regd;
const struct ieee80211_regdomain *regd, *tmp;
enum reg_request_treatment treatment;
treatment = __reg_process_hint_driver(driver_request);
@ -1940,7 +1946,10 @@ reg_process_hint_driver(struct wiphy *wiphy,
reg_free_request(driver_request);
return REG_REQ_IGNORE;
}
tmp = get_wiphy_regdom(wiphy);
rcu_assign_pointer(wiphy->regd, regd);
rcu_free_regdom(tmp);
}
@ -1999,11 +2008,8 @@ __reg_process_hint_country_ie(struct wiphy *wiphy,
return REG_REQ_IGNORE;
return REG_REQ_ALREADY_SET;
}
/*
* Two consecutive Country IE hints on the same wiphy.
* This should be picked up early by the driver/stack
*/
if (WARN_ON(regdom_changes(country_ie_request->alpha2)))
if (regdom_changes(country_ie_request->alpha2))
return REG_REQ_OK;
return REG_REQ_ALREADY_SET;
}