sh/mmiowb: Add unconditional mmiowb() to arch_spin_unlock()

The mmiowb() macro is horribly difficult to use and drivers will continue
to work most of the time if they omit a call when it is required.
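
As an illustration of the fragile pattern (a hypothetical driver; the lock,
register and variable names are made up for this example, not taken from the
patch), the code that mmiowb() is meant to protect looks roughly like this:

	spin_lock(&dev->lock);
	writel(val, dev->regs + CTRL_REG);	/* posted MMIO write */
	mmiowb();				/* easy to forget, yet needed so writes from
						   two CPUs reach the device in lock order */
	spin_unlock(&dev->lock);

With the barrier folded into arch_spin_unlock(), dropping the explicit
mmiowb() call no longer risks MMIO writes from different lock holders
arriving at the device out of order.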

Rather than rely on driver authors getting this right, push mmiowb() into
arch_spin_unlock() for sh. If this is deemed to be a performance issue,
a subsequent optimisation could make use of ARCH_HAS_MMIOWB to elide
the barrier in cases where no I/O writes were performed inside the
critical section.
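
A minimal sketch of that elision, assuming the per-CPU tracking scheme used by
asm-generic/mmiowb.h (the helper and variable names below are illustrative,
not something this patch adds): the I/O accessors mark a write as pending, and
the unlock path only pays for the barrier when a write is actually pending.

	DECLARE_PER_CPU(int, mmiowb_pending);	/* hypothetical per-CPU flag */

	static inline void mmiowb_set_pending(void)
	{
		/* called after writel() and friends */
		__this_cpu_write(mmiowb_pending, 1);
	}

	static inline void mmiowb_spin_unlock(void)
	{
		/* called on the unlock path instead of an unconditional mmiowb() */
		if (unlikely(__this_cpu_read(mmiowb_pending))) {
			__this_cpu_write(mmiowb_pending, 0);
			mmiowb();	/* synco on SH-4A */
		}
	}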

Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
commit e9e8543fec (parent 0f43ca692d)
Author: Will Deacon
Date:   2019-02-22 13:37:21 +00:00

4 changed files with 14 additions and 4 deletions

arch/sh/include/asm/Kbuild

@@ -14,7 +14,6 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += preempt.h

arch/sh/include/asm/io.h

@@ -229,9 +229,6 @@ __BUILD_IOPORT_STRING(q, u64)
 #define IO_SPACE_LIMIT 0xffffffff
 
-/* synco on SH-4A, otherwise a nop */
-#define mmiowb() wmb()
-
 /* We really want to try and get these to memcpy etc */
 void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
 void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
 

arch/sh/include/asm/mmiowb.h (new file)

@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SH_MMIOWB_H
+#define __ASM_SH_MMIOWB_H
+
+#include <asm/barrier.h>
+
+/* synco on SH-4A, otherwise a nop */
+#define mmiowb() wmb()
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* __ASM_SH_MMIOWB_H */
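
For context (not part of this diff), the "synco on SH-4A, otherwise a nop"
comment reflects how sh's asm/barrier.h defines the write barrier: on SH-4A it
maps to the "synco" instruction, elsewhere it degenerates to a compiler
barrier. Roughly:

	#if defined(CONFIG_CPU_SH4A)
	#define wmb()	__asm__ __volatile__ ("synco" : : : "memory")
	#endif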

arch/sh/include/asm/spinlock-llsc.h

@@ -47,6 +47,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
+	/* This could be optimised with ARCH_HAS_MMIOWB */
+	mmiowb();
 	__asm__ __volatile__ (
 		"mov		#1, %0 ! arch_spin_unlock	\n\t"
 		"mov.l		%0, @%1				\n\t"