[PATCH] include/asm-i386/: "extern inline" -> "static inline"
"extern inline" doesn't make much sense. Signed-off-by: Adrian Bunk <bunk@stusta.de> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
ea0e0a4f53
commit
e2afe67453
@ -35,7 +35,7 @@
  */

 #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)

-extern inline long
+static inline long
 div_ll_X_l_rem(long long divs, long div, long *rem)
 {
 	long dum2;

@ -679,7 +679,7 @@ static inline void rep_nop(void)
    However we don't do prefetches for pre XP Athlons currently
    That should be fixed. */
 #define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
+static inline void prefetch(const void *x)
 {
 	alternative_input(ASM_NOP4,
 			  "prefetchnta (%1)",
@ -693,7 +693,7 @@ extern inline void prefetch(const void *x)

 /* 3dnow! prefetch to get an exclusive cache line. Useful for
    spinlocks to avoid one state transition in the cache coherency protocol. */
-extern inline void prefetchw(const void *x)
+static inline void prefetchw(const void *x)
 {
 	alternative_input(ASM_NOP4,
 			  "prefetchw (%1)",

Loading…
Reference in New Issue
Block a user