Merge branch 'for-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu into slub/lockless

Pekka Enberg 2011-03-11 17:42:05 +02:00
commit 2a6c5176ec
23 changed files with 279 additions and 33 deletions

arch/alpha/kernel/vmlinux.lds.S

@@ -1,5 +1,6 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
+#include <asm/cache.h>
#include <asm/page.h>
OUTPUT_FORMAT("elf64-alpha")
@@ -38,7 +39,7 @@ SECTIONS
__init_begin = ALIGN(PAGE_SIZE);
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
-PERCPU(PAGE_SIZE)
+PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
needed for the THREAD_SIZE aligned init_task gets freed after init */
. = ALIGN(THREAD_SIZE);
@@ -46,7 +47,7 @@ SECTIONS
/* Freed after init ends here */
_data = .;
-RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
.got : {
*(.got)

arch/arm/kernel/vmlinux.lds.S

@@ -78,7 +78,7 @@ SECTIONS
#endif
}
-PERCPU(PAGE_SIZE)
+PERCPU(32, PAGE_SIZE)
#ifndef CONFIG_XIP_KERNEL
. = ALIGN(PAGE_SIZE);

arch/blackfin/kernel/vmlinux.lds.S

@@ -136,7 +136,7 @@ SECTIONS
. = ALIGN(16);
INIT_DATA_SECTION(16)
-PERCPU(4)
+PERCPU(32, 4)
.exit.data :
{

arch/cris/kernel/vmlinux.lds.S

@@ -102,7 +102,7 @@ SECTIONS
#endif
__vmlinux_end = .; /* Last address of the physical file. */
#ifdef CONFIG_ETRAX_ARCH_V32
-PERCPU(PAGE_SIZE)
+PERCPU(32, PAGE_SIZE)
.init.ramfs : {
INIT_RAM_FS

arch/frv/kernel/vmlinux.lds.S

@@ -37,7 +37,7 @@ SECTIONS
_einittext = .;
INIT_DATA_SECTION(8)
-PERCPU(4096)
+PERCPU(L1_CACHE_BYTES, 4096)
. = ALIGN(PAGE_SIZE);
__init_end = .;

arch/ia64/kernel/vmlinux.lds.S

@@ -198,7 +198,7 @@ SECTIONS {
/* Per-cpu data: */
. = ALIGN(PERCPU_PAGE_SIZE);
-PERCPU_VADDR(PERCPU_ADDR, :percpu)
+PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
__phys_per_cpu_start = __per_cpu_load;
/*
* ensure percpu data fits

arch/m32r/kernel/vmlinux.lds.S

@@ -53,7 +53,7 @@ SECTIONS
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
-PERCPU(PAGE_SIZE)
+PERCPU(32, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */

arch/mips/kernel/vmlinux.lds.S

@@ -115,7 +115,7 @@ SECTIONS
EXIT_DATA
}
-PERCPU(PAGE_SIZE)
+PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */

arch/mn10300/kernel/vmlinux.lds.S

@@ -70,7 +70,7 @@ SECTIONS
.exit.text : { EXIT_TEXT; }
.exit.data : { EXIT_DATA; }
-PERCPU(PAGE_SIZE)
+PERCPU(32, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */

arch/parisc/kernel/vmlinux.lds.S

@@ -145,7 +145,7 @@ SECTIONS
EXIT_DATA
}
-PERCPU(PAGE_SIZE)
+PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */

arch/powerpc/kernel/vmlinux.lds.S

@@ -160,7 +160,7 @@ SECTIONS
INIT_RAM_FS
}
-PERCPU(PAGE_SIZE)
+PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
. = ALIGN(8);
.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {

arch/s390/kernel/vmlinux.lds.S

@@ -77,7 +77,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
-PERCPU(PAGE_SIZE)
+PERCPU(0x100, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */

arch/sh/kernel/vmlinux.lds.S

@@ -66,7 +66,7 @@ SECTIONS
__machvec_end = .;
}
-PERCPU(PAGE_SIZE)
+PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
/*
* .exit.text is discarded at runtime, not link time, to deal with

arch/sparc/kernel/vmlinux.lds.S

@@ -108,7 +108,7 @@ SECTIONS
__sun4v_2insn_patch_end = .;
}
-PERCPU(PAGE_SIZE)
+PERCPU(SMP_CACHE_BYTES, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .;

arch/tile/kernel/vmlinux.lds.S

@@ -63,7 +63,7 @@ SECTIONS
*(.init.page)
} :data =0
INIT_DATA_SECTION(16)
-PERCPU(PAGE_SIZE)
+PERCPU(L2_CACHE_BYTES, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
VMLINUX_SYMBOL(_einitdata) = .;

arch/um/include/asm/common.lds.S

@@ -42,7 +42,7 @@
INIT_SETUP(0)
}
-PERCPU(32)
+PERCPU(32, 32)
.initcall.init : {
INIT_CALLS

arch/x86/include/asm/percpu.h

@@ -451,6 +451,26 @@ do { \
#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#endif /* !CONFIG_M386 */
#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) \
({ \
char __ret; \
typeof(o1) __o1 = o1; \
typeof(o1) __n1 = n1; \
typeof(o2) __o2 = o2; \
typeof(o2) __n2 = n2; \
typeof(o2) __dummy = n2; \
asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \
: "=a"(__ret), "=m" (pcp1), "=d"(__dummy) \
: "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2)); \
__ret; \
})
#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
#endif /* CONFIG_X86_CMPXCHG64 */
/*
* Per cpu atomic 64 bit operations are only available under 64 bit.
* 32 bit must fall back to generic operations.
@@ -480,6 +500,34 @@ do { \
#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
/*
* Pretty complex macro to generate cmpxchg16 instruction. The instruction
* is not supported on early AMD64 processors so we must be able to emulate
* it in software. The address used in the cmpxchg16 instruction must be
* aligned to a 16 byte boundary.
*/
#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) \
({ \
char __ret; \
typeof(o1) __o1 = o1; \
typeof(o1) __n1 = n1; \
typeof(o2) __o2 = o2; \
typeof(o2) __n2 = n2; \
typeof(o2) __dummy; \
alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4, \
"cmpxchg16b %%gs:(%%rsi)\n\tsetz %0\n\t", \
X86_FEATURE_CX16, \
ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \
"S" (&pcp1), "b"(__n1), "c"(__n2), \
"a"(__o1), "d"(__o2)); \
__ret; \
})
#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
#endif
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
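
As a rough usage sketch (names below are hypothetical, not part of the patch, and the generic this_cpu_cmpxchg_double() dispatcher comes from the include/linux/percpu.h hunk later in this merge): with CONFIG_X86_CMPXCHG64 set, two adjacent 4-byte per-CPU words can be replaced in a single cmpxchg8b, provided the pair is 8-byte aligned.

    #include <linux/percpu.h>	/* this_cpu_cmpxchg_double() */

    /* Hypothetical pair; must be adjacent and 8-byte aligned for cmpxchg8b. */
    struct pair32 {
            u32 head;
            u32 gen;
    } __aligned(8);
    static DEFINE_PER_CPU(struct pair32, p32);

    static bool advance(u32 old_head, u32 old_gen, u32 new_head)
    {
            /* Size 4 dispatches to this_cpu_cmpxchg_double_4() and thus
             * to percpu_cmpxchg8b_double() defined above. */
            return this_cpu_cmpxchg_double(p32.head, p32.gen,
                                           old_head, old_gen,
                                           new_head, old_gen + 1);
    }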

arch/x86/kernel/vmlinux.lds.S

@@ -230,7 +230,7 @@ SECTIONS
* output PHDR, so the next output section - .init.text - should
* start another segment - init.
*/
-PERCPU_VADDR(0, :percpu)
+PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
#endif
INIT_TEXT_SECTION(PAGE_SIZE)
@@ -305,7 +305,7 @@ SECTIONS
}
#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
-PERCPU(THREAD_SIZE)
+PERCPU(INTERNODE_CACHE_BYTES, THREAD_SIZE)
#endif
. = ALIGN(PAGE_SIZE);

arch/x86/lib/Makefile

@@ -42,4 +42,5 @@ else
lib-y += memmove_64.o memset_64.o
lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
+lib-y += cmpxchg16b_emu.o
endif

arch/x86/lib/cmpxchg16b_emu.S

@@ -0,0 +1,59 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*
*/
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/frame.h>
#include <asm/dwarf2.h>
.text
/*
* Inputs:
* %rsi : memory location to compare
* %rax : low 64 bits of old value
* %rdx : high 64 bits of old value
* %rbx : low 64 bits of new value
* %rcx : high 64 bits of new value
* %al : Operation successful
*/
ENTRY(this_cpu_cmpxchg16b_emu)
CFI_STARTPROC
#
# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
# via the ZF. Caller will access %al to get result.
#
# Note that this is only useful for a cpuops operation. Meaning that we
# do *not* have a fully atomic operation but just an operation that is
# *atomic* on a single cpu (as provided by the this_cpu_xx class of
# macros).
#
this_cpu_cmpxchg16b_emu:
pushf
cli
cmpq %gs:(%rsi), %rax
jne not_same
cmpq %gs:8(%rsi), %rdx
jne not_same
movq %rbx, %gs:(%rsi)
movq %rcx, %gs:8(%rsi)
popf
mov $1, %al
ret
not_same:
popf
xor %al,%al
ret
CFI_ENDPROC
ENDPROC(this_cpu_cmpxchg16b_emu)
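
In C terms, the emulation performs roughly the following (a sketch of the semantics only, with made-up names; the real code must stay in assembly so the alternatives mechanism can patch the call site with a bare cmpxchg16b):

    #include <linux/irqflags.h>
    #include <linux/types.h>

    struct u128_halves {
            u64 lo;		/* %rax (old) / %rbx (new) */
            u64 hi;		/* %rdx (old) / %rcx (new) */
    };

    /* Illustrative model of this_cpu_cmpxchg16b_emu. */
    static bool cmpxchg16b_emu_model(struct u128_halves *slot, /* %gs:(%rsi) */
                                     u64 old_lo, u64 old_hi,
                                     u64 new_lo, u64 new_hi)
    {
            unsigned long flags;
            bool ok = false;

            local_irq_save(flags);			/* pushf; cli */
            if (slot->lo == old_lo && slot->hi == old_hi) {
                    slot->lo = new_lo;
                    slot->hi = new_hi;
                    ok = true;
            }
            local_irq_restore(flags);		/* popf */
            return ok;				/* returned in %al */
    }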

arch/xtensa/kernel/vmlinux.lds.S

@@ -155,7 +155,7 @@ SECTIONS
INIT_RAM_FS
}
-PERCPU(PAGE_SIZE)
+PERCPU(XCHAL_ICACHE_LINESIZE, PAGE_SIZE)
/* We need this dummy segment here */

include/asm-generic/vmlinux.lds.h

@@ -15,7 +15,7 @@
* HEAD_TEXT_SECTION
* INIT_TEXT_SECTION(PAGE_SIZE)
* INIT_DATA_SECTION(...)
-* PERCPU(PAGE_SIZE)
+* PERCPU(CACHELINE_SIZE, PAGE_SIZE)
* __init_end = .;
*
* _stext = .;
@@ -683,13 +683,18 @@
/**
* PERCPU_VADDR - define output section for percpu area
+* @cacheline: cacheline size
* @vaddr: explicit base address (optional)
* @phdr: destination PHDR (optional)
*
-* Macro which expands to output section for percpu area. If @vaddr
-* is not blank, it specifies explicit base address and all percpu
-* symbols will be offset from the given address. If blank, @vaddr
-* always equals @laddr + LOAD_OFFSET.
+* Macro which expands to output section for percpu area.
+*
+* @cacheline is used to align subsections to avoid false cacheline
+* sharing between subsections for different purposes.
+*
+* If @vaddr is not blank, it specifies explicit base address and all
+* percpu symbols will be offset from the given address. If blank,
+* @vaddr always equals @laddr + LOAD_OFFSET.
*
* @phdr defines the output PHDR to use if not blank. Be warned that
* output PHDR is sticky. If @phdr is specified, the next output
@@ -700,7 +705,7 @@
* If there is no need to put the percpu section at a predetermined
* address, use PERCPU().
*/
-#define PERCPU_VADDR(vaddr, phdr) \
+#define PERCPU_VADDR(cacheline, vaddr, phdr) \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
- LOAD_OFFSET) { \
@@ -708,7 +713,9 @@
*(.data..percpu..first) \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
+. = ALIGN(cacheline); \
*(.data..percpu..readmostly) \
+. = ALIGN(cacheline); \
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
@@ -717,18 +724,18 @@
/**
* PERCPU - define output section for percpu area, simple version
+* @cacheline: cacheline size
* @align: required alignment
*
-* Align to @align and outputs output section for percpu area. This
-* macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
+* Align to @align and outputs output section for percpu area. This macro
+* doesn't manipulate @vaddr or @phdr and __per_cpu_load and
* __per_cpu_start will be identical.
*
-* This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
-* that __per_cpu_load is defined as a relative symbol against
-* .data..percpu which is required for relocatable x86_32
-* configuration.
+* This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,)
+* except that __per_cpu_load is defined as a relative symbol against
+* .data..percpu which is required for relocatable x86_32 configuration.
*/
-#define PERCPU(align) \
+#define PERCPU(cacheline, align) \
. = ALIGN(align); \
.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
@@ -736,7 +743,9 @@
*(.data..percpu..first) \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
+. = ALIGN(cacheline); \
*(.data..percpu..readmostly) \
+. = ALIGN(cacheline); \
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
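
For reference, the subsections ordered above are fed by the per-CPU definition macros; roughly (illustrative declarations, not part of this patch):

    DEFINE_PER_CPU_FIRST(unsigned long, first_var);   /* .data..percpu..first */
    DEFINE_PER_CPU_PAGE_ALIGNED(long, pg_var);        /* ..page_aligned */
    DEFINE_PER_CPU_READ_MOSTLY(int, ro_flag);         /* ..readmostly */
    DEFINE_PER_CPU(unsigned long, counter);           /* .data..percpu */
    DEFINE_PER_CPU_SHARED_ALIGNED(long, hot_var);     /* ..shared_aligned */

The two new ALIGN(cacheline) entries keep frequently written data like counter from sharing a cache line with the read-mostly subsection next to it.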

include/linux/percpu.h

@@ -255,6 +255,30 @@ extern void __bad_size_call_parameter(void);
pscr2_ret__; \
})
/*
* Special handling for cmpxchg_double. cmpxchg_double is passed two
* percpu variables. The first has to be aligned to a double word
* boundary and the second has to follow directly thereafter.
*/
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
({ \
bool pdcrb_ret__; \
__verify_pcpu_ptr(&pcp1); \
BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \
VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \
VM_BUG_ON((unsigned long)(&pcp2) != \
(unsigned long)(&pcp1) + sizeof(pcp1)); \
switch(sizeof(pcp1)) { \
case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
default: \
__bad_size_call_parameter(); break; \
} \
pdcrb_ret__; \
})
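
The VM_BUG_ON checks encode the layout contract: the first variable must be aligned to twice its own size and the second must sit immediately after it. One declaration that satisfies this (hypothetical names, for illustration):

    struct paired {
            void *ptr;              /* pcp1: the first word of the pair */
            unsigned long seq;      /* pcp2: directly follows pcp1 */
    } __aligned(2 * sizeof(void *));

    static DEFINE_PER_CPU(struct paired, paired_var);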
#define __pcpu_size_call(stem, variable, ...) \
do { \
__verify_pcpu_ptr(&(variable)); \
@@ -500,6 +524,45 @@ do { \
__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#endif
/*
* cmpxchg_double replaces two adjacent scalars at once. The first
* two parameters are per cpu variables which have to be of the same
* size. A truth value is returned to indicate success or failure
* (since a double register result is difficult to handle). There is
* very limited hardware support for these operations, so only certain
* sizes may work.
*/
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
int ret__; \
preempt_disable(); \
ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
oval1, oval2, nval1, nval2); \
preempt_enable(); \
ret__; \
})
#ifndef this_cpu_cmpxchg_double
# ifndef this_cpu_cmpxchg_double_1
# define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
# define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
# define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
# define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
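
Putting it together with the struct paired sketch above (again hypothetical names; this mirrors how the slub/lockless work pairs a freelist pointer with a transaction id):

    static bool update_pair(void *old_ptr, unsigned long old_seq,
                            void *new_ptr)
    {
            /* Succeeds only if both words are still unchanged; bumping
             * the sequence number defeats ABA on the pointer. */
            return this_cpu_cmpxchg_double(paired_var.ptr, paired_var.seq,
                                           old_ptr, old_seq,
                                           new_ptr, old_seq + 1);
    }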
/*
* Generic percpu operations that do not require preemption handling.
* Either we do not care about races or the caller has the
@@ -703,6 +766,39 @@ do { \
__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif
#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
int __ret = 0; \
if (__this_cpu_read(pcp1) == (oval1) && \
__this_cpu_read(pcp2) == (oval2)) { \
__this_cpu_write(pcp1, (nval1)); \
__this_cpu_write(pcp2, (nval2)); \
__ret = 1; \
} \
(__ret); \
})
#ifndef __this_cpu_cmpxchg_double
# ifndef __this_cpu_cmpxchg_double_1
# define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_2
# define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_4
# define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_8
# define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
/*
* IRQ safe versions of the per cpu RMW operations. Note that these operations
* are *not* safe against modification of the same variable from another
@@ -823,4 +919,36 @@ do { \
__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
#endif
#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
int ret__; \
unsigned long flags; \
local_irq_save(flags); \
ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
oval1, oval2, nval1, nval2); \
local_irq_restore(flags); \
ret__; \
})
#ifndef irqsafe_cpu_cmpxchg_double
# ifndef irqsafe_cpu_cmpxchg_double_1
# define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_2
# define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_4
# define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_8
# define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__pcpu_double_call_return_int(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
#endif /* __LINUX_PERCPU_H */