Merge branch 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, asm: Use a lower case name for the end macro in atomic64_386_32.S
  x86, asm: Refactor atomic64_386_32.S to support old binutils and be cleaner
  x86: Document __phys_reloc_hide() usage in __pa_symbol()
  x86, apic: Map the local apic when parsing the MP table.
commit c029b55af7
Author: Linus Torvalds
Date:   2010-08-13 10:35:48 -07:00

4 files changed, 141 insertions(+), 96 deletions(-)

--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h

@@ -37,6 +37,13 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 #define __pa_nodebug(x)	__phys_addr_nodebug((unsigned long)(x))
 /* __pa_symbol should be used for C visible symbols.
    This seems to be the official gcc blessed way to do such arithmetic. */
+/*
+ * We need __phys_reloc_hide() here because gcc may assume that there is no
+ * overflow during __pa() calculation and can optimize it unexpectedly.
+ * Newer versions of gcc provide -fno-strict-overflow switch to handle this
+ * case properly. Once all supported versions of gcc understand it, we can
+ * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
+ */
 #define __pa_symbol(x)	__pa(__phys_reloc_hide((unsigned long)(x)))
 
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))

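The comment above says what can go wrong but not how the fix works. As a rough illustration (not the kernel's actual source), __phys_reloc_hide() follows the kernel's RELOC_HIDE() pattern: the address is funneled through an empty inline-asm statement, so gcc loses track of where the value came from and cannot apply its strict-overflow assumptions to the __pa() subtraction. The names reloc_hide, sym_phys and DEMO_PAGE_OFFSET below are illustrative only:

#include <stdio.h>

/*
 * Sketch of the RELOC_HIDE() idea. The asm body is empty, so no
 * instruction is emitted; tying the output register to the input
 * merely discards the optimizer's knowledge of the value's origin.
 */
#define reloc_hide(val)						\
({								\
	unsigned long __ptr;					\
	__asm__("" : "=r" (__ptr) : "0" ((unsigned long)(val)));\
	__ptr;							\
})

#define DEMO_PAGE_OFFSET 0xc0000000UL	/* PAGE_OFFSET on x86-32 */

/* Roughly the shape of __pa_symbol(): virtual address minus PAGE_OFFSET. */
static unsigned long sym_phys(const void *sym)
{
	return reloc_hide(sym) - DEMO_PAGE_OFFSET;
}

int main(void)
{
	printf("%#lx\n", sym_phys((const void *)0xc0100000UL));
	return 0;
}

Because the asm body is empty, the trick costs nothing at run time; it only stops the compiler from reasoning about the subtraction.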
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c

@@ -1606,7 +1606,7 @@ void __init init_apic_mappings(void)
 		 * acpi lapic path already maps that address in
 		 * acpi_register_lapic_address()
 		 */
-		if (!acpi_lapic)
+		if (!acpi_lapic && !smp_found_config)
 			set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
 
 		apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",

--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c

@@ -274,6 +274,18 @@ static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
 
 void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
 
+static void __init smp_register_lapic_address(unsigned long address)
+{
+	mp_lapic_addr = address;
+
+	set_fixmap_nocache(FIX_APIC_BASE, address);
+	if (boot_cpu_physical_apicid == -1U) {
+		boot_cpu_physical_apicid = read_apic_id();
+		apic_version[boot_cpu_physical_apicid] =
+			GET_APIC_VERSION(apic_read(APIC_LVR));
+	}
+}
+
 static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
 {
 	char str[16];
@@ -295,6 +307,10 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
 	if (early)
 		return 1;
 
+	/* Initialize the lapic mapping */
+	if (!acpi_lapic)
+		smp_register_lapic_address(mpc->lapic);
+
 	if (mpc->oemptr)
 		x86_init.mpparse.smp_read_mpc_oem(mpc);

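Together with the apic.c hunk above, these changes make whichever firmware path discovers the local APIC responsible for mapping it: the MP-table parser now maps the fixmap itself before read_apic_id() touches the APIC registers, and init_apic_mappings() only maps when neither the ACPI path nor the MP-table path already did. A condensed sketch of that policy; every name is a simplified stand-in, and in the real code smp_found_config is set earlier, during the MP-table scan:

/* Simplified stand-ins for the real kernel flags and helpers. */
static int acpi_lapic;		/* set by the ACPI MADT path */
static int smp_found_config;	/* set once an MP table is found */

static void map_lapic(unsigned long phys)
{
	/* stand-in for set_fixmap_nocache(FIX_APIC_BASE, phys) */
}

/* Mirrors smp_read_mpc(): map before reading APIC registers. */
static void parse_mp_table(unsigned long lapic_phys)
{
	smp_found_config = 1;
	if (!acpi_lapic)
		map_lapic(lapic_phys);	/* precedes read_apic_id() */
}

/* Mirrors init_apic_mappings(): map only if nobody else has. */
static void init_apic_mappings_sketch(unsigned long apic_phys)
{
	if (!acpi_lapic && !smp_found_config)
		map_lapic(apic_phys);
}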
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S

@@ -25,150 +25,172 @@
 	CFI_ADJUST_CFA_OFFSET -4
 .endm
 
-.macro BEGIN func reg
-$v = \reg
-
-ENTRY(atomic64_\func\()_386)
-	CFI_STARTPROC
-	LOCK $v
-
-.macro RETURN
-	UNLOCK $v
-	ret
-.endm
-
-.macro END_
-	CFI_ENDPROC
-ENDPROC(atomic64_\func\()_386)
-.purgem RETURN
-.purgem END_
-.purgem END
-.endm
-
-.macro END
-RETURN
-END_
-.endm
-.endm
+#define BEGIN(op) \
+.macro endp; \
+	CFI_ENDPROC; \
+ENDPROC(atomic64_##op##_386); \
+.purgem endp; \
+.endm; \
+ENTRY(atomic64_##op##_386); \
+	CFI_STARTPROC; \
+	LOCK v;
+
+#define ENDP endp
+
+#define RET \
+	UNLOCK v; \
+	ret
+
+#define RET_ENDP \
+	RET; \
+	ENDP
 
-BEGIN read %ecx
-	movl  ($v), %eax
-	movl 4($v), %edx
-END
+#define v %ecx
+BEGIN(read)
+	movl  (v), %eax
+	movl 4(v), %edx
+RET_ENDP
+#undef v
 
-BEGIN set %esi
-	movl %ebx,  ($v)
-	movl %ecx, 4($v)
-END
+#define v %esi
+BEGIN(set)
+	movl %ebx,  (v)
+	movl %ecx, 4(v)
+RET_ENDP
+#undef v
 
-BEGIN xchg %esi
-	movl  ($v), %eax
-	movl 4($v), %edx
-	movl %ebx,  ($v)
-	movl %ecx, 4($v)
-END
+#define v %esi
+BEGIN(xchg)
+	movl  (v), %eax
+	movl 4(v), %edx
+	movl %ebx,  (v)
+	movl %ecx, 4(v)
+RET_ENDP
+#undef v
 
-BEGIN add %ecx
-	addl %eax,  ($v)
-	adcl %edx, 4($v)
-END
+#define v %ecx
+BEGIN(add)
+	addl %eax,  (v)
+	adcl %edx, 4(v)
+RET_ENDP
+#undef v
 
-BEGIN add_return %ecx
-	addl  ($v), %eax
-	adcl 4($v), %edx
-	movl %eax,  ($v)
-	movl %edx, 4($v)
-END
+#define v %ecx
+BEGIN(add_return)
+	addl  (v), %eax
+	adcl 4(v), %edx
+	movl %eax,  (v)
+	movl %edx, 4(v)
+RET_ENDP
+#undef v
 
-BEGIN sub %ecx
-	subl %eax,  ($v)
-	sbbl %edx, 4($v)
-END
+#define v %ecx
+BEGIN(sub)
+	subl %eax,  (v)
+	sbbl %edx, 4(v)
+RET_ENDP
+#undef v
 
-BEGIN sub_return %ecx
+#define v %ecx
+BEGIN(sub_return)
 	negl %edx
 	negl %eax
 	sbbl $0, %edx
-	addl  ($v), %eax
-	adcl 4($v), %edx
-	movl %eax,  ($v)
-	movl %edx, 4($v)
-END
+	addl  (v), %eax
+	adcl 4(v), %edx
+	movl %eax,  (v)
+	movl %edx, 4(v)
+RET_ENDP
+#undef v
 
-BEGIN inc %esi
-	addl $1,  ($v)
-	adcl $0, 4($v)
-END
+#define v %esi
+BEGIN(inc)
+	addl $1,  (v)
+	adcl $0, 4(v)
+RET_ENDP
+#undef v
 
-BEGIN inc_return %esi
-	movl  ($v), %eax
-	movl 4($v), %edx
+#define v %esi
+BEGIN(inc_return)
+	movl  (v), %eax
+	movl 4(v), %edx
 	addl $1, %eax
 	adcl $0, %edx
-	movl %eax,  ($v)
-	movl %edx, 4($v)
-END
+	movl %eax,  (v)
+	movl %edx, 4(v)
+RET_ENDP
+#undef v
 
-BEGIN dec %esi
-	subl $1,  ($v)
-	sbbl $0, 4($v)
-END
+#define v %esi
+BEGIN(dec)
+	subl $1,  (v)
+	sbbl $0, 4(v)
+RET_ENDP
+#undef v
 
-BEGIN dec_return %esi
-	movl  ($v), %eax
-	movl 4($v), %edx
+#define v %esi
+BEGIN(dec_return)
+	movl  (v), %eax
+	movl 4(v), %edx
 	subl $1, %eax
 	sbbl $0, %edx
-	movl %eax,  ($v)
-	movl %edx, 4($v)
-END
+	movl %eax,  (v)
+	movl %edx, 4(v)
+RET_ENDP
+#undef v
 
-BEGIN add_unless %ecx
+#define v %ecx
+BEGIN(add_unless)
 	addl %eax, %esi
 	adcl %edx, %edi
-	addl  ($v), %eax
-	adcl 4($v), %edx
+	addl  (v), %eax
+	adcl 4(v), %edx
 	cmpl %eax, %esi
 	je 3f
 1:
-	movl %eax,  ($v)
-	movl %edx, 4($v)
+	movl %eax,  (v)
+	movl %edx, 4(v)
 	movl $1, %eax
 2:
-RETURN
+	RET
 3:
 	cmpl %edx, %edi
 	jne 1b
 	xorl %eax, %eax
 	jmp 2b
-END_
+ENDP
+#undef v
 
-BEGIN inc_not_zero %esi
-	movl  ($v), %eax
-	movl 4($v), %edx
+#define v %esi
+BEGIN(inc_not_zero)
+	movl  (v), %eax
+	movl 4(v), %edx
 	testl %eax, %eax
 	je 3f
 1:
 	addl $1, %eax
 	adcl $0, %edx
-	movl %eax,  ($v)
-	movl %edx, 4($v)
+	movl %eax,  (v)
+	movl %edx, 4(v)
 	movl $1, %eax
 2:
-RETURN
+	RET
 3:
 	testl %edx, %edx
 	jne 1b
 	jmp 2b
-END_
+ENDP
+#undef v
 
-BEGIN dec_if_positive %esi
-	movl  ($v), %eax
-	movl 4($v), %edx
+#define v %esi
+BEGIN(dec_if_positive)
+	movl  (v), %eax
+	movl 4(v), %edx
 	subl $1, %eax
 	sbbl $0, %edx
 	js 1f
-	movl %eax,  ($v)
-	movl %edx, 4($v)
+	movl %eax,  (v)
+	movl %edx, 4(v)
 1:
-END
+RET_ENDP
+#undef v
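
The pattern above works because all of the name-building now happens in the C preprocessor rather than in gas macros: atomic64_##op##_386 is pasted by cpp before the assembler ever sees the file, and v is a plain #define for a register, replacing the old "$v = \reg" register alias that very old binutils reportedly cannot assemble. A tiny runnable C analogue of the BEGIN/RET_ENDP bracketing; the _demo names are illustrative only:

#include <stdio.h>

/*
 * cpp token pasting builds the symbol name before the assembler (here,
 * the C compiler) ever sees the source -- the same mechanism the .S
 * file above now uses for atomic64_##op##_386.
 */
#define BEGIN(op)	void atomic64_##op##_386_demo(long long *v) {
#define RET_ENDP	}

BEGIN(inc)
	v[0] += 1;	/* stand-in for the addl/adcl pair on the halves */
RET_ENDP

int main(void)
{
	long long x = 41;

	atomic64_inc_386_demo(&x);
	printf("%lld\n", x);	/* prints 42 */
	return 0;
}

Since the concatenation is resolved by cpp, the .s text that reaches gas contains only ordinary symbols, which is presumably why the refactored file assembles even with very old binutils.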