a4831e08b7
o This patch moves the code to verify long mode and SSE to a common file. This code is now shared by trampoline.S, wakeup.S, boot/setup.S and boot/compressed/head.S o So far we used to do only a very limited check in trampoline.S, wakeup.S and in the 32bit entry point. Now all the entry paths are forced to do the exhaustive check, including SSE, because verify_cpu is shared. o I am keeping this patch as the last in the x86 relocatable series because the previous patches have already had quite some amount of testing done and I don't want to disturb that. That way, if a problem is introduced by this patch, at least it can be easily isolated. Signed-off-by: Eric W. Biederman <ebiederm@xmission.com> Signed-off-by: Vivek Goyal <vgoyal@in.ibm.com> Signed-off-by: Andi Kleen <ak@suse.de>
111 lines
3.0 KiB
ArmAsm
111 lines
3.0 KiB
ArmAsm
/*
|
|
*
|
|
* verify_cpu.S - Code for cpu long mode and SSE verification. This
|
|
* code has been borrowed from boot/setup.S and was introduced by
|
|
* Andi Kleen.
|
|
*
|
|
* Copyright (c) 2007 Andi Kleen (ak@suse.de)
|
|
* Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
|
|
* Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
|
|
*
|
|
* This source code is licensed under the GNU General Public License,
|
|
* Version 2. See the file COPYING for more details.
|
|
*
|
|
 * This is common code that verifies whether the CPU supports
 * long mode and SSE. It is not called directly; instead this
|
|
* file is included at various places and compiled in that context.
|
|
 * The following are the current users:
|
|
*
|
|
* This file is included by both 16bit and 32bit code.
|
|
*
|
|
* arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
|
|
* arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
|
|
 * arch/x86_64/kernel/trampoline.S: secondary processor verification (16bit)
|
|
 * arch/x86_64/kernel/acpi/wakeup.S: verification at resume (16bit)
|
|
*
|
|
* verify_cpu, returns the status of cpu check in register %eax.
|
|
* 0: Success 1: Failure
|
|
*
|
|
* The caller needs to check for the error code and take the action
|
|
* appropriately. Either display a message or halt.
|
|
*/
|
|
|
|
/*
 * verify_cpu: check that this CPU supports long mode (x86-64) and SSE.
 *
 * Included (not called cross-object) from both 16-bit and 32-bit entry
 * code, so only instructions valid in both contexts are used.
 *
 * Out:      %eax = 0 on success, 1 on failure
 * Clobbers: %eax, %ebx, %ecx, %edx, %di, flags
 *           (caller's EFLAGS are saved on entry and restored on exit)
 */
verify_cpu:

	pushfl				# Save caller passed flags
	pushl	$0			# Kill any dangerous flags
	popfl

	/* minimum CPUID flags for x86-64 */
	/* see http://www.x86-64.org/lists/discuss/msg02971.html */
/* SSE_MASK: CPUID leaf 1 EDX bits 25 (SSE) and 26 (SSE2) */
#define SSE_MASK ((1<<25)|(1<<26))
/* REQUIRED_MASK1: baseline leaf 1 EDX features (FPU, PSE, TSC, MSR,
 * PAE, CX8, PGE, CMOV, FXSR) the 64-bit kernel cannot run without */
#define REQUIRED_MASK1 ((1<<0)|(1<<3)|(1<<4)|(1<<5)|(1<<6)|(1<<8)|\
			(1<<13)|(1<<15)|(1<<24))
/* REQUIRED_MASK2: extended leaf 0x80000001 EDX bit 29 = Long Mode (LM) */
#define REQUIRED_MASK2 (1<<29)

	# Toggle the ID bit (bit 21) in EFLAGS; if the change does not
	# stick, the CPU predates CPUID and cannot be a 64-bit part.
	pushfl				# standard way to check for cpuid
	popl	%eax
	movl	%eax,%ebx
	xorl	$0x200000,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	cmpl	%eax,%ebx
	jz	verify_cpu_no_longmode	# cpu has no cpuid

	movl	$0x0,%eax		# See if cpuid 1 is implemented
	cpuid
	cmpl	$0x1,%eax
	jb	verify_cpu_no_longmode	# no cpuid 1

	# Leaf 0 left the vendor string in %ebx:%edx:%ecx; compare it
	# against "AuthenticAMD".  %di = 1 if AMD, else 0 — used later
	# to decide whether forcing SSE on via HWCR may be attempted.
	xor	%di,%di
	cmpl	$0x68747541,%ebx	# AuthenticAMD ("Auth")
	jnz	verify_cpu_noamd
	cmpl	$0x69746e65,%edx	# "enti"
	jnz	verify_cpu_noamd
	cmpl	$0x444d4163,%ecx	# "cAMD"
	jnz	verify_cpu_noamd
	mov	$1,%di			# cpu is from AMD

verify_cpu_noamd:
	# and+xor leaves %edx nonzero iff any required feature bit is
	# missing from CPUID leaf 1 EDX.
	movl	$0x1,%eax		# Does the cpu have what it takes
	cpuid
	andl	$REQUIRED_MASK1,%edx
	xorl	$REQUIRED_MASK1,%edx
	jnz	verify_cpu_no_longmode

	movl	$0x80000000,%eax	# See if extended cpuid is implemented
	cpuid
	cmpl	$0x80000001,%eax
	jb	verify_cpu_no_longmode	# no extended cpuid

	# Same and+xor trick on extended leaf EDX: fail unless the
	# Long Mode bit is set.
	movl	$0x80000001,%eax	# Does the cpu have what it takes
	cpuid
	andl	$REQUIRED_MASK2,%edx
	xorl	$REQUIRED_MASK2,%edx
	jnz	verify_cpu_no_longmode

verify_cpu_sse_test:
	movl	$1,%eax
	cpuid
	andl	$SSE_MASK,%edx
	cmpl	$SSE_MASK,%edx
	je	verify_cpu_sse_ok
	test	%di,%di
	jz	verify_cpu_no_longmode	# only try to force SSE on AMD
	# AMD HWCR MSR (0xc0010015): clearing bit 15 (SSE disable)
	# re-enables SSE reporting, then the CPUID check is retried once.
	movl	$0xc0010015,%ecx	# HWCR
	rdmsr
	btr	$15,%eax		# enable SSE
	wrmsr
	xor	%di,%di			# don't loop
	jmp	verify_cpu_sse_test	# try again

verify_cpu_no_longmode:
	popfl				# Restore caller passed flags
	movl	$1,%eax			# failure
	ret
verify_cpu_sse_ok:
	popfl				# Restore caller passed flags
	xorl	%eax, %eax		# success
	ret
|