x86/asm: Clean up copy_page_*() comments and code

Modern CPUs use fast-string instructions to accelerate copy
performance, combining data into 128-bit chunks internally.

Modify the comments and coding style to match that.

Signed-off-by: Ma Ling <ling.ma@intel.com>
Cc: iant@google.com
Link: http://lkml.kernel.org/r/1350503565-19167-1-git-send-email-ling.ma@intel.com
[ Cleaned up the clean up. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author:    Ma Ling <ling.ma@intel.com>  (2012-10-18 03:52:45 +08:00)
Committer: Ingo Molnar <mingo@kernel.org>
Commit:    269833bd5a
Parent:    0e9e3e306c

1 changed file with 56 additions and 58 deletions
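For reference, the copy_page_rep() path in the diff below relies entirely on the CPU's fast-string "rep movsq" microcode rather than an explicit unrolled loop. A minimal user-space sketch of what that path boils down to (illustrative only, not kernel code; the function name and the inline-asm wrapper are assumptions made for this example):

#include <stddef.h>

#define PAGE_SIZE 4096

/* Copy one 4 KiB page with a single "rep movsq"; on fast-string
 * hardware the microcode moves the data in wide internal chunks. */
static void copy_page_rep_sketch(void *to, const void *from)
{
	size_t qwords = PAGE_SIZE / 8;	/* 512 quadwords per page */

	asm volatile("rep movsq"
		     : "+D" (to), "+S" (from), "+c" (qwords)
		     :
		     : "memory");
}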

@@ -5,91 +5,89 @@
 #include <asm/alternative-asm.h>
 
 	ALIGN
-copy_page_c:
+copy_page_rep:
 	CFI_STARTPROC
-	movl $4096/8,%ecx
+	movl	$4096/8, %ecx
 	rep movsq
 	ret
 	CFI_ENDPROC
-ENDPROC(copy_page_c)
+ENDPROC(copy_page_rep)
 
-/* Don't use streaming store because it's better when the target
-   ends up in cache. */
-
-/* Could vary the prefetch distance based on SMP/UP */
+/*
+ * Don't use streaming copy unless the CPU indicates X86_FEATURE_REP_GOOD.
+ * Could vary the prefetch distance based on SMP/UP.
+ */
 
 ENTRY(copy_page)
 	CFI_STARTPROC
-	subq	$2*8,%rsp
+	subq	$2*8, %rsp
 	CFI_ADJUST_CFA_OFFSET 2*8
-	movq	%rbx,(%rsp)
+	movq	%rbx, (%rsp)
 	CFI_REL_OFFSET rbx, 0
-	movq	%r12,1*8(%rsp)
+	movq	%r12, 1*8(%rsp)
 	CFI_REL_OFFSET r12, 1*8
 
-	movl	$(4096/64)-5,%ecx
+	movl	$(4096/64)-5, %ecx
 	.p2align 4
 .Loop64:
 	dec	%rcx
-
-	movq	   (%rsi), %rax
-	movq	 8 (%rsi), %rbx
-	movq	16 (%rsi), %rdx
-	movq	24 (%rsi), %r8
-	movq	32 (%rsi), %r9
-	movq	40 (%rsi), %r10
-	movq	48 (%rsi), %r11
-	movq	56 (%rsi), %r12
+	movq	0x8*0(%rsi), %rax
+	movq	0x8*1(%rsi), %rbx
+	movq	0x8*2(%rsi), %rdx
+	movq	0x8*3(%rsi), %r8
+	movq	0x8*4(%rsi), %r9
+	movq	0x8*5(%rsi), %r10
+	movq	0x8*6(%rsi), %r11
+	movq	0x8*7(%rsi), %r12
 
 	prefetcht0 5*64(%rsi)
 
-	movq	%rax,	 (%rdi)
-	movq	%rbx,  8 (%rdi)
-	movq	%rdx, 16 (%rdi)
-	movq	%r8,  24 (%rdi)
-	movq	%r9,  32 (%rdi)
-	movq	%r10, 40 (%rdi)
-	movq	%r11, 48 (%rdi)
-	movq	%r12, 56 (%rdi)
+	movq	%rax, 0x8*0(%rdi)
+	movq	%rbx, 0x8*1(%rdi)
+	movq	%rdx, 0x8*2(%rdi)
+	movq	%r8,  0x8*3(%rdi)
+	movq	%r9,  0x8*4(%rdi)
+	movq	%r10, 0x8*5(%rdi)
+	movq	%r11, 0x8*6(%rdi)
+	movq	%r12, 0x8*7(%rdi)
 
 	leaq	64 (%rsi), %rsi
 	leaq	64 (%rdi), %rdi
 
 	jnz	.Loop64
 
-	movl	$5,%ecx
+	movl	$5, %ecx
 	.p2align 4
 .Loop2:
 	decl	%ecx
 
-	movq	   (%rsi), %rax
-	movq	 8 (%rsi), %rbx
-	movq	16 (%rsi), %rdx
-	movq	24 (%rsi), %r8
-	movq	32 (%rsi), %r9
-	movq	40 (%rsi), %r10
-	movq	48 (%rsi), %r11
-	movq	56 (%rsi), %r12
+	movq	0x8*0(%rsi), %rax
+	movq	0x8*1(%rsi), %rbx
+	movq	0x8*2(%rsi), %rdx
+	movq	0x8*3(%rsi), %r8
+	movq	0x8*4(%rsi), %r9
+	movq	0x8*5(%rsi), %r10
+	movq	0x8*6(%rsi), %r11
+	movq	0x8*7(%rsi), %r12
 
-	movq	%rax,	 (%rdi)
-	movq	%rbx,  8 (%rdi)
-	movq	%rdx, 16 (%rdi)
-	movq	%r8,  24 (%rdi)
-	movq	%r9,  32 (%rdi)
-	movq	%r10, 40 (%rdi)
-	movq	%r11, 48 (%rdi)
-	movq	%r12, 56 (%rdi)
+	movq	%rax, 0x8*0(%rdi)
+	movq	%rbx, 0x8*1(%rdi)
+	movq	%rdx, 0x8*2(%rdi)
+	movq	%r8,  0x8*3(%rdi)
+	movq	%r9,  0x8*4(%rdi)
+	movq	%r10, 0x8*5(%rdi)
+	movq	%r11, 0x8*6(%rdi)
+	movq	%r12, 0x8*7(%rdi)
 
-	leaq	64(%rdi),%rdi
-	leaq	64(%rsi),%rsi
+	leaq	64(%rdi), %rdi
+	leaq	64(%rsi), %rsi
 
 	jnz	.Loop2
 
-	movq	(%rsp),%rbx
+	movq	(%rsp), %rbx
 	CFI_RESTORE rbx
-	movq	1*8(%rsp),%r12
+	movq	1*8(%rsp), %r12
 	CFI_RESTORE r12
-	addq	$2*8,%rsp
+	addq	$2*8, %rsp
 	CFI_ADJUST_CFA_OFFSET -2*8
 	ret
 .Lcopy_page_end:
@@ -103,7 +101,7 @@ ENDPROC(copy_page)
 
 	.section .altinstr_replacement,"ax"
 1:	.byte 0xeb					/* jmp <disp8> */
-	.byte (copy_page_c - copy_page) - (2f - 1b)	/* offset */
+	.byte (copy_page_rep - copy_page) - (2f - 1b)	/* offset */
 2:
 	.previous
 	.section .altinstructions,"a"
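This second hunk only renames the alternatives target: at boot, apply_alternatives() uses the .altinstructions entry (keyed on X86_FEATURE_REP_GOOD) to patch the start of copy_page() with the short jmp assembled above, so the rep-movsq variant is selected by one-time code patching rather than a per-call branch. A rough C analogy of the resulting dispatch (the feature flag and the copy_page_unrolled() name are hypothetical; sketch only):

#include <stdbool.h>

/* Hypothetical flag and helpers standing in for the kernel's CPU-feature
 * test and the two assembly routines above; illustrative only. */
extern bool cpu_has_rep_good;                         /* X86_FEATURE_REP_GOOD     */
extern void copy_page_rep(void *to, void *from);      /* rep movsq path           */
extern void copy_page_unrolled(void *to, void *from); /* unrolled loop + prefetch */

void copy_page(void *to, void *from)
{
	/* The real kernel patches a jmp into copy_page() once at boot
	 * instead of evaluating this branch on every call. */
	if (cpu_has_rep_good)
		copy_page_rep(to, from);
	else
		copy_page_unrolled(to, from);
}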