[ARM] 3152/1: make various assembly local labels actually local (the rest)

Patch from Nicolas Pitre

For assembly labels to actually be local they must start with ".L", not
merely "."; otherwise they remain visible in the final link, clutter
kallsyms needlessly, and can make symbolic backtraces unclear. This patch
simply inserts an "L" where appropriate. The code itself is unchanged.
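
As an editor's illustration (not part of the original patch; the file name
demo.S, the symbols demo/.loop/.Lloop, and the arm-linux- toolchain prefix
are all invented), the GNU assembler only treats symbols starting with ".L"
as local -- a bare "." prefix is not enough:

    @ demo.S -- assemble and inspect the symbol table:
    @   arm-linux-as -o demo.o demo.S && arm-linux-nm demo.o
    @ (toolchain prefix may differ)
    @ ".loop" shows up in nm's output and would reach kallsyms;
    @ ".Lloop" is discarded by the assembler and never becomes a symbol.

            .text
            .global demo
    demo:   mov     r2, #8
    .loop:  subs    r2, r2, #1      @ "." alone: still a visible symbol
            bne     .loop
            mov     r2, #8
    .Lloop: subs    r2, r2, #1      @ ".L" prefix: assembler-local
            bne     .Lloop
            mov     pc, lr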

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Author: Nicolas Pitre, 2005-11-11 21:51:49 +0000; committed by Russell King
parent a9c4814d8d, commit 8adbb3718d
4 changed files with 61 additions and 59 deletions

arch/arm/lib/csumpartial.S

@@ -26,7 +26,7 @@ td1	.req	r4		@ save before use
 td2	.req	r5		@ save before use
 td3	.req	lr
 
-.zero:		mov	r0, sum
+.Lzero:	mov	r0, sum
 		add	sp, sp, #4
 		ldr	pc, [sp], #4
 
@@ -34,8 +34,8 @@ td3	.req	lr
  * Handle 0 to 7 bytes, with any alignment of source and
  * destination pointers.  Note that when we get here, C = 0
  */
-.less8:		teq	len, #0			@ check for zero count
-		beq	.zero
+.Lless8:	teq	len, #0			@ check for zero count
+		beq	.Lzero
 
 /* we must have at least one byte. */
 		tst	buf, #1			@ odd address?
@@ -44,12 +44,12 @@ td3	.req	lr
 		subne	len, len, #1
 		adcnes	sum, sum, td0, put_byte_1
 
-.less4:		tst	len, #6
-		beq	.less8_byte
+.Lless4:	tst	len, #6
+		beq	.Lless8_byte
 
 /* we are now half-word aligned */
 
-.less8_wordlp:
+.Lless8_wordlp:
 #if __LINUX_ARM_ARCH__ >= 4
 		ldrh	td0, [buf], #2
 		sub	len, len, #2
@@ -65,19 +65,19 @@ td3	.req	lr
 #endif
 		adcs	sum, sum, td0
 		tst	len, #6
-		bne	.less8_wordlp
+		bne	.Lless8_wordlp
 
-.less8_byte:	tst	len, #1			@ odd number of bytes
+.Lless8_byte:	tst	len, #1			@ odd number of bytes
 		ldrneb	td0, [buf], #1		@ include last byte
 		adcnes	sum, sum, td0, put_byte_0	@ update checksum
 
-.done:		adc	r0, sum, #0		@ collect up the last carry
+.Ldone:	adc	r0, sum, #0		@ collect up the last carry
 		ldr	td0, [sp], #4
 		tst	td0, #1			@ check buffer alignment
 		movne	r0, r0, ror #8		@ rotate checksum by 8 bits
 		ldr	pc, [sp], #4		@ return
 
-.not_aligned:	tst	buf, #1			@ odd address
+.Lnot_aligned:	tst	buf, #1			@ odd address
 		ldrneb	td0, [buf], #1		@ make even
 		subne	len, len, #1
 		adcnes	sum, sum, td0, put_byte_1	@ update checksum
@@ -102,14 +102,14 @@ td3	.req	lr
 ENTRY(csum_partial)
 		stmfd	sp!, {buf, lr}
 		cmp	len, #8			@ Ensure that we have at least
-		blo	.less8			@ 8 bytes to copy.
+		blo	.Lless8			@ 8 bytes to copy.
 
 		tst	buf, #1
 		movne	sum, sum, ror #8
 
 		adds	sum, sum, #0		@ C = 0
 		tst	buf, #3			@ Test destination alignment
-		blne	.not_aligned		@ aligh destination, return here
+		blne	.Lnot_aligned		@ align destination, return here
 
 1:		bics	ip, len, #31
 		beq	3f
@@ -131,11 +131,11 @@ ENTRY(csum_partial)
 		ldmfd	sp!, {r4 - r5}
 
 3:		tst	len, #0x1c		@ should not change C
-		beq	.less4
+		beq	.Lless4
 
 4:		ldr	td0, [buf], #4
 		sub	len, len, #4
 		adcs	sum, sum, td0
 		tst	len, #0x1c
 		bne	4b
-		b	.less4
+		b	.Lless4

arch/arm/lib/csumpartialcopygeneric.S

@@ -22,7 +22,7 @@ dst	.req	r1
 len	.req	r2
 sum	.req	r3
 
-.zero:		mov	r0, sum
+.Lzero:	mov	r0, sum
 		load_regs	ea
 
 /*
@@ -31,8 +31,9 @@ sum	.req	r3
  * the length.  Note that the source pointer hasn't been
  * aligned yet.
  */
-.dst_unaligned:	tst	dst, #1
-		beq	.dst_16bit
+.Ldst_unaligned:
+		tst	dst, #1
+		beq	.Ldst_16bit
 
 		load1b	ip
 		sub	len, len, #1
@@ -41,7 +42,7 @@ sum	.req	r3
 		tst	dst, #2
 		moveq	pc, lr			@ dst is now 32bit aligned
 
-.dst_16bit:	load2b	r8, ip
+.Ldst_16bit:	load2b	r8, ip
 		sub	len, len, #2
 		adcs	sum, sum, r8, put_byte_0
 		strb	r8, [dst], #1
@@ -53,12 +54,12 @@ sum	.req	r3
  * Handle 0 to 7 bytes, with any alignment of source and
  * destination pointers.  Note that when we get here, C = 0
  */
-.less8:		teq	len, #0			@ check for zero count
-		beq	.zero
+.Lless8:	teq	len, #0			@ check for zero count
+		beq	.Lzero
 
 		/* we must have at least one byte. */
 		tst	dst, #1			@ dst 16-bit aligned
-		beq	.less8_aligned
+		beq	.Lless8_aligned
 
 		/* Align dst */
 		load1b	ip
@@ -66,7 +67,7 @@ sum	.req	r3
 		adcs	sum, sum, ip, put_byte_1	@ update checksum
 		strb	ip, [dst], #1
 		tst	len, #6
-		beq	.less8_byteonly
+		beq	.Lless8_byteonly
 
 1:		load2b	r8, ip
 		sub	len, len, #2
@@ -74,15 +75,16 @@ sum	.req	r3
 		strb	r8, [dst], #1
 		adcs	sum, sum, ip, put_byte_1
 		strb	ip, [dst], #1
-.less8_aligned:	tst	len, #6
+.Lless8_aligned:
+		tst	len, #6
 		bne	1b
-.less8_byteonly:
+.Lless8_byteonly:
 		tst	len, #1
-		beq	.done
+		beq	.Ldone
 		load1b	r8
 		adcs	sum, sum, r8, put_byte_0	@ update checksum
 		strb	r8, [dst], #1
-		b	.done
+		b	.Ldone
 
 FN_ENTRY
 		mov	ip, sp
@@ -90,11 +92,11 @@ FN_ENTRY
 		sub	fp, ip, #4
 
 		cmp	len, #8			@ Ensure that we have at least
-		blo	.less8			@ 8 bytes to copy.
+		blo	.Lless8			@ 8 bytes to copy.
 
 		adds	sum, sum, #0		@ C = 0
 		tst	dst, #3			@ Test destination alignment
-		blne	.dst_unaligned		@ align destination, return here
+		blne	.Ldst_unaligned		@ align destination, return here
 
 /*
  * Ok, the dst pointer is now 32bit aligned, and we know
@@ -103,7 +105,7 @@ FN_ENTRY
  */
 
 		tst	src, #3			@ Test source alignment
-		bne	.src_not_aligned
+		bne	.Lsrc_not_aligned
 
 		/* Routine for src & dst aligned */
 
@@ -136,17 +138,17 @@ FN_ENTRY
 		adcs	sum, sum, r4
 
 4:		ands	len, len, #3
-		beq	.done
+		beq	.Ldone
 		load1l	r4
 		tst	len, #2
 		mov	r5, r4, get_byte_0
-		beq	.exit
+		beq	.Lexit
 		adcs	sum, sum, r4, push #16
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_1
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_2
-.exit:		tst	len, #1
+.Lexit:	tst	len, #1
 		strneb	r5, [dst], #1
 		andne	r5, r5, #255
 		adcnes	sum, sum, r5, put_byte_0
@@ -157,20 +159,20 @@ FN_ENTRY
  * the inefficient byte manipulations in the
 * architecture independent code.
  */
-.done:		adc	r0, sum, #0
+.Ldone:	adc	r0, sum, #0
 		ldr	sum, [sp, #0]		@ dst
 		tst	sum, #1
 		movne	r0, r0, ror #8
 		load_regs	ea
 
-.src_not_aligned:
+.Lsrc_not_aligned:
 		adc	sum, sum, #0		@ include C from dst alignment
 		and	ip, src, #3
 		bic	src, src, #3
 		load1l	r5
 		cmp	ip, #2
-		beq	.src2_aligned
-		bhi	.src3_aligned
+		beq	.Lsrc2_aligned
+		bhi	.Lsrc3_aligned
 		mov	r4, r5, pull #8		@ C = 0
 		bics	ip, len, #15
 		beq	2f
@@ -211,18 +213,18 @@ FN_ENTRY
 		adcs	sum, sum, r4
 		mov	r4, r5, pull #8
 4:		ands	len, len, #3
-		beq	.done
+		beq	.Ldone
 		mov	r5, r4, get_byte_0
 		tst	len, #2
-		beq	.exit
+		beq	.Lexit
 		adcs	sum, sum, r4, push #16
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_1
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_2
-		b	.exit
+		b	.Lexit
 
-.src2_aligned:	mov	r4, r5, pull #16
+.Lsrc2_aligned:	mov	r4, r5, pull #16
 		adds	sum, sum, #0
 		bics	ip, len, #15
 		beq	2f
@@ -263,20 +265,20 @@ FN_ENTRY
 		adcs	sum, sum, r4
 		mov	r4, r5, pull #16
 4:		ands	len, len, #3
-		beq	.done
+		beq	.Ldone
 		mov	r5, r4, get_byte_0
 		tst	len, #2
-		beq	.exit
+		beq	.Lexit
 		adcs	sum, sum, r4
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_1
 		strb	r5, [dst], #1
 		tst	len, #1
-		beq	.done
+		beq	.Ldone
 		load1b	r5
-		b	.exit
+		b	.Lexit
 
-.src3_aligned:	mov	r4, r5, pull #24
+.Lsrc3_aligned:	mov	r4, r5, pull #24
 		adds	sum, sum, #0
 		bics	ip, len, #15
 		beq	2f
@@ -317,10 +319,10 @@ FN_ENTRY
 		adcs	sum, sum, r4
 		mov	r4, r5, pull #24
 4:		ands	len, len, #3
-		beq	.done
+		beq	.Ldone
 		mov	r5, r4, get_byte_0
 		tst	len, #2
-		beq	.exit
+		beq	.Lexit
 		strb	r5, [dst], #1
 		adcs	sum, sum, r4
 		load1l	r4
@@ -328,4 +330,4 @@ FN_ENTRY
 		strb	r5, [dst], #1
 		adcs	sum, sum, r4, push #24
 		mov	r5, r4, get_byte_1
-		b	.exit
+		b	.Lexit

arch/arm/lib/delay.S

@@ -11,7 +11,7 @@
 #include <asm/assembler.h>
 
 		.text
-LC0:		.word	loops_per_jiffy
+.LC0:		.word	loops_per_jiffy
 
 /*
  * 0 <= r0 <= 2000
@@ -21,7 +21,7 @@ ENTRY(__udelay)
 		orr	r2, r2, #0x00db
 		mul	r0, r2, r0
 ENTRY(__const_udelay)				@ 0 <= r0 <= 0x01ffffff
-		ldr	r2, LC0
+		ldr	r2, .LC0
 		ldr	r2, [r2]		@ max = 0x0fffffff
 		mov	r0, r0, lsr #11		@ max = 0x00003fff
 		mov	r2, r2, lsr #11		@ max = 0x0003ffff

arch/arm/lib/findbit.S

@@ -27,7 +27,7 @@ ENTRY(_find_first_zero_bit_le)
 		mov	r2, #0
 1:		ldrb	r3, [r0, r2, lsr #3]
 		eors	r3, r3, #0xff		@ invert bits
-		bne	.found			@ any now set - found zero bit
+		bne	.L_found		@ any now set - found zero bit
 		add	r2, r2, #8		@ next bit pointer
 2:		cmp	r2, r1			@ any more?
 		blo	1b
@@ -46,7 +46,7 @@ ENTRY(_find_next_zero_bit_le)
 		ldrb	r3, [r0, r2, lsr #3]
 		eor	r3, r3, #0xff		@ now looking for a 1 bit
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		bne	.found
+		bne	.L_found
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
@@ -61,7 +61,7 @@ ENTRY(_find_first_bit_le)
 		mov	r2, #0
 1:		ldrb	r3, [r0, r2, lsr #3]
 		movs	r3, r3
-		bne	.found			@ any now set - found zero bit
+		bne	.L_found		@ any now set - found zero bit
 		add	r2, r2, #8		@ next bit pointer
 2:		cmp	r2, r1			@ any more?
 		blo	1b
@@ -79,7 +79,7 @@ ENTRY(_find_next_bit_le)
 		beq	1b			@ If new byte, goto old routine
 		ldrb	r3, [r0, r2, lsr #3]
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		bne	.found
+		bne	.L_found
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
@@ -93,7 +93,7 @@ ENTRY(_find_first_zero_bit_be)
 1:		eor	r3, r2, #0x18		@ big endian byte ordering
 		ldrb	r3, [r0, r3, lsr #3]
 		eors	r3, r3, #0xff		@ invert bits
-		bne	.found			@ any now set - found zero bit
+		bne	.L_found		@ any now set - found zero bit
 		add	r2, r2, #8		@ next bit pointer
 2:		cmp	r2, r1			@ any more?
 		blo	1b
@@ -109,7 +109,7 @@ ENTRY(_find_next_zero_bit_be)
 		ldrb	r3, [r0, r3, lsr #3]
 		eor	r3, r3, #0xff		@ now looking for a 1 bit
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		bne	.found
+		bne	.L_found
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
@@ -121,7 +121,7 @@ ENTRY(_find_first_bit_be)
 1:		eor	r3, r2, #0x18		@ big endian byte ordering
 		ldrb	r3, [r0, r3, lsr #3]
 		movs	r3, r3
-		bne	.found			@ any now set - found zero bit
+		bne	.L_found		@ any now set - found zero bit
 		add	r2, r2, #8		@ next bit pointer
 2:		cmp	r2, r1			@ any more?
 		blo	1b
@@ -136,7 +136,7 @@ ENTRY(_find_next_bit_be)
 		eor	r3, r2, #0x18		@ big endian byte ordering
 		ldrb	r3, [r0, r3, lsr #3]
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		bne	.found
+		bne	.L_found
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
@@ -146,7 +146,7 @@ ENTRY(_find_next_bit_be)
 /*
  * One or more bits in the LSB of r3 are assumed to be set.
  */
-.found:
+.L_found:
 #if __LINUX_ARM_ARCH__ >= 5
 		rsb	r1, r3, #0
 		and	r3, r3, r1