Updated from ../=mpn/gmp-1.910

This commit is contained in:
Roland McGrath 1995-12-04 18:37:48 +00:00
parent 7def3d92a4
commit 2b30df1785
7 changed files with 489 additions and 2 deletions

76
sysdeps/m68k/add_n.S Normal file
View File

@ -0,0 +1,76 @@
/* mc68020 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
sum in a third limb vector.
Copyright (C) 1992, 1994 Free Software Foundation, Inc.
This file is part of the GNU MP Library.
The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Library General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
License for more details.
You should have received a copy of the GNU Library General Public License
along with the GNU MP Library; see the file COPYING.LIB. If not, write to
the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
/*
INPUT PARAMETERS (stack offsets at function entry, before the saves below)
res_ptr (sp + 4)
s1_ptr (sp + 8)
s2_ptr (sp + 12)
size (sp + 16)

Computes res_ptr[0..size-1] = s1_ptr[] + s2_ptr[] and returns the carry
out of the most significant limb in d0.  size must be > 0.
NOTE(review): the header previously listed s2_ptr at sp+16 and size at
sp+12 (swapped); the loads below read the arguments at sp+12/16/20/24
after two 4-byte register pushes, i.e. sp+4/8/12/16 on entry, in normal
C argument order.
*/
#include "asm-syntax.h"
TEXT
ALIGN
GLOBL ___mpn_add_n
LAB(___mpn_add_n)
/* Save used registers on the stack. */
INSN2(move,l ,MEM_PREDEC(sp),d2)
INSN2(move,l ,MEM_PREDEC(sp),a2)
/* Copy the arguments to registers.  Better use movem? */
INSN2(move,l ,a2,MEM_DISP(sp,12)) /* a2 = res_ptr */
INSN2(move,l ,a0,MEM_DISP(sp,16)) /* a0 = s1_ptr */
INSN2(move,l ,a1,MEM_DISP(sp,20)) /* a1 = s2_ptr */
INSN2(move,l ,d2,MEM_DISP(sp,24)) /* d2 = size (> 0) */
/* The loop is unrolled twice; turn size into a dbf count.  After the
   eor/lsr pair, carry is clear iff size was odd, and d2 holds size/2
   (rounded down when odd; fixed up by the subq when even). */
INSN2(eor,w ,d2,#1)
INSN2(lsr,l ,d2,#1)
bcc L1 /* odd size: enter mid-loop to handle the extra limb */
INSN2(subq,l ,d2,#1) /* clears cy as side effect */
LAB(Loop)
INSN2(move,l ,d0,MEM_POSTINC(a0)) /* d0 = *s1_ptr++ */
INSN2(move,l ,d1,MEM_POSTINC(a1)) /* d1 = *s2_ptr++ */
INSN2(addx,l ,d0,d1) /* d0 += d1 + X (carry from previous limb) */
INSN2(move,l ,MEM_POSTINC(a2),d0) /* *res_ptr++ = d0 */
LAB(L1) INSN2(move,l ,d0,MEM_POSTINC(a0))
INSN2(move,l ,d1,MEM_POSTINC(a1))
INSN2(addx,l ,d0,d1)
INSN2(move,l ,MEM_POSTINC(a2),d0)
dbf d2,Loop /* loop until 16 lsb of %4 == -1 */
INSN2(subx,l ,d0,d0) /* d0 <= -cy; save cy as 0 or -1 in d0 */
INSN2(sub,l ,d2,#0x10000) /* dbf only counts 16 bits; step the high half */
bcs L2 /* borrow: full 32-bit count exhausted */
INSN2(add,l ,d0,d0) /* restore cy */
bra Loop
LAB(L2)
INSN1(neg,l ,d0) /* d0 = cy as 0 or 1 for return */
/* Restore used registers from stack frame. */
INSN2(move,l ,a2,MEM_POSTINC(sp))
INSN2(move,l ,d2,MEM_POSTINC(sp))
rts

76
sysdeps/m68k/sub_n.S Normal file
View File

@ -0,0 +1,76 @@
/* mc68020 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
store difference in a third limb vector.
Copyright (C) 1992, 1994 Free Software Foundation, Inc.
This file is part of the GNU MP Library.
The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Library General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
License for more details.
You should have received a copy of the GNU Library General Public License
along with the GNU MP Library; see the file COPYING.LIB. If not, write to
the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
/*
INPUT PARAMETERS (stack offsets at function entry, before the saves below)
res_ptr (sp + 4)
s1_ptr (sp + 8)
s2_ptr (sp + 12)
size (sp + 16)

Computes res_ptr[0..size-1] = s1_ptr[] - s2_ptr[] and returns the borrow
out of the most significant limb in d0.  size must be > 0.
NOTE(review): the header previously listed s2_ptr at sp+16 and size at
sp+12 (swapped); the loads below read the arguments at sp+12/16/20/24
after two 4-byte register pushes, i.e. sp+4/8/12/16 on entry, in normal
C argument order.
*/
#include "asm-syntax.h"
TEXT
ALIGN
GLOBL ___mpn_sub_n
LAB(___mpn_sub_n)
/* Save used registers on the stack. */
INSN2(move,l ,MEM_PREDEC(sp),d2)
INSN2(move,l ,MEM_PREDEC(sp),a2)
/* Copy the arguments to registers.  Better use movem? */
INSN2(move,l ,a2,MEM_DISP(sp,12)) /* a2 = res_ptr */
INSN2(move,l ,a0,MEM_DISP(sp,16)) /* a0 = s1_ptr */
INSN2(move,l ,a1,MEM_DISP(sp,20)) /* a1 = s2_ptr */
INSN2(move,l ,d2,MEM_DISP(sp,24)) /* d2 = size (> 0) */
/* The loop is unrolled twice; turn size into a dbf count.  After the
   eor/lsr pair, carry is clear iff size was odd, and d2 holds size/2
   (rounded down when odd; fixed up by the subq when even). */
INSN2(eor,w ,d2,#1)
INSN2(lsr,l ,d2,#1)
bcc L1 /* odd size: enter mid-loop to handle the extra limb */
INSN2(subq,l ,d2,#1) /* clears cy as side effect */
LAB(Loop)
INSN2(move,l ,d0,MEM_POSTINC(a0)) /* d0 = *s1_ptr++ */
INSN2(move,l ,d1,MEM_POSTINC(a1)) /* d1 = *s2_ptr++ */
INSN2(subx,l ,d0,d1) /* d0 -= d1 + X (borrow from previous limb) */
INSN2(move,l ,MEM_POSTINC(a2),d0) /* *res_ptr++ = d0 */
LAB(L1) INSN2(move,l ,d0,MEM_POSTINC(a0))
INSN2(move,l ,d1,MEM_POSTINC(a1))
INSN2(subx,l ,d0,d1)
INSN2(move,l ,MEM_POSTINC(a2),d0)
dbf d2,Loop /* loop until 16 lsb of %4 == -1 */
INSN2(subx,l ,d0,d0) /* d0 <= -cy; save cy as 0 or -1 in d0 */
INSN2(sub,l ,d2,#0x10000) /* dbf only counts 16 bits; step the high half */
bcs L2 /* borrow: full 32-bit count exhausted */
INSN2(add,l ,d0,d0) /* restore cy */
bra Loop
LAB(L2)
INSN1(neg,l ,d0) /* d0 = cy as 0 or 1 for return */
/* Restore used registers from stack frame. */
INSN2(move,l ,a2,MEM_POSTINC(sp))
INSN2(move,l ,d2,MEM_POSTINC(sp))
rts

103
sysdeps/m88k/add_n.s Normal file
View File

@ -0,0 +1,103 @@
; mc88100 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
; sum in a third limb vector.
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
; This file is part of the GNU MP Library.
; The GNU MP Library is free software; you can redistribute it and/or modify
; it under the terms of the GNU Library General Public License as published by
; the Free Software Foundation; either version 2 of the License, or (at your
; option) any later version.
; The GNU MP Library is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
; License for more details.
; You should have received a copy of the GNU Library General Public License
; along with the GNU MP Library; see the file COPYING.LIB. If not, write to
; the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
; INPUT PARAMETERS
; res_ptr r2
; s1_ptr r3
; s2_ptr r4
; size r5
; OUTPUT: r2 = carry out of the most significant limb (0 or 1).
; This code has been optimized to run one instruction per clock, avoiding
; load stalls and writeback contention. As a result, the instruction
; order is not always natural.
; The speed is about 4.6 clocks/limb + 18 clocks/limb-vector on an 88100,
; but on the 88110, it seems to run much slower, 6.6 clocks/limb.
; The main loop is unrolled 8 ways.  Each unrolled limb group is 4
; instructions = 16 bytes, so an entry address into the loop is computed
; (r12) and pointers are pre-adjusted, letting size be any value > 0.
text
align 16
global ___mpn_add_n
___mpn_add_n:
ld r6,r3,0 ; read first limb from s1_ptr
extu r10,r5,3 ; r10 = size / 8 (count of 8-limb groups)
ld r7,r4,0 ; read first limb from s2_ptr
subu.co r5,r0,r5 ; (clear carry as side effect)
mak r5,r5,3<4> ; r5 = (-size mod 8) * 16 = byte offset into unrolled loop
bcnd eq0,r5,Lzero ; size multiple of 8: enter at the loop top
or r12,r0,lo16(Lbase)
or.u r12,r12,hi16(Lbase)
addu r12,r12,r5 ; r12 is address for entering in loop
extu r5,r5,2 ; divide by 4
subu r2,r2,r5 ; adjust res_ptr
subu r3,r3,r5 ; adjust s1_ptr
subu r4,r4,r5 ; adjust s2_ptr
or r8,r6,r0 ; copy first s1 limb where mid-loop entries expect it
jmp.n r12 ; enter the loop in the middle...
or r9,r7,r0 ; ...copying first s2 limb in the delay slot
Loop: addu r3,r3,32
st r8,r2,28
addu r4,r4,32
ld r6,r3,0
addu r2,r2,32
ld r7,r4,0
Lzero: subu r10,r10,1 ; add 0 + 8r limbs (adj loop cnt)
Lbase: ld r8,r3,4
addu.cio r6,r6,r7 ; add with both carry-in and carry-out
ld r9,r4,4
st r6,r2,0
ld r6,r3,8 ; add 7 + 8r limbs
addu.cio r8,r8,r9
ld r7,r4,8
st r8,r2,4
ld r8,r3,12 ; add 6 + 8r limbs
addu.cio r6,r6,r7
ld r9,r4,12
st r6,r2,8
ld r6,r3,16 ; add 5 + 8r limbs
addu.cio r8,r8,r9
ld r7,r4,16
st r8,r2,12
ld r8,r3,20 ; add 4 + 8r limbs
addu.cio r6,r6,r7
ld r9,r4,20
st r6,r2,16
ld r6,r3,24 ; add 3 + 8r limbs
addu.cio r8,r8,r9
ld r7,r4,24
st r8,r2,20
ld r8,r3,28 ; add 2 + 8r limbs
addu.cio r6,r6,r7
ld r9,r4,28
st r6,r2,24
bcnd.n ne0,r10,Loop ; add 1 + 8r limbs
addu.cio r8,r8,r9 ; (executed in the branch delay slot)
st r8,r2,28 ; store most significant limb
jmp.n r1 ; return, with the final addu.ci in the delay slot
addu.ci r2,r0,r0 ; return carry-out from most sign. limb

128
sysdeps/m88k/mul_1.s Normal file
View File

@ -0,0 +1,128 @@
; mc88100 __mpn_mul_1 -- Multiply a limb vector with a single limb and
; store the product in a second limb vector.
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
; This file is part of the GNU MP Library.
; The GNU MP Library is free software; you can redistribute it and/or modify
; it under the terms of the GNU Library General Public License as published by
; the Free Software Foundation; either version 2 of the License, or (at your
; option) any later version.
; The GNU MP Library is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
; License for more details.
; You should have received a copy of the GNU Library General Public License
; along with the GNU MP Library; see the file COPYING.LIB. If not, write to
; the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
; INPUT PARAMETERS
; res_ptr r2
; s1_ptr r3
; size r4
; s2_limb r5
; OUTPUT: r2 = the most significant (carry) limb of the product.
; The 88100 mul yields only the low 32 bits of a 32x32 product, so the
; general path builds the full 64-bit limb product from four 16x16
; partial products; the Lsmall path needs only two when hi(S2_LIMB)==0.
; Common overhead is about 11 cycles/invocation.
; The speed for S2_LIMB >= 0x10000 is approximately 21 cycles/limb. (The
; pipeline stalls 2 cycles due to WB contention.)
; The speed for S2_LIMB < 0x10000 is approximately 16 cycles/limb. (The
; pipeline stalls 2 cycles due to WB contention and 1 cycle due to latency.)
; To enhance speed:
; 1. Unroll main loop 4-8 times.
; 2. Schedule code to avoid WB contention. It might be tempting to move the
; ld instruction in the loops down to save 2 cycles (less WB contention),
; but that loses because the ultimate value will be read from outside
; the allocated space. But if we handle the ultimate multiplication in
; the tail, we can do this.
; 3. Make the multiplication with less instructions. I think the code for
; (S2_LIMB >= 0x10000) is not minimal.
; With these techniques the (S2_LIMB >= 0x10000) case would run in 17 or
; less cycles/limb; the (S2_LIMB < 0x10000) case would run in 11
; cycles/limb. (Assuming infinite unrolling.)
text
align 16
global ___mpn_mul_1
___mpn_mul_1:
; Make S1_PTR and RES_PTR point at the end of their blocks
; and negate SIZE.
lda r3,r3[r4] ; r3 = s1_ptr + 4*size (scaled index)
lda r6,r2[r4] ; RES_PTR in r6 since r2 is retval
subu r4,r0,r4 ; r4 = -size; counts up toward 0
addu.co r2,r0,r0 ; r2 = cy = 0
ld r9,r3[r4] ; r9 = first s1 limb
mask r7,r5,0xffff ; r7 = lo(S2_LIMB)
extu r8,r5,16 ; r8 = hi(S2_LIMB)
bcnd.n eq0,r8,Lsmall ; jump if (hi(S2_LIMB) == 0)
subu r6,r6,4 ; (delay slot) bias res_ptr: stores use r6[r4] after r4++
; General code for any value of S2_LIMB.
; Make a stack frame and save r25 and r26
subu r31,r31,16
st.d r25,r31,8
; Enter the loop in the middle
br.n L1
addu r4,r4,1 ; (delay slot) advance the negative index
Loop:
ld r9,r3[r4] ; r9 = next s1 limb
st r26,r6[r4] ; store previous low product word
; bcnd ne0,r0,0 ; bubble
addu r4,r4,1
L1: mul r26,r9,r5 ; low word of product mul_1 WB ld
mask r12,r9,0xffff ; r12 = lo(s1_limb) mask_1
mul r11,r12,r7 ; r11 = prod_0 mul_2 WB mask_1
mul r10,r12,r8 ; r10 = prod_1a mul_3
extu r13,r9,16 ; r13 = hi(s1_limb) extu_1 WB mul_1
mul r12,r13,r7 ; r12 = prod_1b mul_4 WB extu_1
mul r25,r13,r8 ; r25 = prod_2 mul_5 WB mul_2
extu r11,r11,16 ; r11 = hi(prod_0) extu_2 WB mul_3
addu r10,r10,r11 ; addu_1 WB extu_2
; bcnd ne0,r0,0 ; bubble WB addu_1
addu.co r10,r10,r12 ; WB mul_4
mask.u r10,r10,0xffff ; move the 16 most significant bits...
addu.ci r10,r10,r0 ; ...to the low half of the word...
rot r10,r10,16 ; ...and put carry in pos 16.
addu.co r26,r26,r2 ; add old carry limb
bcnd.n ne0,r4,Loop
addu.ci r2,r25,r10 ; compute new carry limb (in the delay slot)
st r26,r6[r4] ; store last low product word
ld.d r25,r31,8 ; restore r25/r26
jmp.n r1 ; return, with stack unwind in the delay slot
addu r31,r31,16
; Fast code for S2_LIMB < 0x10000
Lsmall:
; Enter the loop in the middle.  r8 is reused here as the low product word.
br.n SL1
addu r4,r4,1 ; (delay slot) advance the negative index
SLoop:
ld r9,r3[r4] ; r9 = next s1 limb
st r8,r6[r4] ; store previous result limb
addu r4,r4,1 ;
SL1: mul r8,r9,r5 ; low word of product
mask r12,r9,0xffff ; r12 = lo(s1_limb)
extu r13,r9,16 ; r13 = hi(s1_limb)
mul r11,r12,r7 ; r11 = prod_0
mul r12,r13,r7 ; r12 = prod_1b
addu.cio r8,r8,r2 ; add old carry limb
extu r10,r11,16 ; r11 = hi(prod_0)
addu r10,r10,r12 ;
bcnd.n ne0,r4,SLoop
extu r2,r10,16 ; r2 = new carry limb (in the delay slot)
jmp.n r1 ; return, storing the last limb in the delay slot
st r8,r6[r4]

104
sysdeps/m88k/sub_n.s Normal file
View File

@ -0,0 +1,104 @@
; mc88100 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
; store difference in a third limb vector.
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
; This file is part of the GNU MP Library.
; The GNU MP Library is free software; you can redistribute it and/or modify
; it under the terms of the GNU Library General Public License as published by
; the Free Software Foundation; either version 2 of the License, or (at your
; option) any later version.
; The GNU MP Library is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
; License for more details.
; You should have received a copy of the GNU Library General Public License
; along with the GNU MP Library; see the file COPYING.LIB. If not, write to
; the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
; INPUT PARAMETERS
; res_ptr r2
; s1_ptr r3
; s2_ptr r4
; size r5
; OUTPUT: r2 = borrow out of the most significant limb (0 or 1).
; This code has been optimized to run one instruction per clock, avoiding
; load stalls and writeback contention. As a result, the instruction
; order is not always natural.
; The speed is about 4.6 clocks/limb + 18 clocks/limb-vector on an 88100,
; but on the 88110, it seems to run much slower, 6.6 clocks/limb.
; The main loop is unrolled 8 ways.  Each unrolled limb group is 4
; instructions = 16 bytes, so an entry address into the loop is computed
; (r12) and pointers are pre-adjusted, letting size be any value > 0.
text
align 16
global ___mpn_sub_n
___mpn_sub_n:
ld r6,r3,0 ; read first limb from s1_ptr
extu r10,r5,3 ; r10 = size / 8 (count of 8-limb groups)
ld r7,r4,0 ; read first limb from s2_ptr
subu.co r5,r0,r5 ; (clear carry as side effect)
mak r5,r5,3<4> ; r5 = (-size mod 8) * 16 = byte offset into unrolled loop
bcnd eq0,r5,Lzero ; size multiple of 8: enter at the loop top
or r12,r0,lo16(Lbase)
or.u r12,r12,hi16(Lbase)
addu r12,r12,r5 ; r12 is address for entering in loop
extu r5,r5,2 ; divide by 4
subu r2,r2,r5 ; adjust res_ptr
subu r3,r3,r5 ; adjust s1_ptr
subu r4,r4,r5 ; adjust s2_ptr
or r8,r6,r0 ; copy first s1 limb where mid-loop entries expect it
jmp.n r12 ; enter the loop in the middle...
or r9,r7,r0 ; ...copying first s2 limb in the delay slot
Loop: addu r3,r3,32
st r8,r2,28
addu r4,r4,32
ld r6,r3,0
addu r2,r2,32
ld r7,r4,0
Lzero: subu r10,r10,1 ; subtract 0 + 8r limbs (adj loop cnt)
Lbase: ld r8,r3,4
subu.cio r6,r6,r7 ; subtract with both carry-in and carry-out
ld r9,r4,4
st r6,r2,0
ld r6,r3,8 ; subtract 7 + 8r limbs
subu.cio r8,r8,r9
ld r7,r4,8
st r8,r2,4
ld r8,r3,12 ; subtract 6 + 8r limbs
subu.cio r6,r6,r7
ld r9,r4,12
st r6,r2,8
ld r6,r3,16 ; subtract 5 + 8r limbs
subu.cio r8,r8,r9
ld r7,r4,16
st r8,r2,12
ld r8,r3,20 ; subtract 4 + 8r limbs
subu.cio r6,r6,r7
ld r9,r4,20
st r6,r2,16
ld r6,r3,24 ; subtract 3 + 8r limbs
subu.cio r8,r8,r9
ld r7,r4,24
st r8,r2,20
ld r8,r3,28 ; subtract 2 + 8r limbs
subu.cio r6,r6,r7
ld r9,r4,28
st r6,r2,24
bcnd.n ne0,r10,Loop ; subtract 1 + 8r limbs
subu.cio r8,r8,r9 ; (executed in the branch delay slot)
st r8,r2,28 ; store most significant limb
addu.ci r2,r0,r0 ; return carry-out from most sign. limb
jmp.n r1 ; return...
xor r2,r2,1 ; ...complementing in the delay slot: the m88k carry
; flag after subu is "no borrow", but mpn wants 1 on borrow

View File

@ -45,7 +45,7 @@ __mpn_add_n:
bdz Lend # If done, skip loop
Loop: lu 8,4(4) # load s1 limb and update s1_ptr
lu 0,4(5) # load s2 limb and update s2_ptr
stu 7,4(3) # store previous limb in load latecny slot
stu 7,4(3) # store previous limb in load latency slot
ae 7,0,8 # add new limbs with cy, set cy
bdn Loop # decrement CTR and loop back
Lend: st 7,4(3) # store ultimate result limb

View File

@ -46,7 +46,7 @@ __mpn_sub_n:
bdz Lend # If done, skip loop
Loop: lu 8,4(4) # load s1 limb and update s1_ptr
lu 0,4(5) # load s2 limb and update s2_ptr
stu 7,4(3) # store previous limb in load latecny slot
stu 7,4(3) # store previous limb in load latency slot
sfe 7,0,8 # add new limbs with cy, set cy
bdn Loop # decrement CTR and loop back
Lend: st 7,4(3) # store ultimate result limb