/* More subroutines needed by GCC output code on some machines. */
/* Compile this one with gcc. */
/* Copyright (C) 1989-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"
#ifdef HAVE_GAS_HIDDEN
#define ATTRIBUTE_HIDDEN __attribute__ ((__visibility__ ("hidden")))
#else
#define ATTRIBUTE_HIDDEN
#endif
/* Work out the largest "word" size that we can deal with on this target. */
#if MIN_UNITS_PER_WORD > 4
# define LIBGCC2_MAX_UNITS_PER_WORD 8
#elif (MIN_UNITS_PER_WORD > 2 \
|| (MIN_UNITS_PER_WORD > 1 && __SIZEOF_LONG_LONG__ > 4))
# define LIBGCC2_MAX_UNITS_PER_WORD 4
#else
# define LIBGCC2_MAX_UNITS_PER_WORD MIN_UNITS_PER_WORD
#endif
/* Work out what word size we are using for this compilation.
The value can be set on the command line. */
#ifndef LIBGCC2_UNITS_PER_WORD
#define LIBGCC2_UNITS_PER_WORD LIBGCC2_MAX_UNITS_PER_WORD
#endif
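/* Illustrative example of how the macros above resolve (an assumption
   about a typical configuration, not part of the original file): on a
   target whose smallest multilib word is 8 bytes, MIN_UNITS_PER_WORD
   is 8, so LIBGCC2_MAX_UNITS_PER_WORD is 8 and Wtype/DWtype below are
   DImode/TImode; compiling this same file with
   -DLIBGCC2_UNITS_PER_WORD=4 instead yields the SImode/DImode
   variants.  */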
#if LIBGCC2_UNITS_PER_WORD <= LIBGCC2_MAX_UNITS_PER_WORD
#include "libgcc2.h"
#ifdef DECLARE_LIBRARY_RENAMES
DECLARE_LIBRARY_RENAMES
#endif
#if defined (L_negdi2)
DWtype
__negdi2 (DWtype u)
{
const DWunion uu = {.ll = u};
const DWunion w = { {.low = -uu.s.low,
.high = -uu.s.high - ((UWtype) -uu.s.low > 0) } };
return w.ll;
}
#endif
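/* Note on the borrow term above (added commentary): two's-complement
   negation is -u == ~u + 1, so the low word is negated directly and
   the high word absorbs a borrow whenever the low word was nonzero.
   Worked example, assuming 32-bit Wtype: u == 1 gives low ==
   0xFFFFFFFF, and since (UWtype) -uu.s.low > 0, high == -0 - 1 ==
   0xFFFFFFFF, i.e. the DWtype value -1.  */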
#ifdef L_addvsi3
Wtype
__addvSI3 (Wtype a, Wtype b)
{
Wtype w;
if (__builtin_add_overflow (a, b, &w))
abort ();
return w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
SItype
__addvsi3 (SItype a, SItype b)
{
SItype w;
if (__builtin_add_overflow (a, b, &w))
abort ();
return w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif
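/* Added commentary: the __addv, __subv, __mulv, __negv and __absv
   entry points implement trapping signed arithmetic; with -ftrapv the
   compiler emits calls to them instead of plain instructions, so
   signed overflow aborts rather than wrapping.  A minimal sketch of
   the effect (hypothetical, never compiled here):  */
#if 0
int
demo (int a, int b)
{
  /* Under -ftrapv this addition is lowered to a libcall such as
     __addvsi3 (a, b); demo (INT_MAX, 1) then aborts at run time.  */
  return a + b;
}
#endif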
#ifdef L_addvdi3
DWtype
__addvDI3 (DWtype a, DWtype b)
{
DWtype w;
if (__builtin_add_overflow (a, b, &w))
abort ();
return w;
}
#endif
#ifdef L_subvsi3
Wtype
__subvSI3 (Wtype a, Wtype b)
{
Wtype w;
if (__builtin_sub_overflow (a, b, &w))
abort ();
return w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
SItype
__subvsi3 (SItype a, SItype b)
{
SItype w;
if (__builtin_sub_overflow (a, b, &w))
abort ();
return w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif
#ifdef L_subvdi3
DWtype
__subvDI3 (DWtype a, DWtype b)
{
DWtype w;
if (__builtin_sub_overflow (a, b, &w))
abort ();
return w;
}
#endif
#ifdef L_mulvsi3
Wtype
__mulvSI3 (Wtype a, Wtype b)
{
Wtype w;
if (__builtin_mul_overflow (a, b, &w))
abort ();
return w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
SItype
__mulvsi3 (SItype a, SItype b)
{
SItype w;
if (__builtin_mul_overflow (a, b, &w))
abort ();
return w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif
#ifdef L_negvsi2
Wtype
__negvSI2 (Wtype a)
{
Wtype w;
if (__builtin_sub_overflow (0, a, &w))
abort ();
return w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
SItype
__negvsi2 (SItype a)
{
SItype w;
if (__builtin_sub_overflow (0, a, &w))
abort ();
return w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif
#ifdef L_negvdi2
DWtype
__negvDI2 (DWtype a)
{
DWtype w;
if (__builtin_sub_overflow (0, a, &w))
abort ();
return w;
}
#endif
#ifdef L_absvsi2
Wtype
__absvSI2 (Wtype a)
{
const Wtype v = 0 - (a < 0);
Wtype w;
if (__builtin_add_overflow (a, v, &w))
abort ();
return v ^ w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
SItype
__absvsi2 (SItype a)
{
const SItype v = 0 - (a < 0);
SItype w;
if (__builtin_add_overflow (a, v, &w))
abort ();
return v ^ w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif
#ifdef L_absvdi2
DWtype
__absvDI2 (DWtype a)
{
const DWtype v = 0 - (a < 0);
DWtype w;
if (__builtin_add_overflow (a, v, &w))
abort ();
return v ^ w;
}
#endif
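/* Added commentary on the branchless pattern above: v is 0 when
   a >= 0 and -1 when a < 0, so (a + v) ^ v is a for nonnegative a and
   ~(a - 1) == -a for negative a.  Worked example: a == -5 gives
   v == -1, w == -6, and v ^ w == ~(-6) == 5.  The overflow check
   fires exactly for the most negative value, whose absolute value is
   not representable.  */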
#ifdef L_mulvdi3
DWtype
__mulvDI3 (DWtype u, DWtype v)
{
/* The unchecked multiplication needs 3 Wtype x Wtype multiplications,
but the checked multiplication needs only two. */
const DWunion uu = {.ll = u};
const DWunion vv = {.ll = v};
if (__builtin_expect (uu.s.high == uu.s.low >> (W_TYPE_SIZE - 1), 1))
{
/* u fits in a single Wtype. */
if (__builtin_expect (vv.s.high == vv.s.low >> (W_TYPE_SIZE - 1), 1))
{
/* v fits in a single Wtype as well. */
/* A single multiplication. No overflow risk. */
return (DWtype) uu.s.low * (DWtype) vv.s.low;
}
else
{
/* Two multiplications. */
DWunion w0 = {.ll = (UDWtype) (UWtype) uu.s.low
* (UDWtype) (UWtype) vv.s.low};
DWunion w1 = {.ll = (UDWtype) (UWtype) uu.s.low
* (UDWtype) (UWtype) vv.s.high};
if (vv.s.high < 0)
w1.s.high -= uu.s.low;
if (uu.s.low < 0)
w1.ll -= vv.ll;
w1.ll += (UWtype) w0.s.high;
if (__builtin_expect (w1.s.high == w1.s.low >> (W_TYPE_SIZE - 1), 1))
{
w0.s.high = w1.s.low;
return w0.ll;
}
}
}
else
{
if (__builtin_expect (vv.s.high == vv.s.low >> (W_TYPE_SIZE - 1), 1))
{
/* v fits into a single Wtype. */
/* Two multiplications. */
DWunion w0 = {.ll = (UDWtype) (UWtype) uu.s.low
* (UDWtype) (UWtype) vv.s.low};
DWunion w1 = {.ll = (UDWtype) (UWtype) uu.s.high
* (UDWtype) (UWtype) vv.s.low};
if (uu.s.high < 0)
w1.s.high -= vv.s.low;
if (vv.s.low < 0)
w1.ll -= uu.ll;
w1.ll += (UWtype) w0.s.high;
if (__builtin_expect (w1.s.high == w1.s.low >> (W_TYPE_SIZE - 1), 1))
{
w0.s.high = w1.s.low;
return w0.ll;
}
}
else
{
/* A few sign checks and a single multiplication. */
if (uu.s.high >= 0)
{
if (vv.s.high >= 0)
{
if (uu.s.high == 0 && vv.s.high == 0)
{
const DWtype w = (UDWtype) (UWtype) uu.s.low
* (UDWtype) (UWtype) vv.s.low;
if (__builtin_expect (w >= 0, 1))
return w;
}
}
else
{
if (uu.s.high == 0 && vv.s.high == (Wtype) -1)
{
DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
* (UDWtype) (UWtype) vv.s.low};
ww.s.high -= uu.s.low;
if (__builtin_expect (ww.s.high < 0, 1))
return ww.ll;
}
}
}
else
{
if (vv.s.high >= 0)
{
if (uu.s.high == (Wtype) -1 && vv.s.high == 0)
{
DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
* (UDWtype) (UWtype) vv.s.low};
ww.s.high -= vv.s.low;
if (__builtin_expect (ww.s.high < 0, 1))
return ww.ll;
}
}
else
{
if ((uu.s.high & vv.s.high) == (Wtype) -1
&& (uu.s.low | vv.s.low) != 0)
{
DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
* (UDWtype) (UWtype) vv.s.low};
ww.s.high -= uu.s.low;
ww.s.high -= vv.s.low;
if (__builtin_expect (ww.s.high >= 0, 1))
return ww.ll;
}
}
}
}
}
/* Overflow. */
abort ();
}
#endif
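/* Added commentary on the two-multiplication cases above: when one
   operand fits in a single Wtype, the product is assembled from two
   word-by-word multiplications plus sign fix-ups, and it fits in a
   DWtype exactly when the intermediate w1 equals the sign extension
   of the final high word, which is what the
   w1.s.high == w1.s.low >> (W_TYPE_SIZE - 1) test verifies.  Every
   path that cannot prove the product representable falls through to
   abort.  */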
/* Unless shift functions are defined with full ANSI prototypes,
parameter b will be promoted to int if shift_count_type is smaller than an int. */
#ifdef L_lshrdi3
DWtype
__lshrdi3 (DWtype u, shift_count_type b)
{
if (b == 0)
return u;
const DWunion uu = {.ll = u};
const shift_count_type bm = W_TYPE_SIZE - b;
DWunion w;
if (bm <= 0)
{
w.s.high = 0;
w.s.low = (UWtype) uu.s.high >> -bm;
}
else
{
const UWtype carries = (UWtype) uu.s.high << bm;
w.s.high = (UWtype) uu.s.high >> b;
w.s.low = ((UWtype) uu.s.low >> b) | carries;
}
return w.ll;
}
#endif
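/* Worked example for __lshrdi3 (added, assuming 32-bit words):
   u == 0x123456789ABCDEF0, b == 8 gives bm == 24,
   carries == 0x12345678 << 24 == 0x78000000,
   w.s.high == 0x00123456 and w.s.low == 0x009ABCDE | 0x78000000,
   i.e. the expected 0x00123456789ABCDE.  For b >= W_TYPE_SIZE the
   bm <= 0 branch moves the high word down and zero-fills the rest.  */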
#ifdef L_ashldi3
DWtype
__ashldi3 (DWtype u, shift_count_type b)
{
if (b == 0)
return u;
const DWunion uu = {.ll = u};
const shift_count_type bm = W_TYPE_SIZE - b;
DWunion w;
if (bm <= 0)
{
w.s.low = 0;
w.s.high = (UWtype) uu.s.low << -bm;
}
else
{
const UWtype carries = (UWtype) uu.s.low >> bm;
w.s.low = (UWtype) uu.s.low << b;
w.s.high = ((UWtype) uu.s.high << b) | carries;
}
return w.ll;
}
#endif
#ifdef L_ashrdi3
DWtype
__ashrdi3 (DWtype u, shift_count_type b)
{
if (b == 0)
return u;
const DWunion uu = {.ll = u};
const shift_count_type bm = W_TYPE_SIZE - b;
DWunion w;
if (bm <= 0)
{
/* w.s.high = 1..1 or 0..0 */
w.s.high = uu.s.high >> (W_TYPE_SIZE - 1);
w.s.low = uu.s.high >> -bm;
}
else
{
const UWtype carries = (UWtype) uu.s.high << bm;
w.s.high = uu.s.high >> b;
w.s.low = ((UWtype) uu.s.low >> b) | carries;
}
return w.ll;
}
#endif
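/* Added note: __ashrdi3 differs from __lshrdi3 only in the fill bits;
   the high word is shifted as a signed value, so vacated positions
   replicate the sign bit.  Assuming 32-bit words,
   __ashrdi3 (-1, 40) == -1, while the logical shift of the same bit
   pattern yields 0x0000000000FFFFFF.  */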
#ifdef L_bswapsi2
SItype
__bswapsi2 (SItype u)
{
return ((((u) & 0xff000000u) >> 24)
| (((u) & 0x00ff0000u) >> 8)
| (((u) & 0x0000ff00u) << 8)
| (((u) & 0x000000ffu) << 24));
}
#endif
#ifdef L_bswapdi2
DItype
__bswapdi2 (DItype u)
{
return ((((u) & 0xff00000000000000ull) >> 56)
| (((u) & 0x00ff000000000000ull) >> 40)
| (((u) & 0x0000ff0000000000ull) >> 24)
| (((u) & 0x000000ff00000000ull) >> 8)
| (((u) & 0x00000000ff000000ull) << 8)
| (((u) & 0x0000000000ff0000ull) << 24)
| (((u) & 0x000000000000ff00ull) << 40)
| (((u) & 0x00000000000000ffull) << 56));
}
#endif
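/* Added examples: __bswapsi2 (0x12345678) == 0x78563412, and
   __bswapdi2 (0x0102030405060708) == 0x0807060504030201.  */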
#ifdef L_ffssi2
#undef int
int
__ffsSI2 (UWtype u)
{
UWtype count;
if (u == 0)
return 0;
count_trailing_zeros (count, u);
return count + 1;
}
#endif
#ifdef L_ffsdi2
#undef int
int
__ffsDI2 (DWtype u)
{
const DWunion uu = {.ll = u};
UWtype word, count, add;
if (uu.s.low != 0)
word = uu.s.low, add = 0;
else if (uu.s.high != 0)
word = uu.s.high, add = W_TYPE_SIZE;
else
return 0;
count_trailing_zeros (count, word);
return count + add + 1;
}
#endif
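/* Added note: ffs returns the 1-based index of the least significant
   set bit, or 0 for a zero argument, e.g. __ffsSI2 (0x10) == 5.  The
   DWtype version adds W_TYPE_SIZE when the set bit is in the high
   word.  */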
#ifdef L_muldi3
DWtype
__muldi3 (DWtype u, DWtype v)
{
const DWunion uu = {.ll = u};
const DWunion vv = {.ll = v};
DWunion w = {.ll = __umulsidi3 (uu.s.low, vv.s.low)};
w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
+ (UWtype) uu.s.high * (UWtype) vv.s.low);
return w.ll;
}
#endif
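/* Added commentary: __muldi3 is the schoolbook product.  __umulsidi3
   forms the full double-word product of the two low words; the two
   cross terms can only affect the high word, and the high-by-high
   term would shift entirely out of a DWtype, so it is dropped.  */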
#if (defined (L_udivdi3) || defined (L_divdi3) || \
defined (L_umoddi3) || defined (L_moddi3))
#if defined (sdiv_qrnnd)
#define L_udiv_w_sdiv
#endif
#endif
#ifdef L_udiv_w_sdiv
#if defined (sdiv_qrnnd)
#if (defined (L_udivdi3) || defined (L_divdi3) || \
defined (L_umoddi3) || defined (L_moddi3))
static inline __attribute__ ((__always_inline__))
#endif
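/* Added summary (the detailed case analysis is in the comments
   below): __udiv_w_sdiv computes the unsigned two-word by one-word
   division (a1*2^W_TYPE_SIZE + a0) / d on machines whose division
   primitive, sdiv_qrnnd, is signed.  The branches offset or halve the
   operands so that every sdiv_qrnnd call sees values it can handle,
   then repair the quotient q and remainder *rp.  */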
UWtype
__udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
{
UWtype q, r;
UWtype c0, c1, b1;
if ((Wtype) d >= 0)
{
if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
{
/* Dividend, divisor, and quotient are nonnegative. */
sdiv_qrnnd (q, r, a1, a0, d);
}
else
{
/* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d. */
sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
/* Divide (c1*2^32 + c0) by d. */
sdiv_qrnnd (q, r, c1, c0, d);
/* Add 2^31 to quotient. */
q += (UWtype) 1 << (W_TYPE_SIZE - 1);
}
}
else
{
b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
c1 = a1 >> 1; /* A/2 */
c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);
if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
{
sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
if ((d & 1) != 0)
{
if (r >= q)
r = r - q;
else if (q - r <= d)
{
r = r - q + d;
q--;
}
else
{
r = r - q + 2*d;
q -= 2;
}
}
}
else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
{
c1 = (b1 - 1) - c1;
c0 = ~c0; /* bitwise complement */
sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
q = ~q; /* (A/2)/b1 */
r = (b1 - 1) - r;
r = 2*r + (a0 & 1); /* A/(2*b1) */
if ((d & 1) != 0)
{
if (r >= q)
r = r - q;
else if (q - r <= d)
{
r = r - q + d;
q--;
}
else
{
r = r - q + 2*d;
q -= 2;
}
}
}
else /* Implies c1 = b1 */
{ /* Hence a1 = d - 1 = 2*b1 - 1 */
if (a0 >= -d)
{
q = -1;
r = a0 + d;
}
else
{
q = -2;
r = a0 + 2*d;
}
}
}
*rp = r;
return q;
}
#else
/* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
UWtype
__udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
UWtype a1 __attribute__ ((__unused__)),
UWtype a0 __attribute__ ((__unused__)),
UWtype d __attribute__ ((__unused__)))
{
return 0;
}
#endif
#endif
#if (defined (L_udivdi3) || defined (L_divdi3) || \
defined (L_umoddi3) || defined (L_moddi3) || \
defined (L_divmoddi4))
#define L_udivmoddi4
#endif
#ifdef L_clz
const UQItype __clz_tab[256] =
{
0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
};
#endif
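/* Added note: __clz_tab[x] is the bit length of the byte x (0 for 0,
   1 for 1, 2 for 2-3, ..., 8 for 128-255); the generic
   count_leading_zeros macro in longlong.h locates the highest nonzero
   part of its operand and finishes the count with this table.  */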
#ifdef L_clzsi2
#undef int
int
__clzSI2 (UWtype x)
{
Wtype ret;
count_leading_zeros (ret, x);
return ret;
}
#endif
#ifdef L_clzdi2
#undef int
int
__clzDI2 (UDWtype x)
{
const DWunion uu = {.ll = x};
UWtype word;
Wtype ret, add;
if (uu.s.high)
word = uu.s.high, add = 0;
else
word = uu.s.low, add = W_TYPE_SIZE;
count_leading_zeros (ret, word);
return ret + add;
}
#endif
#ifdef L_ctzsi2
#undef int
int
__ctzSI2 (UWtype x)
{
Wtype ret;
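  /* count_trailing_zeros is the longlong.h primitive; as with the other
     bit-scan helpers here, the result is undefined when X is zero.  */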
count_trailing_zeros (ret, x);
return ret;
}
#endif
#ifdef L_ctzdi2
#undef int
int
__ctzDI2 (UDWtype x)
{
const DWunion uu = {.ll = x};
UWtype word;
Wtype ret, add;
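  /* Trailing zeros live in the less-significant word: if it is nonzero,
     count there; otherwise the whole low word contributes W_TYPE_SIZE
     zeros on top of the count taken from the high word.  */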
if (uu.s.low)
word = uu.s.low, add = 0;
else
word = uu.s.high, add = W_TYPE_SIZE;
count_trailing_zeros (ret, word);
return ret + add;
}
#endif
#ifdef L_clrsbsi2
#undef int
int
__clrsbSI2 (Wtype x)
{
Wtype ret;
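  /* clrsb counts the bits following the most significant bit that are
     identical to it.  Complementing a negative X turns the run of sign
     bits into a run of leading zeros, so one clz covers both signs; the
     final -1 excludes the sign bit itself.  */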
if (x < 0)
x = ~x;
if (x == 0)
return W_TYPE_SIZE - 1;
count_leading_zeros (ret, x);
return ret - 1;
}
#endif
#ifdef L_clrsbdi2
#undef int
int
__clrsbDI2 (DWtype x)
{
const DWunion uu = {.ll = x};
UWtype word;
Wtype ret, add;
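  /* Pick the word in which the run of redundant sign bits ends,
     complementing it when X is negative so the run shows up as leading
     zeros.  When the high word is all sign bits, the run extends at
     least W_TYPE_SIZE bits into the low word.  */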
if (uu.s.high == 0)
word = uu.s.low, add = W_TYPE_SIZE;
else if (uu.s.high == -1)
word = ~uu.s.low, add = W_TYPE_SIZE;
else if (uu.s.high >= 0)
word = uu.s.high, add = 0;
else
word = ~uu.s.high, add = 0;
if (word == 0)
ret = W_TYPE_SIZE;
else
count_leading_zeros (ret, word);
return ret + add - 1;
}
#endif
#ifdef L_popcount_tab
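/* __popcount_tab[i] is the number of 1 bits in the byte value i; the
   table-lookup fallbacks below index it one byte at a time.  */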
const UQItype __popcount_tab[256] =
{
0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8
};
#endif
#if defined(L_popcountsi2) || defined(L_popcountdi2)
#define POPCOUNTCST2(x) (((UWtype) x << __CHAR_BIT__) | x)
#define POPCOUNTCST4(x) (((UWtype) x << (2 * __CHAR_BIT__)) | x)
#define POPCOUNTCST8(x) (((UWtype) x << (4 * __CHAR_BIT__)) | x)
#if W_TYPE_SIZE == __CHAR_BIT__
#define POPCOUNTCST(x) x
#elif W_TYPE_SIZE == 2 * __CHAR_BIT__
#define POPCOUNTCST(x) POPCOUNTCST2 (x)
#elif W_TYPE_SIZE == 4 * __CHAR_BIT__
#define POPCOUNTCST(x) POPCOUNTCST4 (POPCOUNTCST2 (x))
#elif W_TYPE_SIZE == 8 * __CHAR_BIT__
#define POPCOUNTCST(x) POPCOUNTCST8 (POPCOUNTCST4 (POPCOUNTCST2 (x)))
#endif
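/* POPCOUNTCST replicates a one-byte constant across a full UWtype, e.g.
   with a 32-bit word POPCOUNTCST (0x55) expands to 0x55555555.  */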
#endif
#ifdef L_popcountsi2
#undef int
int
__popcountSI2 (UWtype x)
{
/* Force table lookup on targets like AVR and RL78 which only
pretend they have LIBGCC2_UNITS_PER_WORD 4, but actually
have 1, and other small word targets. */
#if __SIZEOF_INT__ > 2 && defined (POPCOUNTCST) && __CHAR_BIT__ == 8
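  /* Branch-free SWAR popcount: fold pair counts into 2-bit fields, then
     nibble counts into 4-bit fields, then byte counts, and finally let
     the multiply sum every byte count into the most significant byte.  */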
x = x - ((x >> 1) & POPCOUNTCST (0x55));
x = (x & POPCOUNTCST (0x33)) + ((x >> 2) & POPCOUNTCST (0x33));
x = (x + (x >> 4)) & POPCOUNTCST (0x0F);
return (x * POPCOUNTCST (0x01)) >> (W_TYPE_SIZE - __CHAR_BIT__);
#else
int i, ret = 0;
for (i = 0; i < W_TYPE_SIZE; i += 8)
ret += __popcount_tab[(x >> i) & 0xff];
return ret;
#endif
}
#endif
#ifdef L_popcountdi2
#undef int
int
__popcountDI2 (UDWtype x)
{
/* Force table lookup on targets like AVR and RL78 which only
pretend they have LIBGCC2_UNITS_PER_WORD 4, but actually
have 1, and other small word targets. */
#if __SIZEOF_INT__ > 2 && defined (POPCOUNTCST) && __CHAR_BIT__ == 8
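  /* Run the SWAR reduction on both halves in parallel; each byte of
     X1 + X2 holds at most 16, and the total of at most 2 * W_TYPE_SIZE
     set bits still fits in the top byte accumulated by the multiply.  */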
const DWunion uu = {.ll = x};
UWtype x1 = uu.s.low, x2 = uu.s.high;
x1 = x1 - ((x1 >> 1) & POPCOUNTCST (0x55));
x2 = x2 - ((x2 >> 1) & POPCOUNTCST (0x55));
x1 = (x1 & POPCOUNTCST (0x33)) + ((x1 >> 2) & POPCOUNTCST (0x33));
x2 = (x2 & POPCOUNTCST (0x33)) + ((x2 >> 2) & POPCOUNTCST (0x33));
x1 = (x1 + (x1 >> 4)) & POPCOUNTCST (0x0F);
x2 = (x2 + (x2 >> 4)) & POPCOUNTCST (0x0F);
x1 += x2;
return (x1 * POPCOUNTCST (0x01)) >> (W_TYPE_SIZE - __CHAR_BIT__);
#else
int i, ret = 0;
for (i = 0; i < 2*W_TYPE_SIZE; i += 8)
ret += __popcount_tab[(x >> i) & 0xff];
return ret;
#endif
}
#endif
#ifdef L_paritysi2
#undef int
int
__paritySI2 (UWtype x)
{
#if W_TYPE_SIZE > 64
# error "fill out the table"
#endif
#if W_TYPE_SIZE > 32
x ^= x >> 32;
#endif
#if W_TYPE_SIZE > 16
x ^= x >> 16;
#endif
x ^= x >> 8;
x ^= x >> 4;
x &= 0xf;
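  /* 0x6996 is the 16-entry parity table packed into one constant:
     bit N of 0110 1001 1001 0110 is the parity of the 4-bit value N.  */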
return (0x6996 >> x) & 1;
}
#endif
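/* Illustrative sketch, not part of libgcc: how the parity fold above
works.  Each `x ^= x >> n' step XORs the upper half of the remaining
bits into the lower half, preserving overall parity, until only a
nibble is left.  0x6996 (binary 0110100110010110) then acts as a
16-entry, one-bit-per-entry lookup table: bit i holds the parity of i.
The LIBGCC2_EXAMPLES guard is hypothetical; this block is an example
only.  */
#ifdef LIBGCC2_EXAMPLES
static int
example_parity32 (unsigned int x)
{
  x ^= x >> 16;			/* fold 32 bits to 16 */
  x ^= x >> 8;			/* fold to 8 */
  x ^= x >> 4;			/* fold to 4 */
  x &= 0xf;			/* index into the table */
  return (0x6996 >> x) & 1;	/* e.g. example_parity32 (7) == 1 */
}
#endif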
#ifdef L_paritydi2
#undef int
int
__parityDI2 (UDWtype x)
{
const DWunion uu = {.ll = x};
UWtype nx = uu.s.low ^ uu.s.high;
#if W_TYPE_SIZE > 64
# error "fill out the table"
#endif
#if W_TYPE_SIZE > 32
nx ^= nx >> 32;
#endif
#if W_TYPE_SIZE > 16
nx ^= nx >> 16;
#endif
nx ^= nx >> 8;
nx ^= nx >> 4;
nx &= 0xf;
return (0x6996 >> nx) & 1;
}
#endif
#ifdef L_udivmoddi4
#ifdef TARGET_HAS_NO_HW_DIVIDE
#if (defined (L_udivdi3) || defined (L_divdi3) || \
defined (L_umoddi3) || defined (L_moddi3) || \
defined (L_divmoddi4))
static inline __attribute__ ((__always_inline__))
#endif
UDWtype
__udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
{
UDWtype q = 0, r = n, y = d;
UWtype lz1, lz2, i, k;
/* Implements the align-divisor, shift-dividend method.  This algorithm
aligns the divisor under the dividend and then performs a number of
test-subtract iterations which shift the dividend left.  The number of
iterations is k + 1, where k is the number of bit positions the
divisor must be shifted left to align it under the dividend.
Quotient bits can be saved in the rightmost positions of the dividend
as it shifts left on each test-subtract iteration.  */
if (y <= r)
{
lz1 = __builtin_clzll (d);
lz2 = __builtin_clzll (n);
k = lz1 - lz2;
y = (y << k);
/* The dividend can exceed 2 ^ (width - 1) - 1 but still be less than the
aligned divisor.  A normal iteration can drop the high-order bit
of the dividend.  Therefore, the first test-subtract iteration is a
special case, saving its quotient bit in a separate location and
not shifting the dividend.  */
if (r >= y)
{
r = r - y;
q = (1ULL << k);
}
if (k > 0)
{
y = y >> 1;
/* Do the k remaining iterations: regular test-subtract steps, each
of which shifts the dividend left.  */
i = k;
do
{
if (r >= y)
r = ((r - y) << 1) + 1;
else
r = (r << 1);
i = i - 1;
} while (i != 0);
/* The first quotient bit is combined with the quotient bits resulting
from the k regular iterations.  */
q = q + r;
r = r >> k;
q = q - (r << k);
}
}
if (rp)
*rp = r;
return q;
}
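/* Worked example of the loop above, as an illustrative sketch under a
hypothetical LIBGCC2_EXAMPLES guard (32-bit operands instead of UDWtype
so the trace stays short; requires d != 0).  For n = 77, d = 5:
k = clz (5) - clz (77) = 4 and y = 5 << 4 = 80; the special first test
does not fire since 77 < 80, and the four regular iterations leave
r = 0b101111, i.e. quotient bits 1111 below remainder bits 10, giving
q = 15, r = 2.  */
#ifdef LIBGCC2_EXAMPLES
static unsigned int
example_udivmodsi (unsigned int n, unsigned int d, unsigned int *rp)
{
  unsigned int q = 0, r = n, y = d;
  if (y <= r)
    {
      unsigned int k = __builtin_clz (d) - __builtin_clz (n);
      y <<= k;			/* align divisor under dividend */
      if (r >= y)		/* special first iteration: no shift */
	{
	  r -= y;
	  q = 1U << k;
	}
      if (k > 0)
	{
	  unsigned int i = k;
	  y >>= 1;
	  do			/* k regular test-subtract-shift steps */
	    {
	      if (r >= y)
		r = ((r - y) << 1) + 1;	/* record a 1 quotient bit */
	      else
		r = r << 1;		/* record a 0 quotient bit */
	    }
	  while (--i != 0);
	  q += r;		/* low k bits of r are quotient bits */
	  r >>= k;		/* bits above them are the remainder */
	  q -= r << k;
	}
    }
  if (rp)
    *rp = r;
  return q;
}
#endif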
#else
#if (defined (L_udivdi3) || defined (L_divdi3) || \
defined (L_umoddi3) || defined (L_moddi3) || \
defined (L_divmoddi4))
static inline __attribute__ ((__always_inline__))
#endif
UDWtype
__udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
{
const DWunion nn = {.ll = n};
const DWunion dd = {.ll = d};
DWunion rr;
UWtype d0, d1, n0, n1, n2;
UWtype q0, q1;
UWtype b, bm;
d0 = dd.s.low;
d1 = dd.s.high;
n0 = nn.s.low;
n1 = nn.s.high;
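/* The case analysis below is the classic two-word division (cf. Knuth,
vol. 2, section 4.3.1).  udiv_qrnnd (q, r, n1, n0, d), from longlong.h,
divides the two-word value n1*2^W + n0 by the single word d, leaving
the quotient in q and the remainder in r; it requires n1 < d, and on
targets where UDIV_NEEDS_NORMALIZATION it also requires the most
significant bit of d to be set, hence the shifting below.  */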
#if !UDIV_NEEDS_NORMALIZATION
if (d1 == 0)
{
if (d0 > n1)
{
/* 0q = nn / 0D */
udiv_qrnnd (q0, n0, n1, n0, d0);
q1 = 0;
/* Remainder in n0. */
}
else
{
/* qq = NN / 0d */
if (d0 == 0)
d0 = 1 / d0; /* Divide intentionally by zero. */
udiv_qrnnd (q1, n1, 0, n1, d0);
udiv_qrnnd (q0, n0, n1, n0, d0);
/* Remainder in n0. */
}
if (rp != 0)
{
rr.s.low = n0;
rr.s.high = 0;
*rp = rr.ll;
}
}
#else /* UDIV_NEEDS_NORMALIZATION */
if (d1 == 0)
{
if (d0 > n1)
{
/* 0q = nn / 0D */
count_leading_zeros (bm, d0);
if (bm != 0)
{
/* Normalize, i.e. make the most significant bit of the
denominator set. */
d0 = d0 << bm;
n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
n0 = n0 << bm;
}
udiv_qrnnd (q0, n0, n1, n0, d0);
q1 = 0;
/* Remainder in n0 >> bm. */
}
else
{
/* qq = NN / 0d */
if (d0 == 0)
d0 = 1 / d0; /* Divide intentionally by zero. */
count_leading_zeros (bm, d0);
if (bm == 0)
{
/* From (n1 >= d0) /\ (the most significant bit of d0 is set),
conclude (the most significant bit of n1 is set) /\ (the
leading quotient digit q1 = 1).
This special case is necessary, not an optimization.
(Shifts counts of W_TYPE_SIZE are undefined.) */
n1 -= d0;
q1 = 1;
}
else
{
/* Normalize. */
b = W_TYPE_SIZE - bm;
d0 = d0 << bm;
n2 = n1 >> b;
n1 = (n1 << bm) | (n0 >> b);
n0 = n0 << bm;
udiv_qrnnd (q1, n1, n2, n1, d0);
}
/* n1 != d0... */
udiv_qrnnd (q0, n0, n1, n0, d0);
/* Remainder in n0 >> bm. */
}
if (rp != 0)
{
rr.s.low = n0 >> bm;
rr.s.high = 0;
*rp = rr.ll;
}
}
#endif /* UDIV_NEEDS_NORMALIZATION */
else
{
if (d1 > n1)
{
/* 00 = nn / DD */
q0 = 0;
q1 = 0;
/* Remainder in n1n0. */
if (rp != 0)
{
rr.s.low = n0;
rr.s.high = n1;
*rp = rr.ll;
}
}
else
{
/* 0q = NN / dd */
count_leading_zeros (bm, d1);
if (bm == 0)
{
/* From (n1 >= d1) /\ (the most significant bit of d1 is set),
conclude (the most significant bit of n1 is set) /\ (the
quotient digit q0 = 0 or 1).
This special case is necessary, not an optimization. */
/* The condition on the next line takes advantage of the fact
that n1 >= d1 (true due to program flow).  */
if (n1 > d1 || n0 >= d0)
{
q0 = 1;
sub_ddmmss (n1, n0, n1, n0, d1, d0);
}
else
q0 = 0;
q1 = 0;
if (rp != 0)
{
rr.s.low = n0;
rr.s.high = n1;
*rp = rr.ll;
}
}
else
{
UWtype m1, m0;
/* Normalize. */
b = W_TYPE_SIZE - bm;
d1 = (d1 << bm) | (d0 >> b);
d0 = d0 << bm;
n2 = n1 >> b;
n1 = (n1 << bm) | (n0 >> b);
n0 = n0 << bm;
udiv_qrnnd (q0, n1, n2, n1, d1);
umul_ppmm (m1, m0, q0, d0);
if (m1 > n1 || (m1 == n1 && m0 > n0))
{
q0--;
sub_ddmmss (m1, m0, m1, m0, d1, d0);
}
q1 = 0;
/* Remainder in (n1n0 - m1m0) >> bm. */
if (rp != 0)
{
sub_ddmmss (n1, n0, n1, n0, m1, m0);
rr.s.low = (n1 << b) | (n0 >> bm);
rr.s.high = n1 >> bm;
*rp = rr.ll;
}
}
}
}
const DWunion ww = {{.low = q0, .high = q1}};
return ww.ll;
}
#endif
#endif
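/* Illustrative usage sketch (hypothetical LIBGCC2_EXAMPLES guard): one
call to __udivmoddi4 yields both quotient and remainder, which is how
__udivdi3 and __umoddi3 below are implemented.  */
#ifdef LIBGCC2_EXAMPLES
static void
example_udivmoddi4_use (void)
{
  UDWtype r;
  UDWtype q = __udivmoddi4 (1000000000000ULL, 7, &r);
  /* q == 142857142857, r == 1.  */
  (void) q;
  (void) r;
}
#endif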
#ifdef L_divdi3
DWtype
__divdi3 (DWtype u, DWtype v)
{
Wtype c = 0;
DWunion uu = {.ll = u};
DWunion vv = {.ll = v};
DWtype w;
if (uu.s.high < 0)
c = ~c,
uu.ll = -uu.ll;
if (vv.s.high < 0)
c = ~c,
vv.ll = -vv.ll;
w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
if (c)
w = -w;
return w;
}
#endif
#ifdef L_moddi3
DWtype
__moddi3 (DWtype u, DWtype v)
{
Wtype c = 0;
DWunion uu = {.ll = u};
DWunion vv = {.ll = v};
DWtype w;
if (uu.s.high < 0)
c = ~c,
uu.ll = -uu.ll;
if (vv.s.high < 0)
vv.ll = -vv.ll;
(void) __udivmoddi4 (uu.ll, vv.ll, (UDWtype*)&w);
if (c)
w = -w;
return w;
}
#endif
#ifdef L_divmoddi4
DWtype
__divmoddi4 (DWtype u, DWtype v, DWtype *rp)
{
Wtype c1 = 0, c2 = 0;
DWunion uu = {.ll = u};
DWunion vv = {.ll = v};
DWtype w;
DWtype r;
if (uu.s.high < 0)
c1 = ~c1, c2 = ~c2,
uu.ll = -uu.ll;
if (vv.s.high < 0)
c1 = ~c1,
vv.ll = -vv.ll;
w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype*)&r);
if (c1)
w = -w;
if (c2)
r = -r;
*rp = r;
return w;
}
#endif
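/* Sketch of the sign handling used by __divdi3, __moddi3 and
__divmoddi4 above (hypothetical LIBGCC2_EXAMPLES guard): the `c = ~c'
toggles implement the C truncation rules on top of the unsigned
worker -- the quotient is negative iff exactly one operand is, and the
remainder takes the sign of the dividend.  */
#ifdef LIBGCC2_EXAMPLES
static void
example_signed_divmod (void)
{
  DWtype r;
  DWtype q = __divmoddi4 (-7, 2, &r);
  /* q == -3 and r == -1, since (-3) * 2 + (-1) == -7.  */
  (void) q;
  (void) r;
}
#endif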
#ifdef L_umoddi3
UDWtype
__umoddi3 (UDWtype u, UDWtype v)
{
UDWtype w;
(void) __udivmoddi4 (u, v, &w);
return w;
}
#endif
#ifdef L_udivdi3
UDWtype
__udivdi3 (UDWtype n, UDWtype d)
{
return __udivmoddi4 (n, d, (UDWtype *) 0);
}
#endif
#ifdef L_cmpdi2
cmp_return_type
__cmpdi2 (DWtype a, DWtype b)
{
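/* (a > b) and (a < b) are each 0 or 1, so the expression below maps
the three outcomes onto {0, 1, 2}: 0 if a < b, 1 if a == b,
2 if a > b.  */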
return (a > b) - (a < b) + 1;
}
#endif
#ifdef L_ucmpdi2
cmp_return_type
__ucmpdi2 (UDWtype a, UDWtype b)
{
return (a > b) - (a < b) + 1;
}
#endif
#if defined(L_fixunstfdi) && LIBGCC2_HAS_TF_MODE
UDWtype
__fixunstfDI (TFtype a)
{
if (a < 0)
return 0;
/* Compute high word of result, as a flonum. */
const TFtype b = (a / Wtype_MAXp1_F);
/* Convert that to fixed (but not to DWtype!),
and shift it into the high word. */
UDWtype v = (UWtype) b;
v <<= W_TYPE_SIZE;
/* Remove high part from the TFtype, leaving the low part as flonum. */
a -= (TFtype)v;
/* Convert that to fixed (but not to DWtype!) and add it in.
Sometimes A comes out negative. This is significant, since
A has more bits than a long int does. */
if (a < 0)
v -= (UWtype) (- a);
else
v += (UWtype) a;
return v;
}
#endif
#if defined(L_fixtfdi) && LIBGCC2_HAS_TF_MODE
DWtype
__fixtfdi (TFtype a)
{
if (a < 0)
return - __fixunstfDI (-a);
return __fixunstfDI (a);
}
#endif
#if defined(L_fixunsxfdi) && LIBGCC2_HAS_XF_MODE
UDWtype
__fixunsxfDI (XFtype a)
{
if (a < 0)
return 0;
/* Compute high word of result, as a flonum. */
const XFtype b = (a / Wtype_MAXp1_F);
/* Convert that to fixed (but not to DWtype!),
and shift it into the high word. */
UDWtype v = (UWtype) b;
v <<= W_TYPE_SIZE;
/* Remove high part from the XFtype, leaving the low part as flonum. */
a -= (XFtype)v;
/* Convert that to fixed (but not to DWtype!) and add it in.
Sometimes A comes out negative. This is significant, since
A has more bits than a long int does. */
if (a < 0)
v -= (UWtype) (- a);
else
v += (UWtype) a;
return v;
}
#endif
#if defined(L_fixxfdi) && LIBGCC2_HAS_XF_MODE
DWtype
__fixxfdi (XFtype a)
{
if (a < 0)
return - __fixunsxfDI (-a);
return __fixunsxfDI (a);
}
#endif
#if defined(L_fixunsdfdi) && LIBGCC2_HAS_DF_MODE
UDWtype
__fixunsdfDI (DFtype a)
{
/* Get the high part of the result.  The division here just moves the
radix point and will not cause any rounding.  Then the conversion to
integral type chops the result as desired.  */
const UWtype hi = a / Wtype_MAXp1_F;
/* Get low part of result. Convert `hi' to floating type and scale it back,
then subtract this from the number being converted. This leaves the low
part. Convert that to integral type. */
const UWtype lo = a - (DFtype) hi * Wtype_MAXp1_F;
/* Assemble result from the two parts. */
return ((UDWtype) hi << W_TYPE_SIZE) | lo;
}
#endif
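/* Sketch of the split used above in plain IEEE double / 64-bit terms,
assuming a 32-bit word (hypothetical LIBGCC2_EXAMPLES guard; the
function name is illustrative).  Dividing by 2^32 is exact in binary
floating point, so truncation yields the high word; scaling back and
subtracting leaves the low word.  Valid for 0 <= a < 2^64, as for the
routine above.  */
#ifdef LIBGCC2_EXAMPLES
static unsigned long long
example_fixunsdfdi (double a)
{
  const double two32 = 4294967296.0;	/* Wtype_MAXp1_F for 32-bit words */
  unsigned int hi = a / two32;		/* radix shift, then truncate */
  unsigned int lo = a - (double) hi * two32;
  return ((unsigned long long) hi << 32) | lo;
}
#endif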
#if defined(L_fixdfdi) && LIBGCC2_HAS_DF_MODE
DWtype
__fixdfdi (DFtype a)
{
if (a < 0)
return - __fixunsdfDI (-a);
return __fixunsdfDI (a);
}
#endif
#if defined(L_fixunssfdi) && LIBGCC2_HAS_SF_MODE
UDWtype
__fixunssfDI (SFtype a)
{
#if LIBGCC2_HAS_DF_MODE
/* Convert the SFtype to a DFtype, because that is surely not going
to lose any bits. Some day someone else can write a faster version
that avoids converting to DFtype, and verify it really works right. */
const DFtype dfa = a;
/* Get the high part of the result.  The division here just moves the
radix point and will not cause any rounding.  Then the conversion to
integral type chops the result as desired.  */
const UWtype hi = dfa / Wtype_MAXp1_F;
/* Get low part of result. Convert `hi' to floating type and scale it back,
then subtract this from the number being converted. This leaves the low
part. Convert that to integral type. */
const UWtype lo = dfa - (DFtype) hi * Wtype_MAXp1_F;
/* Assemble result from the two parts. */
return ((UDWtype) hi << W_TYPE_SIZE) | lo;
#elif FLT_MANT_DIG < W_TYPE_SIZE
if (a < 1)
return 0;
if (a < Wtype_MAXp1_F)
return (UWtype)a;
if (a < Wtype_MAXp1_F * Wtype_MAXp1_F)
{
/* Since we know that there are fewer significant bits in the SFmode
quantity than in a word, we know that we can convert out all the
significant bits in one step, and thus avoid losing bits. */
/* ??? The following loop essentially performs frexpf.  If we could
use the real libm function, or poke at the actual bits of the fp
format, it would be significantly faster.  */
UWtype shift = 0, counter;
SFtype msb;
a /= Wtype_MAXp1_F;
for (counter = W_TYPE_SIZE / 2; counter != 0; counter >>= 1)
{
SFtype counterf = (UWtype)1 << counter;
if (a >= counterf)
{
shift |= counter;
a /= counterf;
}
}
/* Rescale into the range of one word, extract the bits of that
one word, and shift the result into position. */
a *= Wtype_MAXp1_F;
counter = a;
return (DWtype)counter << shift;
}
return -1;
#else
# error
#endif
}
#endif
#if defined(L_fixsfdi) && LIBGCC2_HAS_SF_MODE
DWtype
__fixsfdi (SFtype a)
{
if (a < 0)
return - __fixunssfDI (-a);
return __fixunssfDI (a);
}
#endif
#if defined(L_floatdixf) && LIBGCC2_HAS_XF_MODE
XFtype
__floatdixf (DWtype u)
{
#if W_TYPE_SIZE > __LIBGCC_XF_MANT_DIG__
# error
#endif
XFtype d = (Wtype) (u >> W_TYPE_SIZE);
d *= Wtype_MAXp1_F;
d += (UWtype)u;
return d;
}
#endif
#if defined(L_floatundixf) && LIBGCC2_HAS_XF_MODE
XFtype
__floatundixf (UDWtype u)
{
#if W_TYPE_SIZE > __LIBGCC_XF_MANT_DIG__
# error
#endif
XFtype d = (UWtype) (u >> W_TYPE_SIZE);
d *= Wtype_MAXp1_F;
d += (UWtype)u;
return d;
}
#endif
#if defined(L_floatditf) && LIBGCC2_HAS_TF_MODE
TFtype
__floatditf (DWtype u)
{
#if W_TYPE_SIZE > __LIBGCC_TF_MANT_DIG__
# error
#endif
TFtype d = (Wtype) (u >> W_TYPE_SIZE);
d *= Wtype_MAXp1_F;
d += (UWtype)u;
return d;
}
#endif
#if defined(L_floatunditf) && LIBGCC2_HAS_TF_MODE
TFtype
__floatunditf (UDWtype u)
{
#if W_TYPE_SIZE > __LIBGCC_TF_MANT_DIG__
# error
#endif
TFtype d = (UWtype) (u >> W_TYPE_SIZE);
d *= Wtype_MAXp1_F;
d += (UWtype)u;
return d;
}
#endif
#if (defined(L_floatdisf) && LIBGCC2_HAS_SF_MODE) \
|| (defined(L_floatdidf) && LIBGCC2_HAS_DF_MODE)
#define DI_SIZE (W_TYPE_SIZE * 2)
#define F_MODE_OK(SIZE) \
(SIZE < DI_SIZE \
&& SIZE > (DI_SIZE - SIZE + FSSIZE) \
&& !AVOID_FP_TYPE_CONVERSION(SIZE))
#if defined(L_floatdisf)
#define FUNC __floatdisf
#define FSTYPE SFtype
#define FSSIZE __LIBGCC_SF_MANT_DIG__
#else
#define FUNC __floatdidf
#define FSTYPE DFtype
#define FSSIZE __LIBGCC_DF_MANT_DIG__
#endif
FSTYPE
FUNC (DWtype u)
{
#if FSSIZE >= W_TYPE_SIZE
/* When the word size is small, we never get any rounding error. */
FSTYPE f = (Wtype) (u >> W_TYPE_SIZE);
f *= Wtype_MAXp1_F;
f += (UWtype)u;
return f;
#elif (LIBGCC2_HAS_DF_MODE && F_MODE_OK (__LIBGCC_DF_MANT_DIG__)) \
|| (LIBGCC2_HAS_XF_MODE && F_MODE_OK (__LIBGCC_XF_MANT_DIG__)) \
|| (LIBGCC2_HAS_TF_MODE && F_MODE_OK (__LIBGCC_TF_MANT_DIG__))
#if (LIBGCC2_HAS_DF_MODE && F_MODE_OK (__LIBGCC_DF_MANT_DIG__))
# define FSIZE __LIBGCC_DF_MANT_DIG__
# define FTYPE DFtype
#elif (LIBGCC2_HAS_XF_MODE && F_MODE_OK (__LIBGCC_XF_MANT_DIG__))
# define FSIZE __LIBGCC_XF_MANT_DIG__
# define FTYPE XFtype
#elif (LIBGCC2_HAS_TF_MODE && F_MODE_OK (__LIBGCC_TF_MANT_DIG__))
# define FSIZE __LIBGCC_TF_MANT_DIG__
# define FTYPE TFtype
#else
# error
#endif
#define REP_BIT ((UDWtype) 1 << (DI_SIZE - FSIZE))
/* Protect against double-rounding error.
Represent any low-order bits that might be truncated by a single
sticky bit that won't be lost.  The bit can go anywhere below the
rounding position of the FSTYPE.  A fixed mask and bit position
handle all usual configurations.  */
if (! (- ((DWtype) 1 << FSIZE) < u
&& u < ((DWtype) 1 << FSIZE)))
{
if ((UDWtype) u & (REP_BIT - 1))
{
u &= ~ (REP_BIT - 1);
u |= REP_BIT;
}
}
/* Do the calculation in a wider type so that we don't lose any of
the precision of the high word while multiplying it. */
FTYPE f = (Wtype) (u >> W_TYPE_SIZE);
f *= Wtype_MAXp1_F;
f += (UWtype)u;
return (FSTYPE) f;
#else
#if FSSIZE >= W_TYPE_SIZE - 2
# error
#endif
/* Finally, the word size is larger than the number of bits in the
required FSTYPE, and we've got no suitable wider type. The only
way to avoid double rounding is to special case the
extraction. */
/* If there are no high bits set, fall back to one conversion. */
if ((Wtype)u == u)
return (FSTYPE)(Wtype)u;
/* Otherwise, find the power of two. */
Wtype hi = u >> W_TYPE_SIZE;
if (hi < 0)
hi = -(UWtype) hi;
UWtype count, shift;
#if !defined (COUNT_LEADING_ZEROS_0) || COUNT_LEADING_ZEROS_0 != W_TYPE_SIZE
if (hi == 0)
count = W_TYPE_SIZE;
else
#endif
count_leading_zeros (count, hi);
/* No leading bits means u == minimum. */
if (count == 0)
return Wtype_MAXp1_F * (FSTYPE) (hi | ((UWtype) u != 0));
shift = 1 + W_TYPE_SIZE - count;
/* Shift down the most significant bits. */
hi = u >> shift;
/* If we lost any nonzero bits, set the lsb to ensure correct rounding. */
if ((UWtype)u << (W_TYPE_SIZE - shift))
hi |= 1;
/* Convert the one word of data, and rescale. */
FSTYPE f = hi, e;
if (shift == W_TYPE_SIZE)
e = Wtype_MAXp1_F;
/* The following two cases could be merged if we knew that the target
supported a native unsigned->float conversion. More often, we only
have a signed conversion, and have to add extra fixup code. */
else if (shift == W_TYPE_SIZE - 1)
e = Wtype_MAXp1_F / 2;
else
e = (Wtype)1 << shift;
return f * e;
#endif
}
#endif
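/* Sketch of the sticky-bit trick above for the common DI -> SF via DF
case on a 32-bit-word target (hypothetical LIBGCC2_EXAMPLES guard).
With DI_SIZE = 64 and FSIZE = 53, REP_BIT = 1 << 11: once |u| >= 2^53,
any nonzero bits below bit 11 are squashed into bit 11, which survives
the intermediate DF rounding, so the final rounding to SF still sees
that they were nonzero and double rounding is avoided.  */
#ifdef LIBGCC2_EXAMPLES
static float
example_floatdisf (long long u)
{
  const unsigned long long rep_bit = 1ULL << (64 - 53);
  if (! (- (1LL << 53) < u && u < (1LL << 53)))
    if ((unsigned long long) u & (rep_bit - 1))
      u = (long long) (((unsigned long long) u & ~(rep_bit - 1)) | rep_bit);
  /* Two word-sized halves through double, one final rounding to float.  */
  double f = (int) (u >> 32);
  f *= 4294967296.0;
  f += (unsigned int) u;
  return (float) f;
}
#endif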
#if (defined(L_floatundisf) && LIBGCC2_HAS_SF_MODE) \
|| (defined(L_floatundidf) && LIBGCC2_HAS_DF_MODE)
#define DI_SIZE (W_TYPE_SIZE * 2)
#define F_MODE_OK(SIZE) \
(SIZE < DI_SIZE \
&& SIZE > (DI_SIZE - SIZE + FSSIZE) \
&& !AVOID_FP_TYPE_CONVERSION(SIZE))
#if defined(L_floatundisf)
#define FUNC __floatundisf
#define FSTYPE SFtype
#define FSSIZE __LIBGCC_SF_MANT_DIG__
#else
#define FUNC __floatundidf
#define FSTYPE DFtype
#define FSSIZE __LIBGCC_DF_MANT_DIG__
#endif
FSTYPE
FUNC (UDWtype u)
{
#if FSSIZE >= W_TYPE_SIZE
/* When the word size is small, we never get any rounding error. */
FSTYPE f = (UWtype) (u >> W_TYPE_SIZE);
f *= Wtype_MAXp1_F;
f += (UWtype)u;
return f;
#elif (LIBGCC2_HAS_DF_MODE && F_MODE_OK (__LIBGCC_DF_MANT_DIG__)) \
|| (LIBGCC2_HAS_XF_MODE && F_MODE_OK (__LIBGCC_XF_MANT_DIG__)) \
|| (LIBGCC2_HAS_TF_MODE && F_MODE_OK (__LIBGCC_TF_MANT_DIG__))
#if (LIBGCC2_HAS_DF_MODE && F_MODE_OK (__LIBGCC_DF_MANT_DIG__))
# define FSIZE __LIBGCC_DF_MANT_DIG__
# define FTYPE DFtype
#elif (LIBGCC2_HAS_XF_MODE && F_MODE_OK (__LIBGCC_XF_MANT_DIG__))
# define FSIZE __LIBGCC_XF_MANT_DIG__
# define FTYPE XFtype
#elif (LIBGCC2_HAS_TF_MODE && F_MODE_OK (__LIBGCC_TF_MANT_DIG__))
# define FSIZE __LIBGCC_TF_MANT_DIG__
# define FTYPE TFtype
#else
# error
#endif
#define REP_BIT ((UDWtype) 1 << (DI_SIZE - FSIZE))
/* Protect against double-rounding error.
Represent any low-order bits that might be truncated by a single
sticky bit that won't be lost.  The bit can go anywhere below the
rounding position of the FSTYPE.  A fixed mask and bit position
handle all usual configurations.  */
if (u >= ((UDWtype) 1 << FSIZE))
{
if ((UDWtype) u & (REP_BIT - 1))
{
u &= ~ (REP_BIT - 1);
u |= REP_BIT;
}
}
/* Do the calculation in a wider type so that we don't lose any of
the precision of the high word while multiplying it. */
FTYPE f = (UWtype) (u >> W_TYPE_SIZE);
f *= Wtype_MAXp1_F;
f += (UWtype)u;
return (FSTYPE) f;
#else
#if FSSIZE == W_TYPE_SIZE - 1
# error
#endif
/* Finally, the word size is larger than the number of bits in the
required FSTYPE, and we've got no suitable wider type. The only
way to avoid double rounding is to special case the
extraction. */
/* If there are no high bits set, fall back to one conversion. */
if ((UWtype)u == u)
return (FSTYPE)(UWtype)u;
/* Otherwise, find the power of two. */
UWtype hi = u >> W_TYPE_SIZE;
UWtype count, shift;
count_leading_zeros (count, hi);
shift = W_TYPE_SIZE - count;
/* Shift down the most significant bits. */
hi = u >> shift;
/* If we lost any nonzero bits, set the lsb to ensure correct rounding. */
if ((UWtype)u << (W_TYPE_SIZE - shift))
hi |= 1;
/* Convert the one word of data, and rescale. */
FSTYPE f = hi, e;
if (shift == W_TYPE_SIZE)
e = Wtype_MAXp1_F;
/* The following two cases could be merged if we knew that the target
supported a native unsigned->float conversion. More often, we only
have a signed conversion, and have to add extra fixup code. */
else if (shift == W_TYPE_SIZE - 1)
e = Wtype_MAXp1_F / 2;
else
e = (Wtype)1 << shift;
return f * e;
#endif
}
#endif
#if defined(L_fixunsxfsi) && LIBGCC2_HAS_XF_MODE
UWtype
__fixunsxfSI (XFtype a)
{
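/* Values in [2^(W-1), 2^W) do not fit in the signed Wtype that a
direct conversion would produce, so bias the argument down by 2^(W-1)
(adding Wtype_MIN), convert, and undo the bias in integer arithmetic;
smaller values convert directly.  The same trick is used by
__fixunsdfSI and __fixunssfSI below.  */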
if (a >= - (XFtype) Wtype_MIN)
return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
return (Wtype) a;
}
#endif
#if defined(L_fixunsdfsi) && LIBGCC2_HAS_DF_MODE
UWtype
__fixunsdfSI (DFtype a)
{
if (a >= - (DFtype) Wtype_MIN)
return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
return (Wtype) a;
}
#endif
#if defined(L_fixunssfsi) && LIBGCC2_HAS_SF_MODE
UWtype
__fixunssfSI (SFtype a)
{
if (a >= - (SFtype) Wtype_MIN)
return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
return (Wtype) a;
}
#endif
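/* The bias trick above in concrete 32-bit float terms (illustrative
sketch, hypothetical LIBGCC2_EXAMPLES guard): 2147483648.0f plays the
role of - (SFtype) Wtype_MIN for a 32-bit word.  */
#ifdef LIBGCC2_EXAMPLES
static unsigned int
example_fixunssfsi (float a)
{
  if (a >= 2147483648.0f)
    /* a - 2^31 fits in a signed int; adding 2^31 back in unsigned
       arithmetic restores the intended value.  */
    return (int) (a - 2147483648.0f) + 2147483648U;
  return (int) a;
}
#endif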
/* Integer power helper used from __builtin_powi for non-constant
exponents. */
#if (defined(L_powisf2) && LIBGCC2_HAS_SF_MODE) \
|| (defined(L_powidf2) && LIBGCC2_HAS_DF_MODE) \
|| (defined(L_powixf2) && LIBGCC2_HAS_XF_MODE) \
|| (defined(L_powitf2) && LIBGCC2_HAS_TF_MODE)
# if defined(L_powisf2)
# define TYPE SFtype
# define NAME __powisf2
# elif defined(L_powidf2)
# define TYPE DFtype
# define NAME __powidf2
# elif defined(L_powixf2)
# define TYPE XFtype
# define NAME __powixf2
# elif defined(L_powitf2)
# define TYPE TFtype
# define NAME __powitf2
# endif
#undef int
#undef unsigned
TYPE
NAME (TYPE x, int m)
{
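/* Binary exponentiation: square x once per exponent bit and fold the
squares at the set bits into y; for m = 13 = 0b1101 this computes
x * x^4 * x^8 in O(log m) multiplies.  A negative exponent folds into
a single reciprocal at the end.  */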
unsigned int n = m < 0 ? -(unsigned int) m : (unsigned int) m;
TYPE y = n % 2 ? x : 1;
while (n >>= 1)
{
x = x * x;
if (n % 2)
y = y * x;
}
return m < 0 ? 1/y : y;
}
#endif
#if((defined(L_mulhc3) || defined(L_divhc3)) && LIBGCC2_HAS_HF_MODE) \
|| ((defined(L_mulsc3) || defined(L_divsc3)) && LIBGCC2_HAS_SF_MODE) \
|| ((defined(L_muldc3) || defined(L_divdc3)) && LIBGCC2_HAS_DF_MODE) \
|| ((defined(L_mulxc3) || defined(L_divxc3)) && LIBGCC2_HAS_XF_MODE) \
|| ((defined(L_multc3) || defined(L_divtc3)) && LIBGCC2_HAS_TF_MODE)
#undef float
#undef double
#undef long
#if defined(L_mulhc3) || defined(L_divhc3)
# define MTYPE HFtype
# define CTYPE HCtype
# define AMTYPE SFtype
# define MODE hc
# define CEXT __LIBGCC_HF_FUNC_EXT__
# define NOTRUNC (!__LIBGCC_HF_EXCESS_PRECISION__)
#elif defined(L_mulsc3) || defined(L_divsc3)
# define MTYPE SFtype
# define CTYPE SCtype
Practical improvement to libgcc complex divide

Correctness and performance test programs used during development of
this project may be found in the attachment to:
https://www.mail-archive.com/gcc-patches@gcc.gnu.org/msg254210.html

Summary of Purpose

This patch to libgcc/libgcc2.c __divdc3 provides an opportunity to
gain important improvements to the quality of answers for the default
complex divide routine (half, float, double, extended, long double
precisions) when dealing with very large or very small exponents.

The current code correctly implements Smith's method (1962) [2]
further modified by c99's requirements for dealing with NaN (not a
number) results.  When working with input values where the exponents
are greater than *_MAX_EXP/2 or less than -(*_MAX_EXP)/2, results are
substantially different from the answers provided by quad precision
more than 1% of the time.  This error rate may be unacceptable for
many applications that cannot a priori restrict their computations to
the safe range.  The proposed method reduces the frequency of
"substantially different" answers by more than 99% for double
precision at a modest cost of performance.

Differences between current gcc methods and the new method will be
described.  Then accuracy and performance differences will be
discussed.

Background

This project started with an investigation related to
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59714.  Study of Beebe
[1] provided an overview of past and recent practice for computing
complex divide.  The current glibc implementation is based on Robert
Smith's algorithm [2] from 1962.  A google search found the paper by
Baudin and Smith [3] (same Robert Smith) published in 2012.  Elen
Kalda's proposed patch [4] is based on that paper.

I developed two sets of test data by randomly distributing values over
a restricted range and the full range of input values.  The current
complex divide handled the restricted range well enough, but failed on
the full range more than 1% of the time.  Baudin and Smith's primary
test for "ratio" equals zero reduced the cases with 16 or more error
bits by a factor of 5, but still left too many flawed answers.  Adding
debug print out to cases with substantial errors allowed me to see the
intermediate calculations for test values that failed.  I noted that
for many of the failures, "ratio" was a subnormal.  Changing the
"ratio" test from a check for zero to a check for subnormal reduced
the 16 bit error rate by another factor of 12.  This single modified
test provides the greatest benefit for the least cost, but the
percentage of cases with greater than 16 bit errors (double precision
data) is still greater than 0.027% (2.7 in 10,000).

Continued examination of remaining errors and their intermediate
computations led to the various input value tests and scaling used to
avoid under/overflow.  The current patch does not handle some of the
rare and most extreme combinations of input values, but the random
test data is only showing 1 case in 10 million that has an error of
greater than 12 bits.  That case has 18 bits of error and is due to
subtraction cancellation.  These results are significantly better than
the results reported by Baudin and Smith.

Support for half, float, double, extended, and long double precision
is included as all are handled with suitable preprocessor symbols in a
single source routine.  Since half precision is computed with float
precision as per current libgcc practice, the enhanced algorithm
provides no benefit for half precision and would cost performance.
Further investigation showed that changing the half precision
algorithm to use the simple formula (real = (a*c + b*d) / (c*c + d*d),
imag = (b*c - a*d) / (c*c + d*d)) caused no loss of precision and a
modest improvement in performance.

The existing constants for each precision:
  float: FLT_MAX, FLT_MIN;
  double: DBL_MAX, DBL_MIN;
  extended and/or long double: LDBL_MAX, LDBL_MIN
are used for avoiding the more common overflow/underflow cases.  This
use is made generic by defining appropriate __LIBGCC2_* macros in
c-cppbuiltin.c.

Tests are added for when both parts of the denominator have exponents
small enough that, after shifting any subnormal values to normal
values, all input values can be scaled up without risking overflow.
That gained a clear improvement in accuracy.  Similarly, when either
numerator was subnormal and the other numerator and both denominator
values were not too large, scaling could be used to reduce the risk of
computing with subnormals.  The test and scaling values used all fit
within the allowed exponent range for each precision required by the
C standard.

Float precision has more difficulty with getting correct answers than
double precision.  When hardware for double precision floating point
operations is available, float precision is now handled in double
precision intermediate calculations with the simple algorithm, the
same as the half-precision method of using float precision for
intermediate calculations.  Using the higher precision yields exact
results for all tested input values (64-bit double, 32-bit float) with
the only performance cost being the requirement to convert the four
input values from float to double.  If double precision hardware is
not available, then float complex divide will use the same improved
algorithm as the other precisions, with a similar change in
performance.
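A minimal sketch of this float-via-double path (the helper name
divsc3_via_double is invented for illustration; the patch's actual
code differs in detail):

  #include <complex.h>

  /* Float complex divide through double intermediates.  The 24-bit
     float products fit exactly in double's 53-bit mantissa, and
     double's exponent range absorbs any overflow or underflow of
     c*c + d*d, so the simple formula needs no scaling tests here.  */
  static float _Complex
  divsc3_via_double (float a, float b, float c, float d)
  {
    double denom = (double) c * c + (double) d * d;
    float e = (float) (((double) a * c + (double) b * d) / denom);
    float f = (float) (((double) b * c - (double) a * d) / denom);
    return CMPLXF (e, f);
  }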
Further Improvement

The most common remaining substantial errors are due to accuracy loss
when subtracting nearly equal values.  This patch makes no attempt to
improve that situation.

NOTATION

For all of the following, the notation is:
  Input complex values:  a+bi  (a = real part, b = imaginary part)
                         c+di
  Output complex value:  e+fi = (a+bi)/(c+di)

For the result tables:
  current = current method (SMITH)
  b1div   = method proposed by Elen Kalda
  b2div   = alternate method considered by Elen Kalda
  new     = new method proposed by this patch

DESCRIPTIONS of different complex divide methods:

NAIVE COMPUTATION (-fcx-limited-range):

  e = (a*c + b*d)/(c*c + d*d)
  f = (b*c - a*d)/(c*c + d*d)

Note that c*c and d*d will overflow or underflow if either c or d is
outside the range 2^-538 to 2^512.  This method is available in gcc
when the switch -fcx-limited-range is used.  That switch is also
enabled by -ffast-math.  Only one who has a clear understanding of the
maximum range of all intermediate values generated by an application
should consider using this switch.

SMITH's METHOD (current libgcc):

  if (fabs (c) < fabs (d))
    {
      r = c/d;
      denom = (c*r) + d;
      e = (a*r + b) / denom;
      f = (b*r - a) / denom;
    }
  else
    {
      r = d/c;
      denom = c + (d*r);
      e = (a + b*r) / denom;
      f = (b - a*r) / denom;
    }

Smith's method is the current default method available with __divdc3.
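As a concrete illustration (a hypothetical standalone program, not
part of libgcc), the following divides (1e300 + 1e300*i) by
(2e300 + 1e300*i), whose exact quotient is 0.6 + 0.2*i, with both
methods:

  #include <stdio.h>
  #include <math.h>

  /* Naive formula: c*c + d*d overflows to infinity for |c| or |d|
     above about 2^512, so the quotient degenerates to NaN.  */
  static void
  divide_naive (double a, double b, double c, double d,
                double *e, double *f)
  {
    double denom = c * c + d * d;
    *e = (a * c + b * d) / denom;
    *f = (b * c - a * d) / denom;
  }

  /* Smith's method: dividing by the larger of c and d keeps the
     intermediate values near the magnitude of the operands.  */
  static void
  divide_smith (double a, double b, double c, double d,
                double *e, double *f)
  {
    if (fabs (c) < fabs (d))
      {
        double r = c / d, denom = (c * r) + d;
        *e = (a * r + b) / denom;
        *f = (b * r - a) / denom;
      }
    else
      {
        double r = d / c, denom = c + (d * r);
        *e = (a + b * r) / denom;
        *f = (b - a * r) / denom;
      }
  }

  int
  main (void)
  {
    double e, f;
    divide_naive (1e300, 1e300, 2e300, 1e300, &e, &f);
    printf ("naive: %g + %g*i\n", e, f);   /* prints nan + nan*i */
    divide_smith (1e300, 1e300, 2e300, 1e300, &e, &f);
    printf ("smith: %g + %g*i\n", e, f);   /* prints 0.6 + 0.2*i */
    return 0;
  }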
Elen Kalda's METHOD

Elen Kalda proposed a patch about a year ago, also based on Baudin and
Smith, but not including tests for subnormals:
https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html [4]
It is compared here for accuracy with this patch.

This method applies the most significant part of the algorithm
proposed by Baudin&Smith (2012) in the paper "A Robust Complex
Division in Scilab" [3].  Elen's method also replaces two divides by
one divide and two multiplies due to the high cost of divide on
aarch64.  In the comparison sections, this method will be labeled
b1div.  A variation discussed in that patch which does not replace the
two divides will be labeled b2div.

  inline void
  improved_internal (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
  {
    r = d/c;
    t = 1.0 / (c + (d * r));
    if (r != 0)
      {
        x = (a + (b * r)) * t;
        y = (b - (a * r)) * t;
      }
    else
      {
        /* Changing the order of operations avoids the underflow of r
           impacting the result.  */
        x = (a + (d * (b / c))) * t;
        y = (b - (d * (a / c))) * t;
      }
  }

  if (FABS (d) < FABS (c))
    {
      improved_internal (a, b, c, d);
    }
  else
    {
      improved_internal (b, a, d, c);
      y = -y;
    }

NEW METHOD (proposed by patch) to replace the current default method:

The proposed method starts with an algorithm proposed by Baudin&Smith
(2012) in the paper "A Robust Complex Division in Scilab" [3].  The
patch makes additional modifications to that method for further
reductions in the error rate.  The following code shows the #define
values for double precision.  See the patch for #define values used
for other precisions.

  #define RBIG      ((DBL_MAX)/2.0)
  #define RMIN      (DBL_MIN)
  #define RMIN2     (0x1.0p-53)
  #define RMINSCAL  (0x1.0p+51)
  #define RMAX2     ((RBIG)*(RMIN2))

  if (FABS (c) < FABS (d))
    {
      /* Prevent overflow when arguments are near max representable.  */
      if ((FABS (d) > RBIG) || (FABS (a) > RBIG) || (FABS (b) > RBIG))
        {
          a = a * 0.5;
          b = b * 0.5;
          c = c * 0.5;
          d = d * 0.5;
        }
      /* Minimize overflow/underflow issues when c and d are small.  */
      else if (FABS (d) < RMIN2)
        {
          a = a * RMINSCAL;
          b = b * RMINSCAL;
          c = c * RMINSCAL;
          d = d * RMINSCAL;
        }
      else if (((FABS (a) < RMIN) && (FABS (b) < RMAX2)
                && (FABS (d) < RMAX2))
               || ((FABS (b) < RMIN) && (FABS (a) < RMAX2)
                   && (FABS (d) < RMAX2)))
        {
          a = a * RMINSCAL;
          b = b * RMINSCAL;
          c = c * RMINSCAL;
          d = d * RMINSCAL;
        }
      r = c/d;
      denom = (c*r) + d;
      if (FABS (r) > RMIN)
        {
          e = (a*r + b) / denom;
          f = (b*r - a) / denom;
        }
      else
        {
          e = (c * (a/d) + b) / denom;
          f = (c * (b/d) - a) / denom;
        }
    }
  [ only presenting the fabs(c) < fabs(d) case here, full code in patch ]

Before any computation of the answer, the code checks for any input
values near maximum to allow down scaling to avoid overflow.  These
scalings almost never harm the accuracy since they are by a factor of
2.  Values that are over RBIG are relatively rare, but it is easy to
test for them and allow avoidance of overflows.

Testing for RMIN2 reveals when both c and d are less than
[FLT|DBL]_EPSILON.  By scaling all values by 1/EPSILON, the code
converts subnormals to normals and avoids loss of accuracy and
underflows in intermediate computations that otherwise might occur.
If scaling a and b by 1/EPSILON causes either to overflow, then the
computation will overflow whatever method is used.

Finally, we test for either a or b being subnormal (RMIN) and, if so,
for the other three values being small enough to allow scaling.  We
only need to test a single denominator value since we have already
determined which of c and d is larger.

Next, r (the ratio of c to d) is checked for being near zero.  Baudin
and Smith checked r for zero.  This code improves that approach by
checking for values less than DBL_MIN (subnormal), which covers
roughly 12 times as many cases and substantially improves overall
accuracy.  If r is too small, then when it is used in a
multiplication, there is a high chance that the result will underflow
to zero, losing significant accuracy.  That underflow is avoided by
reordering the computation.  When r is subnormal, the code replaces
a*r (= a*(c/d)) with ((a/d)*c), which is mathematically the same but
avoids the unnecessary underflow.
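The effect of the reordering can be seen with a short standalone demo
(hypothetical, not from the patch): both expressions below compute
a*c/d, but the first goes through the subnormal ratio r and inherits
its reduced precision:

  #include <stdio.h>

  int
  main (void)
  {
    double a = 1e23, c = 1e-8, d = 1e305;
    /* r is about 1e-313: subnormal, with only ~34 significant bits.  */
    double r = c / d;
    /* a*r carries r's large rounding error into a normal-range
       result; (a/d)*c keeps every intermediate normal, so only the
       usual final rounding applies.  The two printed values differ
       after roughly 10 significant digits.  */
    printf ("a*r     = %.17g\n", a * r);
    printf ("(a/d)*c = %.17g\n", (a / d) * c);
    return 0;
  }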
TEST Data

Two sets of data are presented to test these methods.  Both sets
contain 10 million pairs of complex values.  The exponents and
mantissas are generated using multiple calls to random() and then
combining the results.  Only values which give results to complex
divide that are representable in the appropriate precision after being
computed in quad precision are used.

The first data set is labeled "moderate exponents".  The exponent
range is limited to -DBL_MAX_EXP/2 to DBL_MAX_EXP/2 for Double
Precision (use FLT_MAX_EXP or LDBL_MAX_EXP for the appropriate
precisions).  The second data set is labeled "full exponents".  The
exponent range for these cases is the full exponent range, including
subnormals, for a given precision.

ACCURACY Test results:

Note: The following accuracy tests are based on IEEE-754 arithmetic.

Note: All results reported are based on use of fused multiply-add.  If
fused multiply-add is not used, the error rate increases, giving more
1 and 2 bit errors for both current and new complex divide.
Differences between using fused multiply-add and not using it that are
greater than 2 bits occur less than once in a million cases.

The complex divide methods are evaluated by determining the percentage
of values that exceed differences in low order bits.  If a "2 bit"
test shows 1%, that means 1% of 10,000,000 values (100,000) have
either a real or imaginary part that differs from the quad precision
result by more than the last 2 bits.  Results are reported for
differences greater than or equal to 1 bit, 2 bits, 8 bits, 16 bits,
24 bits, and 52 bits for double precision.

Even when the patch avoids overflows and underflows, some input values
are expected to have errors due to the potential for catastrophic
roundoff from floating point subtraction.  For example, when b*c and
a*d are nearly equal, the result of subtraction may lose several
places of accuracy.  This patch does not attempt to detect or minimize
this type of error, but neither does it increase them.
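One way to count "bits of error" between a computed double and the
quad-precision reference rounded to double is through the
ordered-integer view of IEEE-754 values.  This sketch is an
illustration of the idea, not the harness used to produce the tables
below:

  #include <stdint.h>
  #include <string.h>

  /* Map a double onto an unsigned integer whose ordering matches the
     floating-point ordering (negatives reversed, positives shifted
     above them).  NaN and infinity handling omitted for brevity.  */
  static uint64_t
  ordered_rep (double x)
  {
    uint64_t u;
    memcpy (&u, &x, sizeof u);
    return (u >> 63) ? ~u : (u | 0x8000000000000000ULL);
  }

  /* Bits of error: the bit width of the ulp distance.  0 means the
     values are identical, 1 means only the last bit differs, etc.  */
  static int
  error_bits (double computed, double reference)
  {
    uint64_t a = ordered_rep (computed);
    uint64_t b = ordered_rep (reference);
    uint64_t ulps = (a > b) ? a - b : b - a;
    int bits = 0;
    while (ulps)
      {
        ulps >>= 1;
        bits++;
      }
    return bits;
  }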
I only show the results for Elen Kalda's method (with both the 1 and 2
divide variants) alongside the new method in the double precision
tables.

In the following charts, lower values are better.

  current - current complex divide in libgcc
  b1div   - Elen Kalda's method from Baudin & Smith with one divide
  b2div   - Elen Kalda's method from Baudin & Smith with two divides
  new     - this patch, which uses 2 divides

===================================================
Errors    Moderate Dataset
gtr eq    current     b1div       b2div       new
======    ========    ========    ========    ========
 1 bit    0.24707%    0.92986%    0.24707%    0.24707%
 2 bits   0.01762%    0.01770%    0.01762%    0.01762%
 8 bits   0.00026%    0.00026%    0.00026%    0.00026%
16 bits   0.00000%    0.00000%    0.00000%    0.00000%
24 bits   0%          0%          0%          0%
52 bits   0%          0%          0%          0%
===================================================
Table 1: Errors with Moderate Dataset (Double Precision)

Note in Table 1 that both the old and new methods give identical error
rates for data with moderate exponents.  Errors exceeding 16 bits are
exceedingly rare.  There are substantial increases in the 1 bit error
rates for b1div (the 1 divide / 2 multiplies method) as compared to
b2div (the 2 divides method).  These differences are minimal for 2
bits and larger error measurements.

===================================================
Errors    Full Dataset
gtr eq    current     b1div       b2div       new
======    ========    ========    ========    ========
 1 bit    2.05%       1.23842%    0.67130%    0.16664%
 2 bits   1.88%       0.51615%    0.50354%    0.00900%
 8 bits   1.77%       0.42856%    0.42168%    0.00011%
16 bits   1.63%       0.33840%    0.32879%    0.00001%
24 bits   1.51%       0.25583%    0.24405%    0.00000%
52 bits   1.13%       0.01886%    0.00350%    0.00000%
===================================================
Table 2: Errors with Full Dataset (Double Precision)

Table 2 shows significant differences in error rates.  First, the
difference between b1div and b2div shows a significantly higher error
rate for the b1div method, both for single bit errors and well beyond.
Even for 52 bits, we see the b1div method gets completely wrong
answers more than 5 times as often as b2div.  To retain comparable
accuracy with current complex divide results for small exponents, and
because of the increase in errors for large exponents, I chose the
more accurate method of two divides.

The current method has more than 1.6% of cases where the low 24 bits
of the mantissa differ from the correct answer, and more than 1.1% of
cases where the answer is completely wrong.  The new method shows less
than one case in 10,000 with greater than two bits of error and only
one case in 10 million with greater than 16 bits of error.  The new
patch reduces 8 bit errors by a factor of 16,000 and virtually
eliminates completely wrong answers.

As noted above, for architectures with double precision hardware, the
new method uses that hardware for the intermediate calculations before
returning the result in float precision.  Testing of the new patch has
shown zero errors, as seen in Tables 3 and 4.

Correctness for float

=============================
Errors    Moderate Dataset
gtr eq    current      new
======    ========     ========
 1 bit    28.68070%    0%
 2 bits    0.64386%    0%
 8 bits    0.00401%    0%
16 bits    0.00001%    0%
24 bits    0%          0%
=============================
Table 3: Errors with Moderate Dataset (float)

=============================
Errors    Full Dataset
gtr eq    current      new
======    ========     ========
 1 bit    19.98%       0%
 2 bits    3.20%       0%
 8 bits    1.97%       0%
16 bits    1.08%       0%
24 bits    0.55%       0%
=============================
Table 4: Errors with Full Dataset (float)

As before, the current method shows a troubling rate of extreme
errors.

There are very minor changes in accuracy for half-precision since the
code changes from Smith's method to the simple method: 5 out of 1
million test cases show correct answers instead of 1 or 2 bit errors.
libgcc computes half-precision functions in float precision, allowing
the existing methods to avoid overflow/underflow issues for the
allowed range of exponents for half-precision.

Extended precision (using the x87 80-bit format on x86) and long
double (using IEEE-754 128-bit on x86 and aarch64) both have 15-bit
exponents as compared to 11-bit exponents in double precision.  We
note that the C standard also allows long double to be implemented in
the equivalent range of double.  The RMIN2 and RMINSCAL constants are
selected to work within the double range as well as within the
extended and 128-bit ranges.  We will limit our performance and
accuracy discussions to the 80-bit and 128-bit formats as seen on x86
here.

The extended and long double precision investigations were more
limited.  Aarch64 does not support extended precision but does support
the software implementation of 128-bit long double precision.
For x86, long double defaults to the 80-bit precision, but using the
-mlong-double-128 flag switches to the software implementation of
128-bit precision.  Both 80-bit and 128-bit precisions have the same
exponent range, with the 128-bit precision having a wider mantissa.
Since this change is only aimed at avoiding underflow/overflow for
extreme exponents, I studied the extended precision results on x86 for
100,000 values.  The limited exponent dataset showed no differences.
For the dataset with full exponent range, the current and new values
showed major differences (greater than 32 bits) in 567 cases out of
100,000 (0.56%).  In every one of these cases, the ratio of c/d or d/c
(as appropriate) was zero or subnormal, indicating the advantage of
the new method and its continued correctness where needed.

PERFORMANCE Test results

In order for a library change to be practical, it is necessary to show
the slowdown is tolerable.  The slowdowns observed are much less than
would be seen by (for example) switching from hardware double
precision to software quad precision, which on the tested machines
causes a slowdown of around 100x.

The actual slowdown depends on the machine architecture.  It also
depends on the nature of the input data.  If underflow/overflow is
rare, then implementations with strong branch prediction will only
slow down by a few cycles.  If underflow/overflow is common, then the
branch predictors will be less accurate and the cost will be higher.

Results from two machines are presented as examples of the overhead
for the new method.  The one labeled x86 is a 5 year old Intel x86
processor and the one labeled aarch64 is a 3 year old arm64 processor.

In the following chart, the times are averaged over a one million
value data set.  All values are scaled to set the time of the current
method to be 1.0.  Lower values are better.  A value of less than 1.0
would be faster than the current method and a value greater than 1.0
would be slower than the current method.

================================================
              Moderate set         full set
              x86     aarch64      x86     aarch64
========      ===============      ===============
float         0.59    0.79         0.45    0.81
double        1.04    1.24         1.38    1.56
long double   1.13    1.24         1.29    1.25
================================================
Table 5: Performance Comparisons (ratio new/current)

The above table omits the timing for the 1 divide and 2 multiply
comparison with the 2 divide approach.

The float results show a clear performance improvement due to using
the simple method with double precision for intermediate calculations.

The double results with the new method show less overhead for the
moderate dataset than for the full dataset.  That is because the
moderate dataset never takes the new branches which protect from
under/overflow.  The better the branch predictor, the lower the cost
for these untaken branches.  Both platforms are somewhat dated, with
the x86 having a better branch predictor which reduces the cost of the
additional branches in the new code.  Of course, the relative slowdown
may be greater for some architectures, especially those with limited
branch prediction combined with a high cost of misprediction.

The long double results are fairly consistent in showing a moderate
additional cost from the extra branches and calculations for all
cases.

The observed cost for all precisions is claimed to be tolerable on the
grounds that:

(a) the cost is worthwhile considering the accuracy improvement shown;
(b) most applications will only spend a small fraction of their time
    calculating complex divide;
(c) it is much less than the cost of extended precision;
(d) users are not forced to use it (as described below).

Those users who find this degree of slowdown unsatisfactory may use
the gcc switch -fcx-fortran-rules, which does not use the library
routine and instead inlines Smith's method without the C99 requirement
for dealing with NaN results.  The proposed patch for libgcc complex
divide does not affect the code generated by -fcx-fortran-rules.

SUMMARY

When input data to complex divide has exponents whose absolute value
is less than half of *_MAX_EXP, this patch makes no changes in
accuracy and has only a modest effect on performance.  When input data
contains values outside those ranges, the patch eliminates more than
99.9% of major errors with a tolerable cost in performance.  In
comparison to Elen Kalda's method, this patch introduces more
performance overhead but reduces major errors by a factor of greater
than 4000.

REFERENCES

[1] Nelson H.F. Beebe, "The Mathematical-Function Computation
    Handbook", Springer International Publishing AG, 2017.
[2] Robert L. Smith.  Algorithm 116: Complex division.  Commun. ACM,
    5(8):435, 1962.
[3] Michael Baudin and Robert L. Smith.  "A robust complex division in
    Scilab," October 2012, available at http://arxiv.org/abs/1210.4539.
[4] Elen Kalda: Complex division improvements in libgcc
    https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html

2020-12-08  Patrick McGehearty  <patrick.mcgehearty@oracle.com>

gcc/c-family/
	* c-cppbuiltin.c (c_cpp_builtins): Add supporting macros for new
	complex divide.
libgcc/
	* libgcc2.c (XMTYPE, XCTYPE, RBIG, RMIN, RMIN2, RMINSCAL, RMAX2):
	Define.
	(__divsc3, __divdc3, __divxc3, __divtc3): Improve complex divide.
	* config/rs6000/_divkc3.c (RBIG, RMIN, RMIN2, RMINSCAL, RMAX2):
	Define.
	(__divkc3): Improve complex divide.
gcc/testsuite/
	* gcc.c-torture/execute/ieee/cdivchkd.c: New test.
	* gcc.c-torture/execute/ieee/cdivchkf.c: Likewise.
	* gcc.c-torture/execute/ieee/cdivchkld.c: Likewise.
2021-04-28 21:14:48 +02:00
# define AMTYPE DFtype
# define MODE sc
Remove LIBGCC2_TF_CEXT target macro.

This patch removes the (undocumented) LIBGCC2_TF_CEXT target macro,
replacing it by -fbuilding-libgcc predefines (and thereby gets rid of
another LIBGCC2_LONG_DOUBLE_TYPE_SIZE conditional, though some more
patches are needed before that target macro can be eliminated).

This macro indicated the suffix used on __builtin_huge_val,
__builtin_copysign, __builtin_fabs built-in function names to produce
the names for a given floating-point mode.  Predefines are added for
all floating-point modes supported for libgcc, not just TFmode.  These
are fully accurate for modes corresponding to float, double and long
double.  For other modes, the suffix for *constants* is determined by
the targetm.c.mode_for_suffix hook (the limit to two possible suffixes
'w' and 'q' being hardcoded in various places).  This is in fact the
suffix for built-in functions as well where such functions exist.

* For i386, the *q functions always exist (whether or not TFmode is
  used for long double).  The *w functions never exist (but this
  doesn't matter for libgcc, since no i386 configuration treats XFmode
  as a supported scalar mode if long double is TFmode; if __float80
  were to be supported for 64-bit Android, properly such functions
  ought to be added).

* For ia64, the *q functions exist for non-HP-UX (under HP-UX, long
  double is TFmode, so they aren't needed).  The *w functions never
  exist.  This is an issue for this libgcc code for the XFmode complex
  functions in libgcc on HP-UX; as I understand it, right now those
  will accidentally be using TFmode versions of those three functions,
  so involving unnecessary conversions, while the sanity check on CEXT
  accidentally passes because all it tests is the sizes of the types.
  Because of the lack of 'w' functions, the patch uses 'l' when the
  constant suffix is 'w', matching what the existing libgcc code would
  do for IA64 HP-UX in that case.

Ideally there would be generic code to create such built-in functions
for all supported floating-point types.  That may be something to
consider if support for TS 18661-3 (standard bindings for IEEE
754-2008, defining names such as _Float128, and function names such as
copysignf128) is added in future.

Bootstrapped with no regressions on x86_64-unknown-linux-gnu.

gcc:
	* system.h (LIBGCC2_TF_CEXT): Poison.
	* config/i386/cygming.h (LIBGCC2_TF_CEXT): Remove.
	* config/i386/darwin.h (LIBGCC2_TF_CEXT): Likewise.
	* config/i386/dragonfly.h (LIBGCC2_TF_CEXT): Likewise.
	* config/i386/freebsd.h (LIBGCC2_TF_CEXT): Likewise.
	* config/i386/gnu-user-common.h (LIBGCC2_TF_CEXT): Likewise.
	* config/i386/openbsdelf.h (LIBGCC2_TF_CEXT): Likewise.
	* config/i386/sol2.h (LIBGCC2_TF_CEXT): Likewise.
	* config/ia64/ia64.h (LIBGCC2_TF_CEXT): Likewise.
	* config/ia64/linux.h (LIBGCC2_TF_CEXT): Likewise.

gcc/c-family:
	* c-cppbuiltin.c (c_cpp_builtins): Define __LIBGCC_*_FUNC_EXT__
	for supported floating-point modes.

libgcc:
	* libgcc2.c (CEXT): Define using __LIBGCC_*_FUNC_EXT__.

From-SVN: r215368
2014-09-19 01:27:26 +02:00
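/* Illustrative sketch (a comment only, not part of the patch): CEXT is
   consumed by token pasting elsewhere in this file, along the lines of

       #define CONCAT2(A,B)   _CONCAT2(A,B)
       #define _CONCAT2(A,B)  A##B
       #define FABS  CONCAT2(__builtin_fabs, CEXT)

   so with __LIBGCC_SF_FUNC_EXT__ defined to 'f' by -fbuilding-libgcc,
   FABS (x) expands to __builtin_fabsf (x).  */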
# define CEXT __LIBGCC_SF_FUNC_EXT__
# define NOTRUNC (!__LIBGCC_SF_EXCESS_PRECISION__)
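/* RBIG, RMIN, RMIN2, RMINSCAL and RMAX2 parameterize the scaling tests
   described in the patch notes above: RBIG guards the halving step
   that prevents overflow near the maximum representable value, RMIN
   flags a subnormal ratio, RMIN2 (EPSILON) and RMINSCAL (1/EPSILON)
   drive the upscaling that moves subnormal inputs into the normal
   range, and RMAX2 bounds the values that may safely be scaled up.  */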
# define RBIG (__LIBGCC_SF_MAX__ / 2)
# define RMIN (__LIBGCC_SF_MIN__)
# define RMIN2 (__LIBGCC_SF_EPSILON__)
# define RMINSCAL (1 / __LIBGCC_SF_EPSILON__)
# define RMAX2 (RBIG * RMIN2)
#elif defined(L_muldc3) || defined(L_divdc3)
# define MTYPE DFtype
# define CTYPE DCtype
# define MODE dc
# define CEXT __LIBGCC_DF_FUNC_EXT__
# define NOTRUNC (!__LIBGCC_DF_EXCESS_PRECISION__)
Practical improvement to libgcc complex divide Correctness and performance test programs used during development of this project may be found in the attachment to: https://www.mail-archive.com/gcc-patches@gcc.gnu.org/msg254210.html Summary of Purpose This patch to libgcc/libgcc2.c __divdc3 provides an opportunity to gain important improvements to the quality of answers for the default complex divide routine (half, float, double, extended, long double precisions) when dealing with very large or very small exponents. The current code correctly implements Smith's method (1962) [2] further modified by c99's requirements for dealing with NaN (not a number) results. When working with input values where the exponents are greater than *_MAX_EXP/2 or less than -(*_MAX_EXP)/2, results are substantially different from the answers provided by quad precision more than 1% of the time. This error rate may be unacceptable for many applications that cannot a priori restrict their computations to the safe range. The proposed method reduces the frequency of "substantially different" answers by more than 99% for double precision at a modest cost of performance. Differences between current gcc methods and the new method will be described. Then accuracy and performance differences will be discussed. Background This project started with an investigation related to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59714. Study of Beebe[1] provided an overview of past and recent practice for computing complex divide. The current glibc implementation is based on Robert Smith's algorithm [2] from 1962. A google search found the paper by Baudin and Smith [3] (same Robert Smith) published in 2012. Elen Kalda's proposed patch [4] is based on that paper. I developed two sets of test data by randomly distributing values over a restricted range and the full range of input values. The current complex divide handled the restricted range well enough, but failed on the full range more than 1% of the time. Baudin and Smith's primary test for "ratio" equals zero reduced the cases with 16 or more error bits by a factor of 5, but still left too many flawed answers. Adding debug print out to cases with substantial errors allowed me to see the intermediate calculations for test values that failed. I noted that for many of the failures, "ratio" was a subnormal. Changing the "ratio" test from check for zero to check for subnormal reduced the 16 bit error rate by another factor of 12. This single modified test provides the greatest benefit for the least cost, but the percentage of cases with greater than 16 bit errors (double precision data) is still greater than 0.027% (2.7 in 10,000). Continued examination of remaining errors and their intermediate computations led to the various tests of input value tests and scaling to avoid under/overflow. The current patch does not handle some of the rare and most extreme combinations of input values, but the random test data is only showing 1 case in 10 million that has an error of greater than 12 bits. That case has 18 bits of error and is due to subtraction cancellation. These results are significantly better than the results reported by Baudin and Smith. Support for half, float, double, extended, and long double precision is included as all are handled with suitable preprocessor symbols in a single source routine. Since half precision is computed with float precision as per current libgcc practice, the enhanced algorithm provides no benefit for half precision and would cost performance. 
The existing constants for each precision
(float: FLT_MAX, FLT_MIN; double: DBL_MAX, DBL_MIN; extended and/or
long double: LDBL_MAX, LDBL_MIN) are used for avoiding the more common
overflow/underflow cases. This use is made generic by defining
appropriate __LIBGCC2_* macros in c-cppbuiltin.c.

Tests are added for when both parts of the denominator have exponents
small enough that, after shifting any subnormal values to normal
values, all input values can be scaled up without risking overflow.
That gained a clear improvement in accuracy. Similarly, when either
numerator is subnormal and the other numerator and both denominator
values are not too large, scaling can be used to reduce the risk of
computing with subnormals. The test and scaling values used all fit
within the exponent range required by the C standard for each
precision.

Float precision has more difficulty getting correct answers than double
precision. When hardware for double precision floating point operations
is available, float precision is now handled with double precision
intermediate calculations and the simple algorithm, just as the
half-precision method uses float precision for intermediate
calculations. Using the higher precision yields exact results for all
tested input values (64-bit double, 32-bit float), with the only
performance cost being the requirement to convert the four input values
from float to double. If double precision hardware is not available,
then float complex divide will use the same improved algorithm as the
other precisions, with a similar change in performance.

Further Improvement

The most common remaining substantial errors are due to accuracy loss
when subtracting nearly equal values. This patch makes no attempt to
improve that situation.

NOTATION

For all of the following, the notation is:
Input complex values:
  a+bi  (a = real part, b = imaginary part)
  c+di
Output complex value:
  e+fi = (a+bi)/(c+di)

For the result tables:
  current = current method (SMITH)
  b1div   = method proposed by Elen Kalda
  b2div   = alternate method considered by Elen Kalda
  new     = new method proposed by this patch

DESCRIPTIONS of different complex divide methods:

NAIVE COMPUTATION (-fcx-limited-range):

  e = (a*c + b*d)/(c*c + d*d)
  f = (b*c - a*d)/(c*c + d*d)

Note that c*c and d*d will overflow or underflow if either c or d is
outside the range 2^-538 to 2^512. This method is available in gcc when
the switch -fcx-limited-range is used; that switch is also enabled by
-ffast-math. Only someone with a clear understanding of the maximum
range of all intermediate values generated by an application should
consider using this switch.

SMITH's METHOD (current libgcc):

  if (fabs (c) < fabs (d))
    {
      r = c/d;
      denom = (c*r) + d;
      e = (a*r + b) / denom;
      f = (b*r - a) / denom;
    }
  else
    {
      r = d/c;
      denom = c + (d*r);
      e = (a + b*r) / denom;
      f = (b - a*r) / denom;
    }

Smith's method is the current default method available with __divdc3.
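To make the naive formula's failure mode concrete, here is a small
self-contained demonstration (a sketch for illustration, not part of
the patch); it assumes standard double evaluation (e.g. x86-64 SSE) so
intermediates are not kept in wider registers:

  /* The naive denominator c*c + d*d overflows to infinity, so the
     naive quotient collapses to 0; Smith's method stays in range.  */
  #include <stdio.h>

  int main (void)
  {
    double a = 1.0, b = 1.0;
    double c = 0x1.0p600, d = 0x1.0p600;    /* |c|, |d| > 2^512 */

    double denom_n = c * c + d * d;         /* overflows to inf */
    printf ("naive: e=%g f=%g\n",
            (a * c + b * d) / denom_n, (b * c - a * d) / denom_n);

    /* Smith's method, else branch since fabs(c) >= fabs(d).  */
    double r = d / c;
    double denom = c + d * r;
    printf ("smith: e=%g f=%g\n",           /* e ~ 2.4e-181, f = 0 */
            (a + b * r) / denom, (b - a * r) / denom);
    return 0;
  }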
Elen Kalda's METHOD

Elen Kalda proposed a patch about a year ago, also based on Baudin and
Smith, but not including tests for subnormals:
https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html [4]
It is compared here for accuracy with this patch. This method applies
the most significant part of the algorithm proposed by Baudin & Smith
(2012) in the paper "A Robust Complex Division in Scilab" [3]. Elen's
method also replaces two divides by one divide and two multiplies due
to the high cost of divide on aarch64. In the comparison sections, this
method is labeled b1div. A variation discussed in that patch which does
not replace the two divides is labeled b2div.

  inline void improved_internal (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
  {
    r = d/c;
    t = 1.0 / (c + (d * r));
    if (r != 0)
      {
        x = (a + (b * r)) * t;
        y = (b - (a * r)) * t;
      }
    else
      {
        /* Changing the order of operations avoids the underflow of r
           impacting the result.  */
        x = (a + (d * (b / c))) * t;
        y = (b - (d * (a / c))) * t;
      }
  }

  if (FABS (d) < FABS (c))
    {
      improved_internal (a, b, c, d);
    }
  else
    {
      improved_internal (b, a, d, c);
      y = -y;
    }

NEW METHOD (proposed by this patch) to replace the current default:

The proposed method starts with an algorithm proposed by Baudin & Smith
(2012) in the paper "A Robust Complex Division in Scilab" [3]. The
patch makes additional modifications to that method for further
reductions in the error rate. The following code shows the #define
values for double precision; see the patch for the #define values used
for other precisions.

  #define RBIG ((DBL_MAX)/2.0)
  #define RMIN (DBL_MIN)
  #define RMIN2 (0x1.0p-53)
  #define RMINSCAL (0x1.0p+51)
  #define RMAX2 ((RBIG)*(RMIN2))

  if (FABS (c) < FABS (d))
    {
      /* Prevent overflow when arguments are near max representable.  */
      if ((FABS (d) > RBIG) || (FABS (a) > RBIG) || (FABS (b) > RBIG))
        {
          a = a * 0.5;
          b = b * 0.5;
          c = c * 0.5;
          d = d * 0.5;
        }
      /* Minimize overflow/underflow issues when c and d are small.  */
      else if (FABS (d) < RMIN2)
        {
          a = a * RMINSCAL;
          b = b * RMINSCAL;
          c = c * RMINSCAL;
          d = d * RMINSCAL;
        }
      /* Scale up when either numerator is subnormal and the other
         values are small enough.  */
      else if (((FABS (a) < RMIN) && (FABS (b) < RMAX2)
                && (FABS (d) < RMAX2))
               || ((FABS (b) < RMIN) && (FABS (a) < RMAX2)
                   && (FABS (d) < RMAX2)))
        {
          a = a * RMINSCAL;
          b = b * RMINSCAL;
          c = c * RMINSCAL;
          d = d * RMINSCAL;
        }

      r = c/d;
      denom = (c*r) + d;
      if (r > RMIN)
        {
          e = (a*r + b) / denom;
          f = (b*r - a) / denom;
        }
      else
        {
          e = (c * (a/d) + b) / denom;
          f = (c * (b/d) - a) / denom;
        }
    }

  [Only the fabs(c) < fabs(d) case is presented here; the full code is
   in the patch.]

Before any computation of the answer, the code checks for any input
values near maximum to allow downscaling to avoid overflow. These
scalings almost never harm the accuracy since they are by a factor
of 2. Values that are over RBIG are relatively rare, but it is easy to
test for them and avoid the overflows.

Testing for RMIN2 reveals when both c and d are less than
[FLT|DBL]_EPSILON. By scaling all values by 1/EPSILON, the code
converts subnormals to normals and avoids the loss of accuracy and the
underflows in intermediate computations that might otherwise occur. If
scaling a and b by 1/EPSILON causes either to overflow, then the
computation will overflow whatever method is used.

Finally, we test for either a or b being subnormal (RMIN) and, if so,
for the other three values being small enough to allow scaling. We only
need to test a single denominator value since we have already
determined which of c and d is larger.

Next, r (the ratio of c to d) is checked for being near zero. Baudin
and Smith checked r for zero. This code improves on that approach by
checking for values less than DBL_MIN (subnormal), which covers roughly
12 times as many cases and substantially improves overall accuracy. If
r is too small, then when it is used in a multiplication, there is a
high chance that the result will underflow to zero, losing significant
accuracy. That underflow is avoided by reordering the computation: when
r is subnormal, the code replaces a*r (= a*(c/d)) with ((a/d)*c), which
is mathematically the same but avoids the unnecessary underflow.
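The effect of that reordering can be seen in a small demonstration (a
sketch with hand-picked values, not from the patch); it assumes
standard double evaluation so the ratio genuinely underflows:

  /* When r = c/d underflows to zero, the direct expression loses the
     contribution of a entirely; the reordered expression keeps it.  */
  #include <stdio.h>

  int main (void)
  {
    double a = 0x1.0p1023, b = 0x1.0p-180;
    double c = 0x1.0p-1000, d = 0x1.0p100;  /* fabs(c) < fabs(d) */

    double r = c / d;                       /* true value 2^-1100 -> 0.0 */
    double denom = c * r + d;               /* == d here */

    double e_direct  = (a * r + b) / denom;       /* ~2^-280: wrong   */
    double e_reorder = (c * (a / d) + b) / denom; /* ~2^-177: correct */

    printf ("r = %a\ndirect    e = %a\nreordered e = %a\n",
            r, e_direct, e_reorder);
    return 0;
  }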
TEST Data

Two sets of data are used to test these methods. Both sets contain
10 million pairs of complex values. The exponents and mantissas are
generated using multiple calls to random() and then combining the
results. Only values which give complex divide results that are
representable in the appropriate precision after being computed in quad
precision are used.

The first data set is labeled "moderate exponents". The exponent range
is limited to -DBL_MAX_EXP/2 to DBL_MAX_EXP/2 for double precision
(with FLT_MAX_EXP or LDBL_MAX_EXP for the other precisions). The second
data set is labeled "full exponents"; its exponent range is the full
exponent range, including subnormals, for a given precision.

ACCURACY Test results:

Note: The following accuracy tests are based on IEEE-754 arithmetic.

Note: All results reported are based on use of fused multiply-add. If
fused multiply-add is not used, the error rate increases, giving more
1 and 2 bit errors for both the current and the new complex divide.
Differences between using fused multiply-add and not using it that are
greater than 2 bits occur less than 1 time in a million.

The complex divide methods are evaluated by determining the percentage
of values that exceed differences in low order bits. If a "2 bit" test
shows 1%, that means 1% of 10,000,000 values (100,000) have either a
real or imaginary part that differs from the quad precision result by
more than the last 2 bits. Results are reported for differences greater
than or equal to 1 bit, 2 bits, 8 bits, 16 bits, 24 bits, and 52 bits
for double precision.

Even when the patch avoids overflows and underflows, some input values
are expected to have errors due to the potential for catastrophic
roundoff from floating point subtraction. For example, when b*c and a*d
are nearly equal, the result of subtraction may lose several places of
accuracy. This patch does not attempt to detect or minimize this type
of error, but neither does it increase it.

Results for Elen Kalda's method (with both 1 and 2 divides) are shown
only in the double precision tables; the float tables compare just the
current method and the new method. In the following charts, lower
values are better.

  current - current complex divide in libgcc
  b1div   - Elen Kalda's method from Baudin & Smith with one divide
  b2div   - Elen Kalda's method from Baudin & Smith with two divides
  new     - this patch, which uses 2 divides

  ===================================================
  Errors    Moderate Dataset
  gtr eq    current    b1div      b2div      new
  ======    ========   ========   ========   ========
   1 bit    0.24707%   0.92986%   0.24707%   0.24707%
   2 bits   0.01762%   0.01770%   0.01762%   0.01762%
   8 bits   0.00026%   0.00026%   0.00026%   0.00026%
  16 bits   0.00000%   0.00000%   0.00000%   0.00000%
  24 bits   0%         0%         0%         0%
  52 bits   0%         0%         0%         0%
  ===================================================
  Table 1: Errors with Moderate Dataset (Double Precision)

Note in Table 1 that the old and new methods give identical error rates
for data with moderate exponents. Errors exceeding 16 bits are
exceedingly rare. There are substantial increases in the 1 bit error
rate for b1div (the 1 divide/2 multiplies method) as compared to b2div
(the 2 divides method). These differences are minimal for 2 bits and
larger error measurements.
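For reference, one way to compute the "bits of error" measure is
sketched below (a plausible reconstruction, not the patch's test code;
long double stands in for the quad-precision reference):

  #include <math.h>

  /* Roughly how many trailing bits of a double result disagree with a
     higher-precision reference: the difference measured in double ulps
     of the reference, expressed as a bit count.  */
  static int
  error_bits (double result, long double ref)
  {
    long double diff = fabsl ((long double) result - ref);
    if (diff == 0)
      return 0;
    int exp;
    frexpl (ref, &exp);                          /* ref = m * 2^exp */
    long double ulp = scalblnl (1.0L, exp - 53); /* one double ulp at ref */
    return (int) ceill (log2l (diff / ulp + 1)); /* 1 ulp -> 1 bit */
  }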
  ===================================================
  Errors    Full Dataset
  gtr eq    current    b1div      b2div      new
  ======    ========   ========   ========   ========
   1 bit    2.05%      1.23842%   0.67130%   0.16664%
   2 bits   1.88%      0.51615%   0.50354%   0.00900%
   8 bits   1.77%      0.42856%   0.42168%   0.00011%
  16 bits   1.63%      0.33840%   0.32879%   0.00001%
  24 bits   1.51%      0.25583%   0.24405%   0.00000%
  52 bits   1.13%      0.01886%   0.00350%   0.00000%
  ===================================================
  Table 2: Errors with Full Dataset (Double Precision)

Table 2 shows significant differences in the error rates. First, the
comparison between b1div and b2div shows a significantly higher error
rate for the b1div method, both for single bit errors and well beyond.
Even at 52 bits, the b1div method gets completely wrong answers more
than 5 times as often as b2div. To retain accuracy comparable to the
current complex divide for small exponents, and because of the increase
in errors for large exponents, I chose the more accurate method of two
divides.

The current method has more than 1.6% of cases where the low 24 bits of
the mantissa differ from the correct answer, and more than 1.1% of
cases where the answer is completely wrong. The new method shows less
than one case in 10,000 with greater than two bits of error and only
one case in 10 million with greater than 16 bits of error. The new
patch reduces 8 bit errors by a factor of 16,000 and virtually
eliminates completely wrong answers.

As noted above, for architectures with double precision hardware, the
new method uses that hardware for the intermediate calculations before
returning the result in float precision. Testing of the new patch has
shown zero errors, as seen in Tables 3 and 4.

Correctness for float

  =============================
  Errors    Moderate Dataset
  gtr eq    current     new
  ======    ========    ========
   1 bit    28.68070%   0%
   2 bits   0.64386%    0%
   8 bits   0.00401%    0%
  16 bits   0.00001%    0%
  24 bits   0%          0%
  =============================
  Table 3: Errors with Moderate Dataset (float)

  =============================
  Errors    Full Dataset
  gtr eq    current     new
  ======    ========    ========
   1 bit    19.98%      0%
   2 bits   3.20%       0%
   8 bits   1.97%       0%
  16 bits   1.08%       0%
  24 bits   0.55%       0%
  =============================
  Table 4: Errors with Full Dataset (float)

As before, the current method shows a troubling rate of extreme errors.

There are very minor changes in accuracy for half precision since the
code changes from Smith's method to the simple method: 5 out of
1 million test cases show correct answers instead of 1 or 2 bit errors.
libgcc computes half-precision functions in float precision, allowing
the existing methods to avoid overflow/underflow issues for the allowed
range of exponents for half precision.

Extended precision (using the x87 80-bit format on x86) and long double
(using the IEEE-754 128-bit format on x86 and aarch64) both have 15-bit
exponents, as compared to 11-bit exponents in double precision. We note
that the C standard also allows long double to be implemented with the
equivalent range of double. The RMIN2 and RMINSCAL constants are
selected to work within the double range as well as with the extended
and 128-bit ranges. We limit the performance and accuracy discussion to
the 80-bit and 128-bit formats as seen on x86.

The extended and long double precision investigations were more
limited. Aarch64 does not support extended precision, but does support
the software implementation of 128-bit long double precision.
For x86, long double defaults to the 80-bit precision, but the
-mlong-double-128 flag switches to the software implementation of
128-bit precision. The 80-bit and 128-bit precisions have the same
exponent range, while the 128-bit precision has a longer mantissa.
Since this change is only aimed at avoiding underflow/overflow for
extreme exponents, I studied the extended precision results on x86 for
100,000 values. The limited exponent dataset showed no differences. For
the dataset with the full exponent range, the current and new values
showed major differences (greater than 32 bits) in 567 cases out of
100,000 (0.56%). In every one of these cases, the ratio c/d or d/c (as
appropriate) was zero or subnormal, indicating the advantage of the new
method and its continued correctness where needed.

PERFORMANCE Test results

In order for a library change to be practical, it is necessary to show
that the slowdown is tolerable. The slowdowns observed are much less
than would be seen by, for example, switching from hardware double
precision to a software quad precision, which on the tested machines
causes a slowdown of around 100x. The actual slowdown depends on the
machine architecture. It also depends on the nature of the input data.
If underflow/overflow is rare, then implementations with strong branch
prediction will only slow down by a few cycles. If underflow/overflow
is common, then the branch predictors will be less accurate and the
cost will be higher.

Results from two machines are presented as examples of the overhead of
the new method. The one labeled x86 is a 5-year-old Intel x86 processor
and the one labeled aarch64 is a 3-year-old arm64 processor. In the
following chart, the times are averaged over a one million value data
set. All values are scaled to set the time of the current method to
1.0. Lower values are better: a value of less than 1.0 is faster than
the current method and a value greater than 1.0 is slower.

  ================================================
                Moderate set       Full set
                x86    aarch64     x86    aarch64
  ===========   ===============    ===============
  float         0.59   0.79        0.45   0.81
  double        1.04   1.24        1.38   1.56
  long double   1.13   1.24        1.29   1.25
  ================================================
  Table 5: Performance Comparisons (ratio new/current)

The above table omits the timing comparison of the 1 divide and
2 multiply method with the 2 divide approach.

The float results show a clear performance improvement due to using the
simple method with double precision for intermediate calculations. The
double results with the new method show less overhead for the moderate
dataset than for the full dataset. That is because the moderate dataset
never takes the new branches which protect from under/overflow. The
better the branch predictor, the lower the cost for these untaken
branches. Both platforms are somewhat dated, with the x86 having a
better branch predictor, which reduces the cost of the additional
branches in the new code. Of course, the relative slowdown may be
greater for some architectures, especially those with limited branch
prediction combined with a high cost of misprediction.

The long double results are fairly consistent in showing a moderate
additional cost for the extra branches and calculations in all cases.

The observed cost for all precisions is claimed to be tolerable on the
grounds that:

(a) the cost is worthwhile considering the accuracy improvement shown;
(b) most applications will only spend a small fraction of their time
    calculating complex divide;
(c) it is much less than the cost of extended precision; and
(d) users are not forced to use it (as described below).

Those users who find this degree of slowdown unsatisfactory may use the
gcc switch -fcx-fortran-rules, which does not use the library routine
and instead inlines Smith's method without the C99 requirement for
dealing with NaN results. The proposed patch for the libgcc complex
divide does not affect the code generated by -fcx-fortran-rules.

SUMMARY

When the input data to complex divide has exponents whose absolute
value is less than half of *_MAX_EXP, this patch makes no changes in
accuracy and has only a modest effect on performance. When the input
data contains values outside those ranges, the patch eliminates more
than 99.9% of major errors with a tolerable cost in performance. In
comparison to Elen Kalda's method, this patch introduces more
performance overhead but reduces major errors by a factor greater
than 4000.

REFERENCES

[1] Nelson H.F. Beebe, "The Mathematical-Function Computation
    Handbook," Springer International Publishing AG, 2017.
[2] Robert L. Smith. Algorithm 116: Complex division. Commun. ACM,
    5(8):435, 1962.
[3] Michael Baudin and Robert L. Smith. "A robust complex division in
    Scilab," October 2012, available at http://arxiv.org/abs/1210.4539.
[4] Elen Kalda: Complex division improvements in libgcc
    https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html

2020-12-08  Patrick McGehearty  <patrick.mcgehearty@oracle.com>

gcc/c-family/
        * c-cppbuiltin.c (c_cpp_builtins): Add supporting macros for new
        complex divide.

libgcc/
        * libgcc2.c (XMTYPE, XCTYPE, RBIG, RMIN, RMIN2, RMINSCAL, RMAX2):
        Define.
        (__divsc3, __divdc3, __divxc3, __divtc3): Improve complex divide.
        * config/rs6000/_divkc3.c (RBIG, RMIN, RMIN2, RMINSCAL, RMAX2):
        Define.
        (__divkc3): Improve complex divide.

gcc/testsuite/
        * gcc.c-torture/execute/ieee/cdivchkd.c: New test.
        * gcc.c-torture/execute/ieee/cdivchkf.c: Likewise.
        * gcc.c-torture/execute/ieee/cdivchkld.c: Likewise.
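The library entry point being improved can also be exercised directly:
__divdc3 takes the real and imaginary parts of the numerator and
denominator as four scalar arguments and returns the complex quotient
(a+ib)/(c+id). A minimal driver for illustration (the patch's actual
tests are the cdivchk* programs listed above):

  #include <complex.h>
  #include <stdio.h>

  extern double _Complex __divdc3 (double, double, double, double);

  int main (void)
  {
    /* An extreme-exponent case of the kind this patch targets;
       the exact quotient is 1 + 0i.  */
    double _Complex q = __divdc3 (0x1.0p900, 0x1.0p900,
                                  0x1.0p900, 0x1.0p900);
    printf ("%g + %gi\n", creal (q), cimag (q));
    return 0;
  }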
2021-04-28 21:14:48 +02:00
# define RBIG (__LIBGCC_DF_MAX__ / 2)
# define RMIN (__LIBGCC_DF_MIN__)
# define RMIN2 (__LIBGCC_DF_EPSILON__)
# define RMINSCAL (1 / __LIBGCC_DF_EPSILON__)
# define RMAX2 (RBIG * RMIN2)
#elif defined(L_mulxc3) || defined(L_divxc3)
# define MTYPE XFtype
# define CTYPE XCtype
# define MODE xc
Remove LIBGCC2_TF_CEXT target macro.

This patch removes the (undocumented) LIBGCC2_TF_CEXT target macro,
replacing it by -fbuilding-libgcc predefines (and thereby gets rid of
another LIBGCC2_LONG_DOUBLE_TYPE_SIZE conditional, though some more
patches are needed before that target macro can be eliminated).

This macro indicated the suffix used on __builtin_huge_val,
__builtin_copysign, __builtin_fabs built-in function names to produce
the names for a given floating-point mode. Predefines are added for all
floating-point modes supported for libgcc, not just TFmode. These are
fully accurate for modes corresponding to float, double and long
double. For other modes, the suffix for *constants* is determined by
the targetm.c.mode_for_suffix hook (the limit to two possible suffixes
'w' and 'q' being hardcoded in various places). This is in fact the
suffix for built-in functions as well where such functions exist.

* For i386, the *q functions always exist (whether or not TFmode is
  used for long double). The *w functions never exist (but this doesn't
  matter for libgcc, since no i386 configuration treats XFmode as a
  supported scalar mode if long double is TFmode; if __float80 were to
  be supported for 64-bit Android, properly such functions ought to be
  added).

* For ia64, the *q functions exist for non-HP-UX (under HP-UX, long
  double is TFmode, so they aren't needed). The *w functions never
  exist. This is an issue for this libgcc code for the XFmode complex
  functions in libgcc on HP-UX; as I understand it, right now those
  will accidentally be using TFmode versions of those three functions,
  so involving unnecessary conversions, while the sanity check on CEXT
  accidentally passes because all it tests is the sizes of the types.
  Because of the lack of 'w' functions, the patch uses 'l' when the
  constant suffix is 'w', matching what the existing libgcc code would
  do for IA64 HP-UX in that case.

Ideally there would be generic code to create such built-in functions
for all supported floating-point types. That may be something to
consider if support for TS 18661-3 (standard bindings for IEEE
754-2008, defining names such as _Float128, and function names such as
copysignf128) is added in future.

Bootstrapped with no regressions on x86_64-unknown-linux-gnu.

gcc:
        * system.h (LIBGCC2_TF_CEXT): Poison.
        * config/i386/cygming.h (LIBGCC2_TF_CEXT): Remove.
        * config/i386/darwin.h (LIBGCC2_TF_CEXT): Likewise.
        * config/i386/dragonfly.h (LIBGCC2_TF_CEXT): Likewise.
        * config/i386/freebsd.h (LIBGCC2_TF_CEXT): Likewise.
        * config/i386/gnu-user-common.h (LIBGCC2_TF_CEXT): Likewise.
        * config/i386/openbsdelf.h (LIBGCC2_TF_CEXT): Likewise.
        * config/i386/sol2.h (LIBGCC2_TF_CEXT): Likewise.
        * config/ia64/ia64.h (LIBGCC2_TF_CEXT): Likewise.
        * config/ia64/linux.h (LIBGCC2_TF_CEXT): Likewise.

gcc/c-family:
        * c-cppbuiltin.c (c_cpp_builtins): Define __LIBGCC_*_FUNC_EXT__
        for supported floating-point modes.

libgcc:
        * libgcc2.c (CEXT): Define using __LIBGCC_*_FUNC_EXT__.

From-SVN: r215368
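The pattern is simple token pasting; a minimal sketch follows (the
CONCAT helpers and macro names here are illustrative, not necessarily
the exact ones in libgcc2.c, which defines CEXT from
__LIBGCC_*_FUNC_EXT__ as shown below):

  #define CONCAT2(A, B)  CONCAT2_ (A, B)
  #define CONCAT2_(A, B) A##B

  #define CEXT l   /* e.g. the suffix for XFmode long double is 'l' */

  #define FABS     CONCAT2 (__builtin_fabs, CEXT)     /* __builtin_fabsl */
  #define COPYSIGN CONCAT2 (__builtin_copysign, CEXT) /* __builtin_copysignl */
  #define INF      CONCAT2 (__builtin_huge_val, CEXT) () /* __builtin_huge_vall () */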
2014-09-19 01:27:26 +02:00
# define CEXT __LIBGCC_XF_FUNC_EXT__
# define NOTRUNC (!__LIBGCC_XF_EXCESS_PRECISION__)
2021-04-28 21:14:48 +02:00
# define RBIG (__LIBGCC_XF_MAX__ / 2)
# define RMIN (__LIBGCC_XF_MIN__)
# define RMIN2 (__LIBGCC_XF_EPSILON__)
# define RMINSCAL (1 / __LIBGCC_XF_EPSILON__)
# define RMAX2 (RBIG * RMIN2)
#elif defined(L_multc3) || defined(L_divtc3)
# define MTYPE TFtype
# define CTYPE TCtype
# define MODE tc
2014-09-19 01:27:26 +02:00
# define CEXT __LIBGCC_TF_FUNC_EXT__
# define NOTRUNC (!__LIBGCC_TF_EXCESS_PRECISION__)
Fix for powerpc64 long double complex divide failure

- - - -

New in version 6: Due to an oversight (i.e., a coding error), version 5
changed the use of __LIBGCC_TF_EPSILON__ to __LIBGCC_DF_EPSILON__ but
not the other LIBGCC_TF values. For correct execution of the long
double test case it is necessary to also switch to using
__LIBGCC_DF_MIN__. For consistency we also switch to using
__LIBGCC_DF_MAX__. LDBL_MIN is 2**53 times larger than DBL_MIN. The
larger value causes the code to switch the order of computation when it
is not optimal, resulting in failure for one of the values in the
cdivchk_ld.c test. Using DBL_MIN does not cause that failure. There may
be opportunity for further refinement of IBM128 format long double
complex divide, but that's beyond the scope of this patch.

- - - -

This revision adds a test in libgcc/libgcc2.c for when
"__LIBGCC_TF_MANT_DIG__ == 106" to use __LIBGCC_DF_EPSILON__ instead of
__LIBGCC_TF_EPSILON__. That is specific to IBM 128-bit format long
doubles, where EPSILON is very, very small and 1/EPSILON overflows to
infinity. This change avoids the overflow without affecting any other
platform. Discussion in the patch is adjusted to reflect this
limitation.

It does not make any changes to .../rs6000/_divkc3.c, leaving it to use
__LIBGCC_KF__*. That means the upstream gcc will not build in older IBM
environments that do not recognize the KF floating point mode properly.
Environments that do not need IBM long double support do build cleanly.

- - - -

This patch addresses the failure of powerpc64 long double complex
divide in native IBM long double format after the patch "Practical
improvement to libgcc complex divide". The new code uses the following
macros, which are intended to be mapped to appropriate values according
to the underlying hardware representation. See
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101104

  RBIG      a value near the maximum representation
  RMIN      a value near the minimum representation
            (but not in the subnormal range)
  RMIN2     a value moderately less than 1
  RMINSCAL  the inverse of RMIN2
  RMAX2     RBIG * RMIN2 - a value to limit scaling to not overflow

When "long double" values were not using the IEEE 128-bit format but
the traditional IBM 128-bit, the previous code used the LDBL values,
which caused overflow for RMINSCAL. The new code uses the DBL values.

  RBIG      LDBL_MAX = 0x1.fffffffffffff800p+1022
            DBL_MAX  = 0x1.fffffffffffff000p+1022
  RMIN      LDBL_MIN = 0x1.0000000000000000p-969
            DBL_MIN  = 0x1.0000000000000000p-1022
  RMIN2     LDBL_EPSILON = 0x0.0000000000001000p-1022 = 0x1.0p-1074
            DBL_EPSILON  = 0x1.0000000000000000p-52
  RMINSCAL  1/LDBL_EPSILON = inf
            (0x1.0p+1074 does not fit in IBM 128-bit)
            1/DBL_EPSILON  = 0x1.0000000000000000p+52
  RMAX2     RBIG * RMIN2 (LDBL) = 0x1.fffffffffffff800p-52
            RBIG * RMIN2 (DBL)  = 0x1.fffffffffffff000p+970

The MAX and MIN values have only modest changes since the maximum and
minimum values are about the same as for double precision. The EPSILON
field is considerably different. Due to how very small values can be
represented in the lower 64 bits of the IBM 128-bit floating point,
EPSILON is extremely small, so far beyond the desired value that
inversion of the value overflows, and even without the overflow, RMAX2
would be so small as to eliminate most usage of the test.

The change has been tested on gcc135.fsffrance.org and gains the
expected improvements in accuracy for long double complex divide.

libgcc/
        PR target/101104
        * libgcc2.c (RMIN2, RMINSCAL, RMAX2): Use more correct values
        for native IBM 128-bit.
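A small illustration of the numbers involved (a sketch, not from the
patch; it must be built with IBM double-double as the long double
format, e.g. powerpc64 with -mabi=ibmlongdouble, to show the overflow):

  #include <float.h>
  #include <stdio.h>

  int main (void)
  {
    /* With IBM 128-bit long double, LDBL_EPSILON is ~0x1p-1074, so its
       inverse overflows; the DF (double) value stays finite, which is
       why the defines below fall back to the DF macros.  */
    printf ("LDBL_EPSILON     = %La\n", LDBL_EPSILON);
    printf ("1 / LDBL_EPSILON = %Lg\n", 1.0L / LDBL_EPSILON); /* inf */
    printf ("1 / DBL_EPSILON  = %g\n", 1.0 / DBL_EPSILON);    /* 2^52 */
    return 0;
  }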
2021-10-04 00:07:06 +02:00
# if __LIBGCC_TF_MANT_DIG__ == 106
# define RBIG (__LIBGCC_DF_MAX__ / 2)
# define RMIN (__LIBGCC_DF_MIN__)
# define RMIN2 (__LIBGCC_DF_EPSILON__)
# define RMINSCAL (1 / __LIBGCC_DF_EPSILON__)
# else
# define RBIG (__LIBGCC_TF_MAX__ / 2)
# define RMIN (__LIBGCC_TF_MIN__)
# define RMIN2 (__LIBGCC_TF_EPSILON__)
# define RMINSCAL (1 / __LIBGCC_TF_EPSILON__)
# endif
Practical improvement to libgcc complex divide Correctness and performance test programs used during development of this project may be found in the attachment to: https://www.mail-archive.com/gcc-patches@gcc.gnu.org/msg254210.html Summary of Purpose This patch to libgcc/libgcc2.c __divdc3 provides an opportunity to gain important improvements to the quality of answers for the default complex divide routine (half, float, double, extended, long double precisions) when dealing with very large or very small exponents. The current code correctly implements Smith's method (1962) [2] further modified by c99's requirements for dealing with NaN (not a number) results. When working with input values where the exponents are greater than *_MAX_EXP/2 or less than -(*_MAX_EXP)/2, results are substantially different from the answers provided by quad precision more than 1% of the time. This error rate may be unacceptable for many applications that cannot a priori restrict their computations to the safe range. The proposed method reduces the frequency of "substantially different" answers by more than 99% for double precision at a modest cost of performance. Differences between current gcc methods and the new method will be described. Then accuracy and performance differences will be discussed. Background This project started with an investigation related to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59714. Study of Beebe[1] provided an overview of past and recent practice for computing complex divide. The current glibc implementation is based on Robert Smith's algorithm [2] from 1962. A google search found the paper by Baudin and Smith [3] (same Robert Smith) published in 2012. Elen Kalda's proposed patch [4] is based on that paper. I developed two sets of test data by randomly distributing values over a restricted range and the full range of input values. The current complex divide handled the restricted range well enough, but failed on the full range more than 1% of the time. Baudin and Smith's primary test for "ratio" equals zero reduced the cases with 16 or more error bits by a factor of 5, but still left too many flawed answers. Adding debug print out to cases with substantial errors allowed me to see the intermediate calculations for test values that failed. I noted that for many of the failures, "ratio" was a subnormal. Changing the "ratio" test from check for zero to check for subnormal reduced the 16 bit error rate by another factor of 12. This single modified test provides the greatest benefit for the least cost, but the percentage of cases with greater than 16 bit errors (double precision data) is still greater than 0.027% (2.7 in 10,000). Continued examination of remaining errors and their intermediate computations led to the various tests of input value tests and scaling to avoid under/overflow. The current patch does not handle some of the rare and most extreme combinations of input values, but the random test data is only showing 1 case in 10 million that has an error of greater than 12 bits. That case has 18 bits of error and is due to subtraction cancellation. These results are significantly better than the results reported by Baudin and Smith. Support for half, float, double, extended, and long double precision is included as all are handled with suitable preprocessor symbols in a single source routine. Since half precision is computed with float precision as per current libgcc practice, the enhanced algorithm provides no benefit for half precision and would cost performance. 
Further investigation showed changing the half precision algorithm to use the simple formula (real=a*c+b*d imag=b*c-a*d) caused no loss of precision and modest improvement in performance. The existing constants for each precision: float: FLT_MAX, FLT_MIN; double: DBL_MAX, DBL_MIN; extended and/or long double: LDBL_MAX, LDBL_MIN are used for avoiding the more common overflow/underflow cases. This use is made generic by defining appropriate __LIBGCC2_* macros in c-cppbuiltin.c. Tests are added for when both parts of the denominator have exponents small enough to allow shifting any subnormal values to normal values all input values could be scaled up without risking overflow. That gained a clear improvement in accuracy. Similarly, when either numerator was subnormal and the other numerator and both denominator values were not too large, scaling could be used to reduce risk of computing with subnormals. The test and scaling values used all fit within the allowed exponent range for each precision required by the C standard. Float precision has more difficulty with getting correct answers than double precision. When hardware for double precision floating point operations is available, float precision is now handled in double precision intermediate calculations with the simple algorithm the same as the half-precision method of using float precision for intermediate calculations. Using the higher precision yields exact results for all tested input values (64-bit double, 32-bit float) with the only performance cost being the requirement to convert the four input values from float to double. If double precision hardware is not available, then float complex divide will use the same improved algorithm as the other precisions with similar change in performance. Further Improvement The most common remaining substantial errors are due to accuracy loss when subtracting nearly equal values. This patch makes no attempt to improve that situation. NOTATION For all of the following, the notation is: Input complex values: a+bi (a= real part, b= imaginary part) c+di Output complex value: e+fi = (a+bi)/(c+di) For the result tables: current = current method (SMITH) b1div = method proposed by Elen Kalda b2div = alternate method considered by Elen Kalda new = new method proposed by this patch DESCRIPTIONS of different complex divide methods: NAIVE COMPUTATION (-fcx-limited-range): e = (a*c + b*d)/(c*c + d*d) f = (b*c - a*d)/(c*c + d*d) Note that c*c and d*d will overflow or underflow if either c or d is outside the range 2^-538 to 2^512. This method is available in gcc when the switch -fcx-limited-range is used. That switch is also enabled by -ffast-math. Only one who has a clear understanding of the maximum range of all intermediate values generated by an application should consider using this switch. SMITH's METHOD (current libgcc): if(fabs(c)<fabs(d) { r = c/d; denom = (c*r) + d; e = (a*r + b) / denom; f = (b*r - a) / denom; } else { r = d/c; denom = c + (d*r); e = (a + b*r) / denom; f = (b - a*r) / denom; } Smith's method is the current default method available with __divdc3. Elen Kalda's METHOD Elen Kalda proposed a patch about a year ago, also based on Baudin and Smith, but not including tests for subnormals: https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html [4] It is compared here for accuracy with this patch. This method applies the most significant part of the algorithm proposed by Baudin&Smith (2012) in the paper "A Robust Complex Division in Scilab" [3]. 
Elen's method also replaces two divides by one divide and two
multiplies due to the high cost of divide on aarch64.  In the
comparison sections, this method will be labeled b1div.  A variation
discussed in that patch which does not replace the two divides will be
labeled b2div.

  inline void improved_internal (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
  {
    r = d/c;
    t = 1.0 / (c + (d * r));
    if (r != 0)
      {
        x = (a + (b * r)) * t;
        y = (b - (a * r)) * t;
      }
    else
      {
        /* Changing the order of operations avoids the underflow of r
           impacting the result.  */
        x = (a + (d * (b / c))) * t;
        y = (b - (d * (a / c))) * t;
      }
  }

  if (FABS (d) < FABS (c))
    {
      improved_internal (a, b, c, d);
    }
  else
    {
      improved_internal (b, a, d, c);
      y = -y;
    }

NEW METHOD (proposed by patch) to replace the current default method:

The proposed method starts with an algorithm proposed by Baudin &
Smith (2012) in the paper "A Robust Complex Division in Scilab" [3].
The patch makes additional modifications to that method for further
reductions in the error rate.  The following code shows the #define
values for double precision.  See the patch for the #define values
used for other precisions.

  #define RBIG     ((DBL_MAX)/2.0)
  #define RMIN     (DBL_MIN)
  #define RMIN2    (0x1.0p-53)
  #define RMINSCAL (0x1.0p+51)
  #define RMAX2    ((RBIG)*(RMIN2))

  if (FABS (c) < FABS (d))
    {
      /* Prevent overflow when arguments are near max representable.  */
      if ((FABS (d) > RBIG) || (FABS (a) > RBIG) || (FABS (b) > RBIG))
        {
          a = a * 0.5;
          b = b * 0.5;
          c = c * 0.5;
          d = d * 0.5;
        }
      /* Minimize overflow/underflow issues when c and d are small.  */
      else if (FABS (d) < RMIN2)
        {
          a = a * RMINSCAL;
          b = b * RMINSCAL;
          c = c * RMINSCAL;
          d = d * RMINSCAL;
        }
      else
        {
          if (((FABS (a) < RMIN) && (FABS (b) < RMAX2)
               && (FABS (d) < RMAX2))
              || ((FABS (b) < RMIN) && (FABS (a) < RMAX2)
                  && (FABS (d) < RMAX2)))
            {
              a = a * RMINSCAL;
              b = b * RMINSCAL;
              c = c * RMINSCAL;
              d = d * RMINSCAL;
            }
        }
      r = c/d;
      denom = (c*r) + d;
      if (r > RMIN)
        {
          e = (a*r + b) / denom;
          f = (b*r - a) / denom;
        }
      else
        {
          e = (c * (a/d) + b) / denom;
          f = (c * (b/d) - a) / denom;
        }
    }
  [ only presenting the fabs(c) < fabs(d) case here; full code in patch ]

Before any computation of the answer, the code checks for any input
values near maximum to allow down scaling to avoid overflow.  These
scalings almost never harm the accuracy since they are by a factor
of 2.  Values that are over RBIG are relatively rare, but it is easy
to test for them and allows avoidance of overflows.

Testing for RMIN2 reveals when both c and d are less than
[FLT|DBL]_EPSILON.  By scaling all values by 1/EPSILON, the code
converts subnormals to normals and avoids loss of accuracy and
underflows in intermediate computations that otherwise might occur.
If scaling a and b by 1/EPSILON causes either to overflow, then the
computation will overflow whatever method is used.

Finally, we test for either a or b being subnormal (RMIN) and, if so,
for the other three values being small enough to allow scaling.  We
only need to test a single denominator value since we have already
determined which of c and d is larger.

Next, r (the ratio of c to d) is checked for being near zero.  Baudin
and Smith checked r for zero.  This code improves on that approach by
checking instead for values less than DBL_MIN (subnormal), which
covers roughly 12 times as many cases and substantially improves
overall accuracy.  If r is too small, then when it is used in a
multiplication there is a high chance that the result will underflow
to zero, losing significant accuracy.  That underflow is avoided by
reordering the computation, as the standalone sketch below
illustrates.
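
As a concrete, hedged illustration of that reordering (this standalone
program is not part of the patch; the operand values are chosen only
to force r into the subnormal range, and the scaling branches above
are deliberately omitted):

  #include <stdio.h>

  int main (void)
  {
    double a = 1.2345678901234567e300, b = 0.0;
    double c = 1.0e-155, d = 1.0e155;   /* fabs(c) < fabs(d) */

    /* r is ~1e-310: subnormal, so it carries well under the usual
       53 significant bits.  */
    double r = c / d;
    double denom = c * r + d;

    double e_direct  = (a * r + b) / denom;       /* truncated r used */
    double e_reorder = (c * (a / d) + b) / denom; /* stays normal     */

    /* The two results differ in their low-order digits because the
       direct form inherits the precision lost in the subnormal r.  */
    printf ("direct:    %.17e\nreordered: %.17e\n", e_direct, e_reorder);
    return 0;
  }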

When r is subnormal, the code replaces a*r (= a*(c/d)) with ((a/d)*c),
which is mathematically the same but avoids the unnecessary underflow.

TEST Data

Two sets of data are presented to test these methods.  Both sets
contain 10 million pairs of complex values.  The exponents and
mantissas are generated using multiple calls to random() and then
combining the results.  Only values which give results to complex
divide that are representable in the appropriate precision after being
computed in quad precision are used.

The first data set is labeled "moderate exponents".  The exponent
range is limited to -DBL_MAX_EXP/2 to DBL_MAX_EXP/2 for double
precision (use FLT_MAX_EXP or LDBL_MAX_EXP for the appropriate
precisions).

The second data set is labeled "full exponents".  The exponent range
for these cases is the full exponent range, including subnormals, for
a given precision.

ACCURACY Test results:

Note: The following accuracy tests are based on IEEE-754 arithmetic.

Note: All results reported are based on use of fused multiply-add.  If
fused multiply-add is not used, the error rate increases, giving more
1- and 2-bit errors for both the current and the new complex divide.
Differences between using fused multiply-add and not using it that are
greater than 2 bits occur in fewer than 1 in a million cases.

The complex divide methods are evaluated by determining the percentage
of values that exceed differences in low order bits.  If a "2 bit"
test shows 1%, that means 1% of 10,000,000 values (100,000) have
either a real or imaginary part that differs from the quad precision
result by more than the last 2 bits.  (A minimal sketch of one way to
count such bit differences appears after Table 1.)

Results are reported for differences greater than or equal to 1 bit,
2 bits, 8 bits, 16 bits, 24 bits, and 52 bits for double precision.
Even when the patch avoids overflows and underflows, some input values
are expected to have errors due to the potential for catastrophic
roundoff from floating point subtraction.  For example, when b*c and
a*d are nearly equal, the result of subtraction may lose several
places of accuracy.  This patch does not attempt to detect or minimize
this type of error, but neither does it increase it.

Results for Elen Kalda's method (with both one and two divides) and
for the new method are shown only in the double precision tables.

In the following charts, lower values are better.

current - current complex divide in libgcc
b1div   - Elen Kalda's method from Baudin & Smith with one divide
b2div   - Elen Kalda's method from Baudin & Smith with two divides
new     - this patch, which uses 2 divides

===================================================
Errors            Moderate Dataset
gtr eq     current    b1div      b2div      new
======    ========   ========   ========   ========
 1 bit    0.24707%   0.92986%   0.24707%   0.24707%
 2 bits   0.01762%   0.01770%   0.01762%   0.01762%
 8 bits   0.00026%   0.00026%   0.00026%   0.00026%
16 bits   0.00000%   0.00000%   0.00000%   0.00000%
24 bits   0%         0%         0%         0%
52 bits   0%         0%         0%         0%
===================================================
Table 1: Errors with Moderate Dataset (Double Precision)

Note in Table 1 that both the old and new methods give identical error
rates for data with moderate exponents.  Errors exceeding 16 bits are
exceedingly rare.  There are substantial increases in the 1-bit error
rates for b1div (the 1 divide/2 multiplies method) as compared to
b2div (the 2 divides method).  These differences are minimal for 2
bits and larger error measurements.
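
The commit message does not include the measurement harness itself.
As a hedged sketch (the helper names to_ordered and error_bits are
hypothetical, and IEEE-754 doubles are assumed), one conventional way
to count "bits in error" is the ULP distance between the computed
double and the higher-precision reference rounded to double:

  #include <stdint.h>
  #include <string.h>

  /* Map an IEEE-754 double's bit pattern onto a monotonically
     increasing unsigned scale, so that the difference of two mapped
     values is their distance in ULPs.  */
  static uint64_t to_ordered (double x)
  {
    uint64_t u;
    memcpy (&u, &x, sizeof u);
    /* Negative patterns descend as the value rises; negating them
       (mod 2^64) makes the whole scale ascend.  */
    return (u >> 63) ? -u : u + 0x8000000000000000ull;
  }

  /* Smallest n such that got and ref differ by fewer than 2^n ULPs;
     0 means the two values are identical.  */
  static int error_bits (double got, double ref)
  {
    uint64_t a = to_ordered (got), b = to_ordered (ref);
    uint64_t ulps = a > b ? a - b : b - a;
    int bits = 0;
    while (ulps >> bits)
      bits++;
    return bits;
  }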

===================================================
Errors            Full Dataset
gtr eq     current    b1div      b2div      new
======    ========   ========   ========   ========
 1 bit    2.05%      1.23842%   0.67130%   0.16664%
 2 bits   1.88%      0.51615%   0.50354%   0.00900%
 8 bits   1.77%      0.42856%   0.42168%   0.00011%
16 bits   1.63%      0.33840%   0.32879%   0.00001%
24 bits   1.51%      0.25583%   0.24405%   0.00000%
52 bits   1.13%      0.01886%   0.00350%   0.00000%
===================================================
Table 2: Errors with Full Dataset (Double Precision)

Table 2 shows significant differences in error rates.  First, the
comparison between b1div and b2div shows a significantly higher error
rate for the b1div method, both for single bit errors and well beyond.
Even at 52 bits, we see the b1div method gets completely wrong answers
more than 5 times as often as b2div.  To retain comparable accuracy
with the current complex divide results for small exponents, and due
to the increase in errors for large exponents, I chose the more
accurate method of two divides.

The current method has more than 1.6% of cases where the low 24 bits
of the mantissa differ from the correct answer, and more than 1.1% of
cases where the answer is completely wrong.  The new method shows less
than one case in 10,000 with greater than two bits of error and only
one case in 10 million with greater than 16 bits of error.  The new
patch reduces 8-bit errors by a factor of 16,000 and virtually
eliminates completely wrong answers.

As noted above, for architectures with double precision hardware, the
new method uses that hardware for the intermediate calculations before
returning the result in float precision.  Testing of the new patch has
shown zero errors, as seen in Tables 3 and 4.

Correctness for float

=============================
Errors      Moderate Dataset
gtr eq     current    new
======    ========   ========
 1 bit   28.68070%   0%
 2 bits   0.64386%   0%
 8 bits   0.00401%   0%
16 bits   0.00001%   0%
24 bits   0%         0%
=============================
Table 3: Errors with Moderate Dataset (float)

=============================
Errors      Full Dataset
gtr eq     current    new
======    ========   ========
 1 bit   19.98%      0%
 2 bits   3.20%      0%
 8 bits   1.97%      0%
16 bits   1.08%      0%
24 bits   0.55%      0%
=============================
Table 4: Errors with Full Dataset (float)

As before, the current method shows a troubling rate of extreme
errors.

There are very minor changes in accuracy for half precision since the
code changes from Smith's method to the simple method: 5 out of 1
million test cases show correct answers instead of 1- or 2-bit errors.
libgcc computes half-precision functions in float precision, allowing
the existing methods to avoid overflow/underflow issues for the
allowed range of exponents for half precision.

Extended precision (using the x87 80-bit format on x86) and long
double (using IEEE-754 128-bit on x86 and aarch64) both have 15-bit
exponents, as compared to 11-bit exponents in double precision.  We
note that the C standard also allows long double to be implemented in
the equivalent range of double.  The RMIN2 and RMINSCAL constants are
selected to work within the double range as well as within the
extended and 128-bit ranges.  We will limit our performance and
accuracy discussions to the 80-bit and 128-bit formats as seen on x86
here.

The extended and long double precision investigations were more
limited.  Aarch64 does not support extended precision but does support
the software implementation of 128-bit long double precision.

For x86, long double defaults to the 80-bit precision, but using the
-mlong-double-128 flag switches to the software implementation of
128-bit precision.  Both 80-bit and 128-bit precisions have the same
exponent range; the 128-bit precision has a longer mantissa.  Since
this change is only aimed at avoiding underflow/overflow for extreme
exponents, I studied the extended precision results on x86 for 100,000
values.  The limited exponent dataset showed no differences.  For the
dataset with the full exponent range, the current and new values
showed major differences (greater than 32 bits) in 567 cases out of
100,000 (0.56%).  In every one of these cases, the ratio c/d or d/c
(as appropriate) was zero or subnormal, indicating the advantage of
the new method and its continued correctness where needed.

PERFORMANCE Test results

In order for a library change to be practical, it is necessary to show
that the slowdown is tolerable.  The slowdowns observed are much less
than would be seen by (for example) switching from hardware double
precision to a software quad precision, which on the tested machines
causes a slowdown of around 100x.

The actual slowdown depends on the machine architecture.  It also
depends on the nature of the input data.  If underflow/overflow is
rare, then implementations with strong branch prediction will only
slow down by a few cycles.  If underflow/overflow is common, then the
branch predictors will be less accurate and the cost will be higher.

Results from two machines are presented as examples of the overhead
for the new method.  The one labeled x86 is a 5 year old Intel x86
processor and the one labeled aarch64 is a 3 year old arm64 processor.

In the following chart, the times are averaged over a one million
value data set.  All values are scaled to set the time of the current
method to 1.0.  Lower values are better.  A value of less than 1.0
would be faster than the current method and a value greater than 1.0
would be slower than the current method.

================================================
               Moderate set      full set
              x86    aarch64    x86    aarch64
===========  ===============   ===============
float         0.59    0.79      0.45    0.81
double        1.04    1.24      1.38    1.56
long double   1.13    1.24      1.29    1.25
================================================
Table 5: Performance Comparisons (ratio new/current)

The above table omits the timing of the 1-divide-and-2-multiply
variant compared with the 2-divide approach.

The float results show a clear performance improvement due to using
the simple method with double precision for intermediate calculations.

The double results with the new method show less overhead for the
moderate dataset than for the full dataset.  That is because the
moderate dataset never takes the new branches which protect from
under/overflow.  The better the branch predictor, the lower the cost
for these untaken branches.  Both platforms are somewhat dated, with
the x86 having a better branch predictor which reduces the cost of the
additional branches in the new code.  Of course, the relative slowdown
may be greater for some architectures, especially those with limited
branch prediction combined with a high cost of misprediction.

The long double results are fairly consistent in showing a moderate
additional cost for the extra branches and calculations in all cases.

The observed cost for all precisions is claimed to be tolerable on the
grounds that:

(a) the cost is worthwhile considering the accuracy improvement shown.
(b) most applications will only spend a small fraction of their time
    calculating complex divide.
(c) it is much less than the cost of extended precision.
(d) users are not forced to use it (as described below).

Those users who find this degree of slowdown unsatisfactory may use
the gcc switch -fcx-fortran-rules, which does not use the library
routine, instead inlining Smith's method without the C99 requirement
for dealing with NaN results.  The proposed patch for libgcc complex
divide does not affect the code generated by -fcx-fortran-rules.

SUMMARY

When the input data to complex divide has exponents whose absolute
value is less than half of *_MAX_EXP, this patch makes no changes in
accuracy and has only a modest effect on performance.  When the input
data contains values outside those ranges, the patch eliminates more
than 99.9% of major errors with a tolerable cost in performance.  In
comparison to Elen Kalda's method, this patch introduces more
performance overhead but reduces major errors by a factor greater
than 4000.

REFERENCES

[1] Nelson H.F. Beebe, "The Mathematical-Function Computation
    Handbook."  Springer International Publishing AG, 2017.
[2] Robert L. Smith.  Algorithm 116: Complex division.  Commun. ACM,
    5(8):435, 1962.
[3] Michael Baudin and Robert L. Smith.  "A robust complex division in
    Scilab," October 2012, available at http://arxiv.org/abs/1210.4539.
[4] Elen Kalda: Complex division improvements in libgcc.
    https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html

2020-12-08  Patrick McGehearty  <patrick.mcgehearty@oracle.com>

gcc/c-family/
	* c-cppbuiltin.c (c_cpp_builtins): Add supporting macros for new
	complex divide.

libgcc/
	* libgcc2.c (XMTYPE, XCTYPE, RBIG, RMIN, RMIN2, RMINSCAL, RMAX2):
	Define.
	(__divsc3, __divdc3, __divxc3, __divtc3): Improve complex divide.
	* config/rs6000/_divkc3.c (RBIG, RMIN, RMIN2, RMINSCAL, RMAX2):
	Define.
	(__divkc3): Improve complex divide.

gcc/testsuite/
	* gcc.c-torture/execute/ieee/cdivchkd.c: New test.
	* gcc.c-torture/execute/ieee/cdivchkf.c: Likewise.
	* gcc.c-torture/execute/ieee/cdivchkld.c: Likewise.
2021-04-28 21:14:48 +02:00
# define RMAX2 (RBIG * RMIN2)
#else
# error
#endif
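
/* For concreteness, a small standalone sketch (not part of libgcc)
   that prints the double-precision instances of these thresholds,
   using the values quoted in the commit message above; the values for
   the other modes differ (see the patch).  */

  #include <float.h>
  #include <stdio.h>

  #define D_RBIG      (DBL_MAX / 2.0)  /* ~8.99e307 */
  #define D_RMIN      (DBL_MIN)        /* smallest normal, ~2.23e-308 */
  #define D_RMIN2     (0x1.0p-53)      /* half of DBL_EPSILON */
  #define D_RMINSCAL  (0x1.0p+51)      /* scale factor, a power of 2 */
  #define D_RMAX2     (D_RBIG * D_RMIN2)  /* ~9.98e291 */

  int main (void)
  {
    printf ("RBIG     = %g\n", D_RBIG);
    printf ("RMIN     = %g\n", D_RMIN);
    printf ("RMIN2    = %g\n", D_RMIN2);
    printf ("RMINSCAL = %g\n", D_RMINSCAL);
    printf ("RMAX2    = %g\n", D_RMAX2);
    return 0;
  }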
#define CONCAT3(A,B,C) _CONCAT3(A,B,C)
#define _CONCAT3(A,B,C) A##B##C
#define CONCAT2(A,B) _CONCAT2(A,B)
#define _CONCAT2(A,B) A##B
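/* Example of the two-level pasting: with MODE defined as dc,
   CONCAT3 (__mul, MODE, 3) expands to __muldc3.  The _CONCAT*
   indirection ensures MODE is macro-expanded before the ## operator
   pastes the tokens together.  */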
#define isnan(x) __builtin_isnan (x)
#define isfinite(x) __builtin_isfinite (x)
#define isinf(x) __builtin_isinf (x)
#define INFINITY CONCAT2(__builtin_huge_val, CEXT) ()
#define I 1i
/* Helpers to make the following code slightly less gross. */
#define COPYSIGN CONCAT2(__builtin_copysign, CEXT)
#define FABS CONCAT2(__builtin_fabs, CEXT)
/* Verify that MTYPE matches up with CEXT. */
extern void *compile_type_assert[sizeof(INFINITY) == sizeof(MTYPE) ? 1 : -1];
/* Ensure that we've lost any extra precision. */
#if NOTRUNC
# define TRUNC(x)
#else
# define TRUNC(x) __asm__ ("" : "=m"(x) : "m"(x))
#endif
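
/* A note on the TRUNC idiom above (a sketch, not part of libgcc; the
   function name is hypothetical): on targets whose registers carry
   more precision than the declared type, e.g. x87 80-bit registers
   holding a double, an empty asm that forces the value through its
   memory slot discards the excess bits.  Where NOTRUNC is set, no
   such narrowing is needed and TRUNC expands to nothing.  */

  double mul_rounded_to_double (double x, double y)
  {
    double t = x * y;                   /* may sit in a wider register */
    __asm__ ("" : "=m" (t) : "m" (t));  /* store + reload rounds t to
                                           its declared width */
    return t;
  }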
#if defined(L_mulhc3) || defined(L_mulsc3) || defined(L_muldc3) \
|| defined(L_mulxc3) || defined(L_multc3)
CTYPE
CONCAT3(__mul,MODE,3) (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
{
MTYPE ac, bd, ad, bc, x, y;
CTYPE res;
ac = a * c;
bd = b * d;
ad = a * d;
bc = b * c;
TRUNC (ac);
TRUNC (bd);
TRUNC (ad);
TRUNC (bc);
x = ac - bd;
y = ad + bc;
if (isnan (x) && isnan (y))
{
/* Recover infinities that computed as NaN + iNaN. */
_Bool recalc = 0;
if (isinf (a) || isinf (b))
{
/* z is infinite. "Box" the infinity and change NaNs in
the other factor to 0. */
a = COPYSIGN (isinf (a) ? 1 : 0, a);
b = COPYSIGN (isinf (b) ? 1 : 0, b);
if (isnan (c)) c = COPYSIGN (0, c);
if (isnan (d)) d = COPYSIGN (0, d);
recalc = 1;
}
if (isinf (c) || isinf (d))
{
/* w is infinite. "Box" the infinity and change NaNs in
the other factor to 0. */
c = COPYSIGN (isinf (c) ? 1 : 0, c);
d = COPYSIGN (isinf (d) ? 1 : 0, d);
if (isnan (a)) a = COPYSIGN (0, a);
if (isnan (b)) b = COPYSIGN (0, b);
recalc = 1;
}
if (!recalc
&& (isinf (ac) || isinf (bd)
|| isinf (ad) || isinf (bc)))
{
/* Recover infinities from overflow by changing NaNs to 0. */
if (isnan (a)) a = COPYSIGN (0, a);
if (isnan (b)) b = COPYSIGN (0, b);
if (isnan (c)) c = COPYSIGN (0, c);
if (isnan (d)) d = COPYSIGN (0, d);
recalc = 1;
}
if (recalc)
{
x = INFINITY * (a * c - b * d);
y = INFINITY * (a * d + b * c);
}
}
__real__ res = x;
__imag__ res = y;
return res;
}
#endif /* complex multiply */
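
/* Usage sketch for the recovery logic above (a hypothetical test
   program, not part of libgcc): without -fcx-limited-range, GCC
   typically lowers a _Complex double multiply to __muldc3, so an
   infinite operand times a finite nonzero operand yields an infinity
   rather than NaN + iNaN.  */

  #include <math.h>
  #include <stdio.h>

  int main (void)
  {
    double _Complex z = __builtin_complex (INFINITY, INFINITY);
    double _Complex w = __builtin_complex (0.0, 1.0);
    double _Complex p = z * w;  /* the textbook formula gives
                                   NaN + iNaN; the "boxing" above
                                   recovers -inf + inf*i */
    printf ("%g + %g*i\n", __real__ p, __imag__ p);
    return 0;
  }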
#if defined(L_divhc3) || defined(L_divsc3) || defined(L_divdc3) \
|| defined(L_divxc3) || defined(L_divtc3)
CTYPE
CONCAT3(__div,MODE,3) (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
{
#if defined(L_divhc3) \
|| (defined(L_divsc3) && defined(__LIBGCC_HAVE_HWDBL__) )
/* Half precision is handled with float precision.
float is handled with double precision when double precision
hardware is available.
Due to the additional precision, the simple complex divide
method (without Smith's method) is sufficient to get accurate
answers and runs slightly faster than Smith's method. */
AMTYPE aa, bb, cc, dd;
AMTYPE denom;
MTYPE x, y;
CTYPE res;
aa = a;
bb = b;
cc = c;
dd = d;
denom = (cc * cc) + (dd * dd);
x = ((aa * cc) + (bb * dd)) / denom;
y = ((bb * cc) - (aa * dd)) / denom;
#else
MTYPE denom, ratio, x, y;
CTYPE res;
Practical improvement to libgcc complex divide Correctness and performance test programs used during development of this project may be found in the attachment to: https://www.mail-archive.com/gcc-patches@gcc.gnu.org/msg254210.html Summary of Purpose This patch to libgcc/libgcc2.c __divdc3 provides an opportunity to gain important improvements to the quality of answers for the default complex divide routine (half, float, double, extended, long double precisions) when dealing with very large or very small exponents. The current code correctly implements Smith's method (1962) [2] further modified by c99's requirements for dealing with NaN (not a number) results. When working with input values where the exponents are greater than *_MAX_EXP/2 or less than -(*_MAX_EXP)/2, results are substantially different from the answers provided by quad precision more than 1% of the time. This error rate may be unacceptable for many applications that cannot a priori restrict their computations to the safe range. The proposed method reduces the frequency of "substantially different" answers by more than 99% for double precision at a modest cost of performance. Differences between current gcc methods and the new method will be described. Then accuracy and performance differences will be discussed. Background This project started with an investigation related to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59714. Study of Beebe[1] provided an overview of past and recent practice for computing complex divide. The current glibc implementation is based on Robert Smith's algorithm [2] from 1962. A google search found the paper by Baudin and Smith [3] (same Robert Smith) published in 2012. Elen Kalda's proposed patch [4] is based on that paper. I developed two sets of test data by randomly distributing values over a restricted range and the full range of input values. The current complex divide handled the restricted range well enough, but failed on the full range more than 1% of the time. Baudin and Smith's primary test for "ratio" equals zero reduced the cases with 16 or more error bits by a factor of 5, but still left too many flawed answers. Adding debug print out to cases with substantial errors allowed me to see the intermediate calculations for test values that failed. I noted that for many of the failures, "ratio" was a subnormal. Changing the "ratio" test from check for zero to check for subnormal reduced the 16 bit error rate by another factor of 12. This single modified test provides the greatest benefit for the least cost, but the percentage of cases with greater than 16 bit errors (double precision data) is still greater than 0.027% (2.7 in 10,000). Continued examination of remaining errors and their intermediate computations led to the various tests of input value tests and scaling to avoid under/overflow. The current patch does not handle some of the rare and most extreme combinations of input values, but the random test data is only showing 1 case in 10 million that has an error of greater than 12 bits. That case has 18 bits of error and is due to subtraction cancellation. These results are significantly better than the results reported by Baudin and Smith. Support for half, float, double, extended, and long double precision is included as all are handled with suitable preprocessor symbols in a single source routine. Since half precision is computed with float precision as per current libgcc practice, the enhanced algorithm provides no benefit for half precision and would cost performance. 
Further investigation showed changing the half precision algorithm to use the simple formula (real=a*c+b*d imag=b*c-a*d) caused no loss of precision and modest improvement in performance. The existing constants for each precision: float: FLT_MAX, FLT_MIN; double: DBL_MAX, DBL_MIN; extended and/or long double: LDBL_MAX, LDBL_MIN are used for avoiding the more common overflow/underflow cases. This use is made generic by defining appropriate __LIBGCC2_* macros in c-cppbuiltin.c. Tests are added for when both parts of the denominator have exponents small enough to allow shifting any subnormal values to normal values all input values could be scaled up without risking overflow. That gained a clear improvement in accuracy. Similarly, when either numerator was subnormal and the other numerator and both denominator values were not too large, scaling could be used to reduce risk of computing with subnormals. The test and scaling values used all fit within the allowed exponent range for each precision required by the C standard. Float precision has more difficulty with getting correct answers than double precision. When hardware for double precision floating point operations is available, float precision is now handled in double precision intermediate calculations with the simple algorithm the same as the half-precision method of using float precision for intermediate calculations. Using the higher precision yields exact results for all tested input values (64-bit double, 32-bit float) with the only performance cost being the requirement to convert the four input values from float to double. If double precision hardware is not available, then float complex divide will use the same improved algorithm as the other precisions with similar change in performance. Further Improvement The most common remaining substantial errors are due to accuracy loss when subtracting nearly equal values. This patch makes no attempt to improve that situation. NOTATION For all of the following, the notation is: Input complex values: a+bi (a= real part, b= imaginary part) c+di Output complex value: e+fi = (a+bi)/(c+di) For the result tables: current = current method (SMITH) b1div = method proposed by Elen Kalda b2div = alternate method considered by Elen Kalda new = new method proposed by this patch DESCRIPTIONS of different complex divide methods: NAIVE COMPUTATION (-fcx-limited-range): e = (a*c + b*d)/(c*c + d*d) f = (b*c - a*d)/(c*c + d*d) Note that c*c and d*d will overflow or underflow if either c or d is outside the range 2^-538 to 2^512. This method is available in gcc when the switch -fcx-limited-range is used. That switch is also enabled by -ffast-math. Only one who has a clear understanding of the maximum range of all intermediate values generated by an application should consider using this switch. SMITH's METHOD (current libgcc): if(fabs(c)<fabs(d) { r = c/d; denom = (c*r) + d; e = (a*r + b) / denom; f = (b*r - a) / denom; } else { r = d/c; denom = c + (d*r); e = (a + b*r) / denom; f = (b - a*r) / denom; } Smith's method is the current default method available with __divdc3. Elen Kalda's METHOD Elen Kalda proposed a patch about a year ago, also based on Baudin and Smith, but not including tests for subnormals: https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html [4] It is compared here for accuracy with this patch. This method applies the most significant part of the algorithm proposed by Baudin&Smith (2012) in the paper "A Robust Complex Division in Scilab" [3]. 
Elen's method also replaces two divides by one divide and two multiplies due to the high cost of divide on aarch64. In the comparison sections, this method will be labeled b1div. A variation discussed in that patch which does not replace the two divides will be labeled b2div. inline void improved_internal (MTYPE a, MTYPE b, MTYPE c, MTYPE d) { r = d/c; t = 1.0 / (c + (d * r)); if (r != 0) { x = (a + (b * r)) * t; y = (b - (a * r)) * t; } else { /* Changing the order of operations avoids the underflow of r impacting the result. */ x = (a + (d * (b / c))) * t; y = (b - (d * (a / c))) * t; } } if (FABS (d) < FABS (c)) { improved_internal (a, b, c, d); } else { improved_internal (b, a, d, c); y = -y; } NEW METHOD (proposed by patch) to replace the current default method: The proposed method starts with an algorithm proposed by Baudin&Smith (2012) in the paper "A Robust Complex Division in Scilab" [3]. The patch makes additional modifications to that method for further reductions in the error rate. The following code shows the #define values for double precision. See the patch for #define values used for other precisions. #define RBIG ((DBL_MAX)/2.0) #define RMIN (DBL_MIN) #define RMIN2 (0x1.0p-53) #define RMINSCAL (0x1.0p+51) #define RMAX2 ((RBIG)*(RMIN2)) if (FABS(c) < FABS(d)) { /* prevent overflow when arguments are near max representable */ if ((FABS (d) > RBIG) || (FABS (a) > RBIG) || (FABS (b) > RBIG) ) { a = a * 0.5; b = b * 0.5; c = c * 0.5; d = d * 0.5; } /* minimize overflow/underflow issues when c and d are small */ else if (FABS (d) < RMIN2) { a = a * RMINSCAL; b = b * RMINSCAL; c = c * RMINSCAL; d = d * RMINSCAL; } else { if(((FABS (a) < RMIN) && (FABS (b) < RMAX2) && (FABS (d) < RMAX2)) || ((FABS (b) < RMIN) && (FABS (a) < RMAX2) && (FABS (d) < RMAX2))) { a = a * RMINSCAL; b = b * RMINSCAL; c = c * RMINSCAL; d = d * RMINSCAL; } } r = c/d; denom = (c*r) + d; if( r > RMIN ) { e = (a*r + b) / denom ; f = (b*r - a) / denom } else { e = (c * (a/d) + b) / denom; f = (c * (b/d) - a) / denom; } } [ only presenting the fabs(c) < fabs(d) case here, full code in patch. ] Before any computation of the answer, the code checks for any input values near maximum to allow down scaling to avoid overflow. These scalings almost never harm the accuracy since they are by 2. Values that are over RBIG are relatively rare but it is easy to test for them and allow aviodance of overflows. Testing for RMIN2 reveals when both c and d are less than [FLT|DBL]_EPSILON. By scaling all values by 1/EPSILON, the code converts subnormals to normals, avoids loss of accuracy and underflows in intermediate computations that otherwise might occur. If scaling a and b by 1/EPSILON causes either to overflow, then the computation will overflow whatever method is used. Finally, we test for either a or b being subnormal (RMIN) and if so, for the other three values being small enough to allow scaling. We only need to test a single denominator value since we have already determined which of c and d is larger. Next, r (the ratio of c to d) is checked for being near zero. Baudin and Smith checked r for zero. This code improves that approach by checking for values less than DBL_MIN (subnormal) covers roughly 12 times as many cases and substantially improves overall accuracy. If r is too small, then when it is used in a multiplication, there is a high chance that the result will underflow to zero, losing significant accuracy. That underflow is avoided by reordering the computation. 
When r is subnormal, the code replaces a*r (= a*(c/d)) with ((a/d)*c) which is mathematically the same but avoids the unnecessary underflow. TEST Data Two sets of data are presented to test these methods. Both sets contain 10 million pairs of complex values. The exponents and mantissas are generated using multiple calls to random() and then combining the results. Only values which give results to complex divide that are representable in the appropriate precision after being computed in quad precision are used. The first data set is labeled "moderate exponents". The exponent range is limited to -DBL_MAX_EXP/2 to DBL_MAX_EXP/2 for Double Precision (use FLT_MAX_EXP or LDBL_MAX_EXP for the appropriate precisions. The second data set is labeled "full exponents". The exponent range for these cases is the full exponent range including subnormals for a given precision. ACCURACY Test results: Note: The following accuracy tests are based on IEEE-754 arithmetic. Note: All results reporteed are based on use of fused multiply-add. If fused multiply-add is not used, the error rate increases, giving more 1 and 2 bit errors for both current and new complex divide. Differences between using fused multiply and not using it that are greater than 2 bits are less than 1 in a million. The complex divide methods are evaluated by determining the percentage of values that exceed differences in low order bits. If a "2 bit" test results show 1%, that would mean that 1% of 10,000,000 values (100,000) have either a real or imaginary part that differs from the quad precision result by more than the last 2 bits. Results are reported for differences greater than or equal to 1 bit, 2 bits, 8 bits, 16 bits, 24 bits, and 52 bits for double precision. Even when the patch avoids overflows and underflows, some input values are expected to have errors due to the potential for catastrophic roundoff from floating point subtraction. For example, when b*c and a*d are nearly equal, the result of subtraction may lose several places of accuracy. This patch does not attempt to detect or minimize this type of error, but neither does it increase them. I only show the results for Elen Kalda's method (with both 1 and 2 divides) and the new method for only 1 divide in the double precision table. In the following charts, lower values are better. current - current complex divide in libgcc b1div - Elen Kalda's method from Baudin & Smith with one divide b2div - Elen Kalda's method from Baudin & Smith with two divides new - This patch which uses 2 divides =================================================== Errors Moderate Dataset gtr eq current b1div b2div new ====== ======== ======== ======== ======== 1 bit 0.24707% 0.92986% 0.24707% 0.24707% 2 bits 0.01762% 0.01770% 0.01762% 0.01762% 8 bits 0.00026% 0.00026% 0.00026% 0.00026% 16 bits 0.00000% 0.00000% 0.00000% 0.00000% 24 bits 0% 0% 0% 0% 52 bits 0% 0% 0% 0% =================================================== Table 1: Errors with Moderate Dataset (Double Precision) Note in Table 1 that both the old and new methods give identical error rates for data with moderate exponents. Errors exceeding 16 bits are exceedingly rare. There are substantial increases in the 1 bit error rates for b1div (the 1 divide/2 multiplys method) as compared to b2div (the 2 divides method). These differences are minimal for 2 bits and larger error measurements. 
=================================================== Errors Full Dataset gtr eq current b1div b2div new ====== ======== ======== ======== ======== 1 bit 2.05% 1.23842% 0.67130% 0.16664% 2 bits 1.88% 0.51615% 0.50354% 0.00900% 8 bits 1.77% 0.42856% 0.42168% 0.00011% 16 bits 1.63% 0.33840% 0.32879% 0.00001% 24 bits 1.51% 0.25583% 0.24405% 0.00000% 52 bits 1.13% 0.01886% 0.00350% 0.00000% =================================================== Table 2: Errors with Full Dataset (Double Precision) Table 2 shows significant differences in error rates. First, the difference between b1div and b2div show a significantly higher error rate for the b1div method both for single bit errros and well beyond. Even for 52 bits, we see the b1div method gets completely wrong answers more than 5 times as often as b2div. To retain comparable accuracy with current complex divide results for small exponents and due to the increase in errors for large exponents, I choose to use the more accurate method of two divides. The current method has more 1.6% of cases where it is getting results where the low 24 bits of the mantissa differ from the correct answer. More than 1.1% of cases where the answer is completely wrong. The new method shows less than one case in 10,000 with greater than two bits of error and only one case in 10 million with greater than 16 bits of errors. The new patch reduces 8 bit errors by a factor of 16,000 and virtually eliminates completely wrong answers. As noted above, for architectures with double precision hardware, the new method uses that hardware for the intermediate calculations before returning the result in float precision. Testing of the new patch has shown zero errors found as seen in Tables 3 and 4. Correctness for float ============================= Errors Moderate Dataset gtr eq current new ====== ======== ======== 1 bit 28.68070% 0% 2 bits 0.64386% 0% 8 bits 0.00401% 0% 16 bits 0.00001% 0% 24 bits 0% 0% ============================= Table 3: Errors with Moderate Dataset (float) ============================= Errors Full Dataset gtr eq current new ====== ======== ======== 1 bit 19.98% 0% 2 bits 3.20% 0% 8 bits 1.97% 0% 16 bits 1.08% 0% 24 bits 0.55% 0% ============================= Table 4: Errors with Full Dataset (float) As before, the current method shows an troubling rate of extreme errors. There very minor changes in accuracy for half-precision since the code changes from Smith's method to the simple method. 5 out of 1 million test cases show correct answers instead of 1 or 2 bit errors. libgcc computes half-precision functions in float precision allowing the existing methods to avoid overflow/underflow issues for the allowed range of exponents for half-precision. Extended precision (using x87 80-bit format on x86) and Long double (using IEEE-754 128-bit on x86 and aarch64) both have 15-bit exponents as compared to 11-bit exponents in double precision. We note that the C standard also allows Long Double to be implemented in the equivalent range of Double. The RMIN2 and RMINSCAL constants are selected to work within the Double range as well as with extended and 128-bit ranges. We will limit our performance and accurancy discussions to the 80-bit and 128-bit formats as seen on x86 here. The extended and long double precision investigations were more limited. Aarch64 does not support extended precision but does support the software implementation of 128-bit long double precision. 
Extended precision (using the x87 80-bit format on x86) and long double
(using IEEE-754 128-bit on x86 and aarch64) both have 15-bit exponents,
as compared to 11-bit exponents in double precision.  We note that the
C standard also allows long double to be implemented in the equivalent
range of double.  The RMIN2 and RMINSCAL constants are selected to work
within the double range as well as with the extended and 128-bit
ranges.  We will limit our performance and accuracy discussions to the
80-bit and 128-bit formats as seen on x86 here.

The extended and long double precision investigations were more
limited.  Aarch64 does not support extended precision but does support
the software implementation of 128-bit long double precision.  For x86,
long double defaults to the 80-bit precision, but the -mlong-double-128
flag switches to the software implementation of 128-bit precision.
Both 80-bit and 128-bit precisions have the same exponent range, with
the 128-bit precision having a longer mantissa.  Since this change is
only aimed at avoiding underflow/overflow for extreme exponents, I
studied the extended precision results on x86 for 100,000 values.  The
limited exponent dataset showed no differences.  For the dataset with
the full exponent range, the current and new values showed major
differences (greater than 32 bits) in 567 cases out of 100,000 (0.56%).
In every one of these cases, the ratio of c/d or d/c (as appropriate)
was zero or subnormal, indicating the advantage of the new method and
its continued correctness where needed.

PERFORMANCE Test results

In order for a library change to be practical, it is necessary to show
that the slowdown is tolerable.  The slowdowns observed are much less
than would be seen by (for example) switching from hardware double
precision to software quad precision, which on the tested machines
causes a slowdown of around 100x.

The actual slowdown depends on the machine architecture.  It also
depends on the nature of the input data.  If underflow/overflow is
rare, then implementations with strong branch prediction will only slow
down by a few cycles.  If underflow/overflow is common, then the branch
predictors will be less accurate and the cost will be higher.

Results from two machines are presented as examples of the overhead for
the new method.  The one labeled x86 is a 5 year old Intel x86
processor and the one labeled aarch64 is a 3 year old arm64 processor.

In the following chart, the times are averaged over a one million value
data set.  All values are scaled to set the time of the current method
to be 1.0.  Lower values are better.  A value of less than 1.0 would be
faster than the current method and a value greater than 1.0 would be
slower than the current method.

================================================
              Moderate set       full set
              x86    aarch64     x86    aarch64
========      ===============    ===============
float         0.59    0.79       0.45    0.81
double        1.04    1.24       1.38    1.56
long double   1.13    1.24       1.29    1.25
================================================
Table 5: Performance Comparisons (ratio new/current)

The above table omits the timing for the 1 divide and 2 multiply
comparison with the 2 divide approach.

The float results show a clear performance improvement due to using the
simple method with double precision for intermediate calculations.

The double results with the new method show less overhead for the
moderate dataset than for the full dataset.  That is because the
moderate dataset never takes the new branches which protect from
under/overflow.  The better the branch predictor, the lower the cost
for these untaken branches.  Both platforms are somewhat dated, with
the x86 having a better branch predictor which reduces the cost of the
additional branches in the new code.  Of course, the relative slowdown
may be greater for some architectures, especially those with limited
branch prediction combined with a high cost of misprediction.

The long double results are fairly consistent in showing a moderate
additional cost for the extra branches and calculations in all cases.

The observed cost for all precisions is claimed to be tolerable on the
grounds that:

(a) the cost is worthwhile considering the accuracy improvement shown.
(b) most applications will only spend a small fraction of their time
    calculating complex divide.
(c) it is much less than the cost of extended precision.
(d) users are not forced to use it (as described below).

Those users who find this degree of slowdown unsatisfactory may use the
gcc switch -fcx-fortran-rules, which does not use the library routine
and instead inlines Smith's method without the C99 requirement for
dealing with NaN results.  The proposed patch for libgcc complex divide
does not affect the code generated by -fcx-fortran-rules.
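For reference, the sketch below spells out the Smith's method
recurrence that -fcx-fortran-rules effectively inlines, using the
e+fi = (a+bi)/(c+di) notation from earlier.  It is illustrative only,
omits the C99 NaN recovery performed by the library routine, and the
function name is invented here.

  #include <math.h>

  /* Smith's 1962 method [2]: form the ratio from the smaller of
     |c|, |d| over the larger, then divide through by the larger.  */
  static void
  smith_divide (double a, double b, double c, double d,
                double *e, double *f)
  {
    double r, denom;
    if (fabs (c) < fabs (d))
      {
        r = c / d;
        denom = (c * r) + d;
        *e = ((a * r) + b) / denom;
        *f = ((b * r) - a) / denom;
      }
    else
      {
        r = d / c;
        denom = c + (d * r);
        *e = (a + (b * r)) / denom;
        *f = (b - (a * r)) / denom;
      }
  }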
SUMMARY

When the input data to complex divide has exponents whose absolute
value is less than half of *_MAX_EXP, this patch makes no changes in
accuracy and has only a modest effect on performance.  When input data
contains values outside those ranges, the patch eliminates more than
99.9% of major errors with a tolerable cost in performance.  In
comparison to Elen Kalda's method, this patch introduces more
performance overhead but reduces major errors by a factor of greater
than 4000.

REFERENCES

[1] Nelson H.F. Beebe, "The Mathematical-Function Computation
    Handbook."  Springer International Publishing AG, 2017.

[2] Robert L. Smith.  Algorithm 116: Complex division.  Commun. ACM,
    5(8):435, 1962.

[3] Michael Baudin and Robert L. Smith.  "A robust complex division in
    Scilab," October 2012, available at http://arxiv.org/abs/1210.4539.

[4] Elen Kalda: Complex division improvements in libgcc.
    https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html

2020-12-08  Patrick McGehearty  <patrick.mcgehearty@oracle.com>

gcc/c-family/
	* c-cppbuiltin.c (c_cpp_builtins): Add supporting macros for
	new complex divide.

libgcc/
	* libgcc2.c (XMTYPE, XCTYPE, RBIG, RMIN, RMIN2, RMINSCAL,
	RMAX2): Define.
	(__divsc3, __divdc3, __divxc3, __divtc3): Improve complex divide.
	* config/rs6000/_divkc3.c (RBIG, RMIN, RMIN2, RMINSCAL, RMAX2):
	Define.
	(__divkc3): Improve complex divide.

gcc/testsuite/
	* gcc.c-torture/execute/ieee/cdivchkd.c: New test.
	* gcc.c-torture/execute/ieee/cdivchkf.c: Likewise.
	* gcc.c-torture/execute/ieee/cdivchkld.c: Likewise.
2021-04-28 21:14:48 +02:00
/* double, extended, long double have significant potential
   underflow/overflow errors that can be greatly reduced with
   a limited number of tests and adjustments.  float is handled
   the same way when no HW double is available.  */
/* Scale by max(c,d) to reduce chances of denominator overflowing. */
if (FABS (c) < FABS (d))
{
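/* Here |c| < |d|, so the ratio formed below is c/d with magnitude
   at most 1.  */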
/* Prevent the denominator from overflowing (and the quotient from
   spuriously underflowing to zero) when d is near max representable. */
if (FABS (d) >= RBIG)
{
a = a / 2;
b = b / 2;
c = c / 2;
d = d / 2;
}
/* Avoid overflow/underflow issues when c and d are small.
   Scaling up helps avoid some underflows.
   No new overflow possible since c and d are below RMIN2. */
if (FABS (d) < RMIN2)
{
a = a * RMINSCAL;
b = b * RMINSCAL;
c = c * RMINSCAL;
d = d * RMINSCAL;
}
else
{
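/* If a or b is subnormal (below RMIN), scaling by RMINSCAL is safe
   when the remaining magnitudes are below RMAX2 (= RBIG * RMIN2);
   checking d alone covers c because |c| < |d| in this branch.  */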
if (((FABS (a) < RMIN) && (FABS (b) < RMAX2) && (FABS (d) < RMAX2))
|| ((FABS (b) < RMIN) && (FABS (a) < RMAX2)
&& (FABS (d) < RMAX2)))
{
a = a * RMINSCAL;
b = b * RMINSCAL;
c = c * RMINSCAL;
d = d * RMINSCAL;
}
}
ratio = c / d;
denom = (c * ratio) + d;
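/* Note denom = c * (c / d) + d = (c*c + d*d) / d, the naive method's
   denominator already divided through by d.  */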
/* Choose alternate order of computation if ratio is subnormal. */
if (FABS (ratio) > RMIN)
{
x = ((a * ratio) + b) / denom;
y = ((b * ratio) - a) / denom;
}
else
{
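/* ratio (= c/d) is zero or subnormal: computing c * (a / d) in place
   of a * ratio is mathematically the same but avoids the underflow
   of the a * ratio product.  */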
x = ((c * (a / d)) + b) / denom;
y = ((c * (b / d)) - a) / denom;
}
}
else
{
Practical improvement to libgcc complex divide Correctness and performance test programs used during development of this project may be found in the attachment to: https://www.mail-archive.com/gcc-patches@gcc.gnu.org/msg254210.html Summary of Purpose This patch to libgcc/libgcc2.c __divdc3 provides an opportunity to gain important improvements to the quality of answers for the default complex divide routine (half, float, double, extended, long double precisions) when dealing with very large or very small exponents. The current code correctly implements Smith's method (1962) [2] further modified by c99's requirements for dealing with NaN (not a number) results. When working with input values where the exponents are greater than *_MAX_EXP/2 or less than -(*_MAX_EXP)/2, results are substantially different from the answers provided by quad precision more than 1% of the time. This error rate may be unacceptable for many applications that cannot a priori restrict their computations to the safe range. The proposed method reduces the frequency of "substantially different" answers by more than 99% for double precision at a modest cost of performance. Differences between current gcc methods and the new method will be described. Then accuracy and performance differences will be discussed. Background This project started with an investigation related to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59714. Study of Beebe[1] provided an overview of past and recent practice for computing complex divide. The current glibc implementation is based on Robert Smith's algorithm [2] from 1962. A google search found the paper by Baudin and Smith [3] (same Robert Smith) published in 2012. Elen Kalda's proposed patch [4] is based on that paper. I developed two sets of test data by randomly distributing values over a restricted range and the full range of input values. The current complex divide handled the restricted range well enough, but failed on the full range more than 1% of the time. Baudin and Smith's primary test for "ratio" equals zero reduced the cases with 16 or more error bits by a factor of 5, but still left too many flawed answers. Adding debug print out to cases with substantial errors allowed me to see the intermediate calculations for test values that failed. I noted that for many of the failures, "ratio" was a subnormal. Changing the "ratio" test from check for zero to check for subnormal reduced the 16 bit error rate by another factor of 12. This single modified test provides the greatest benefit for the least cost, but the percentage of cases with greater than 16 bit errors (double precision data) is still greater than 0.027% (2.7 in 10,000). Continued examination of remaining errors and their intermediate computations led to the various tests of input value tests and scaling to avoid under/overflow. The current patch does not handle some of the rare and most extreme combinations of input values, but the random test data is only showing 1 case in 10 million that has an error of greater than 12 bits. That case has 18 bits of error and is due to subtraction cancellation. These results are significantly better than the results reported by Baudin and Smith. Support for half, float, double, extended, and long double precision is included as all are handled with suitable preprocessor symbols in a single source routine. Since half precision is computed with float precision as per current libgcc practice, the enhanced algorithm provides no benefit for half precision and would cost performance. 
Further investigation showed changing the half precision algorithm to use the simple formula (real=a*c+b*d imag=b*c-a*d) caused no loss of precision and modest improvement in performance. The existing constants for each precision: float: FLT_MAX, FLT_MIN; double: DBL_MAX, DBL_MIN; extended and/or long double: LDBL_MAX, LDBL_MIN are used for avoiding the more common overflow/underflow cases. This use is made generic by defining appropriate __LIBGCC2_* macros in c-cppbuiltin.c. Tests are added for when both parts of the denominator have exponents small enough to allow shifting any subnormal values to normal values all input values could be scaled up without risking overflow. That gained a clear improvement in accuracy. Similarly, when either numerator was subnormal and the other numerator and both denominator values were not too large, scaling could be used to reduce risk of computing with subnormals. The test and scaling values used all fit within the allowed exponent range for each precision required by the C standard. Float precision has more difficulty with getting correct answers than double precision. When hardware for double precision floating point operations is available, float precision is now handled in double precision intermediate calculations with the simple algorithm the same as the half-precision method of using float precision for intermediate calculations. Using the higher precision yields exact results for all tested input values (64-bit double, 32-bit float) with the only performance cost being the requirement to convert the four input values from float to double. If double precision hardware is not available, then float complex divide will use the same improved algorithm as the other precisions with similar change in performance. Further Improvement The most common remaining substantial errors are due to accuracy loss when subtracting nearly equal values. This patch makes no attempt to improve that situation. NOTATION For all of the following, the notation is: Input complex values: a+bi (a= real part, b= imaginary part) c+di Output complex value: e+fi = (a+bi)/(c+di) For the result tables: current = current method (SMITH) b1div = method proposed by Elen Kalda b2div = alternate method considered by Elen Kalda new = new method proposed by this patch DESCRIPTIONS of different complex divide methods: NAIVE COMPUTATION (-fcx-limited-range): e = (a*c + b*d)/(c*c + d*d) f = (b*c - a*d)/(c*c + d*d) Note that c*c and d*d will overflow or underflow if either c or d is outside the range 2^-538 to 2^512. This method is available in gcc when the switch -fcx-limited-range is used. That switch is also enabled by -ffast-math. Only one who has a clear understanding of the maximum range of all intermediate values generated by an application should consider using this switch. SMITH's METHOD (current libgcc): if(fabs(c)<fabs(d) { r = c/d; denom = (c*r) + d; e = (a*r + b) / denom; f = (b*r - a) / denom; } else { r = d/c; denom = c + (d*r); e = (a + b*r) / denom; f = (b - a*r) / denom; } Smith's method is the current default method available with __divdc3. Elen Kalda's METHOD Elen Kalda proposed a patch about a year ago, also based on Baudin and Smith, but not including tests for subnormals: https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html [4] It is compared here for accuracy with this patch. This method applies the most significant part of the algorithm proposed by Baudin&Smith (2012) in the paper "A Robust Complex Division in Scilab" [3]. 
Elen Kalda's METHOD

Elen Kalda proposed a patch about a year ago, also based on Baudin
and Smith, but not including tests for subnormals:
https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html [4]
It is compared here for accuracy with this patch.

This method applies the most significant part of the algorithm
proposed by Baudin & Smith (2012) in the paper "A Robust Complex
Division in Scilab" [3].  Elen's method also replaces two divides by
one divide and two multiplies, due to the high cost of divide on
aarch64.  In the comparison sections, this method is labeled b1div.
A variation discussed in that patch which does not replace the two
divides is labeled b2div.

  inline void improved_internal (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
  {
    r = d/c;
    t = 1.0 / (c + (d * r));
    if (r != 0)
      {
        x = (a + (b * r)) * t;
        y = (b - (a * r)) * t;
      }
    else
      {
        /* Changing the order of operations avoids the underflow of r
           impacting the result.  */
        x = (a + (d * (b / c))) * t;
        y = (b - (d * (a / c))) * t;
      }
  }

  if (FABS (d) < FABS (c))
    {
      improved_internal (a, b, c, d);
    }
  else
    {
      improved_internal (b, a, d, c);
      y = -y;
    }

NEW METHOD (proposed by patch) to replace the current default method:

The proposed method starts with an algorithm proposed by Baudin &
Smith (2012) in the paper "A Robust Complex Division in Scilab" [3].
The patch makes additional modifications to that method for further
reductions in the error rate.  The following code shows the #define
values for double precision.  See the patch for the #define values
used for other precisions.

  #define RBIG     ((DBL_MAX)/2.0)
  #define RMIN     (DBL_MIN)
  #define RMIN2    (0x1.0p-53)
  #define RMINSCAL (0x1.0p+51)
  #define RMAX2    ((RBIG)*(RMIN2))

  if (FABS (c) < FABS (d))
    {
      /* Prevent overflow when arguments are near max representable.  */
      if ((FABS (d) > RBIG) || (FABS (a) > RBIG) || (FABS (b) > RBIG))
        {
          a = a * 0.5;
          b = b * 0.5;
          c = c * 0.5;
          d = d * 0.5;
        }
      /* Minimize overflow/underflow issues when c and d are small.  */
      else if (FABS (d) < RMIN2)
        {
          a = a * RMINSCAL;
          b = b * RMINSCAL;
          c = c * RMINSCAL;
          d = d * RMINSCAL;
        }
      else if (((FABS (a) < RMIN) && (FABS (b) < RMAX2) && (FABS (d) < RMAX2))
               || ((FABS (b) < RMIN) && (FABS (a) < RMAX2)
                   && (FABS (d) < RMAX2)))
        {
          a = a * RMINSCAL;
          b = b * RMINSCAL;
          c = c * RMINSCAL;
          d = d * RMINSCAL;
        }

      r = c/d;
      denom = (c*r) + d;
      if (FABS (r) > RMIN)
        {
          e = (a*r + b) / denom;
          f = (b*r - a) / denom;
        }
      else
        {
          e = (c * (a/d) + b) / denom;
          f = (c * (b/d) - a) / denom;
        }
    }

[Only the fabs(c) < fabs(d) case is presented here; the full code is
in the patch.]

Before any computation of the answer, the code checks for any input
values near the maximum, to allow downscaling that avoids overflow.
These scalings almost never harm accuracy since they are by a factor
of 2.  Values over RBIG are relatively rare, but it is easy to test
for them and allow avoidance of overflow.

Testing for RMIN2 reveals when both c and d are less than
[FLT|DBL]_EPSILON.  By scaling all values by 1/EPSILON, the code
converts subnormals to normals and avoids the loss of accuracy and
the underflows in intermediate computations that might otherwise
occur.  If scaling a and b by 1/EPSILON causes either to overflow,
then the computation will overflow whatever method is used.

Finally, we test for either a or b being subnormal (RMIN) and, if so,
for the other three values being small enough to allow scaling.  We
only need to test a single denominator value since we have already
determined which of c and d is larger.

Next, r (the ratio of c to d) is checked for being near zero.  Baudin
and Smith checked r for zero.  This code improves on that approach:
checking for values less than DBL_MIN (subnormal) covers roughly 12
times as many cases and substantially improves overall accuracy.  If
r is too small, then when it is used in a multiplication there is a
high chance that the result will underflow to zero, losing
significant accuracy.  That underflow is avoided by reordering the
computation: when r is subnormal, the code replaces a*r (= a*(c/d))
with ((a/d)*c), which is mathematically the same but avoids the
unnecessary underflow.
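A minimal demonstration of that reordering, with illustrative values:
r = c/d falls below the smallest subnormal and underflows to zero, so
a*r drops the term entirely, while (a/d)*c preserves it:

  /* Effect of reordering a*(c/d) as (a/d)*c when c/d underflows.  */
  #include <stdio.h>

  int main (void)
  {
    double a = 0x1p+500, c = 0x1p-1000, d = 0x1p+100;
    double r = c / d;                          /* 2^-1100 -> +0.0 */
    printf ("a*r     = %a\n", a * r);          /* 0x0p+0 (term lost) */
    printf ("(a/d)*c = %a\n", (a / d) * c);    /* 0x1p-600 (exact) */
    return 0;
  }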
TEST Data

Two sets of data are presented to test these methods.  Both sets
contain 10 million pairs of complex values.  The exponents and
mantissas are generated using multiple calls to random(), combining
the results.  Only values whose complex divide results are
representable in the appropriate precision after being computed in
quad precision are used.

The first data set is labeled "moderate exponents".  The exponent
range is limited to -DBL_MAX_EXP/2 to DBL_MAX_EXP/2 for double
precision (FLT_MAX_EXP or LDBL_MAX_EXP for the other precisions).
The second data set is labeled "full exponents".  The exponent range
for these cases is the full exponent range, including subnormals, for
a given precision.

ACCURACY Test results:

Note: The following accuracy tests are based on IEEE-754 arithmetic.

Note: All results reported are based on use of fused multiply-add.
If fused multiply-add is not used, the error rate increases, giving
more 1 and 2 bit errors for both the current and the new complex
divide.  Differences between using fused multiply-add and not using
it that are greater than 2 bits occur less than 1 time in a million.

The complex divide methods are evaluated by determining the
percentage of values that exceed differences in low order bits.  If a
"2 bit" test shows 1%, that means that 1% of 10,000,000 values
(100,000) have either a real or imaginary part that differs from the
quad precision result by more than the last 2 bits (a concrete sketch
of one way to compute this measure follows Table 1).

Results are reported for differences greater than or equal to 1 bit,
2 bits, 8 bits, 16 bits, 24 bits, and 52 bits for double precision.
Even when the patch avoids overflows and underflows, some input
values are expected to have errors due to the potential for
catastrophic roundoff from floating point subtraction.  For example,
when b*c and a*d are nearly equal, the result of subtraction may lose
several places of accuracy.  This patch does not attempt to detect or
minimize this type of error, but neither does it increase it.

I show the results for Elen Kalda's method (with both the 1 divide
and 2 divide variants) only in the double precision tables.  In the
following charts, lower values are better.

current - current complex divide in libgcc
b1div   - Elen Kalda's method from Baudin & Smith with one divide
b2div   - Elen Kalda's method from Baudin & Smith with two divides
new     - this patch, which uses 2 divides

===================================================
Errors       Moderate Dataset
gtr eq     current    b1div      b2div      new
======    ========   ========   ========   ========
 1 bit    0.24707%   0.92986%   0.24707%   0.24707%
 2 bits   0.01762%   0.01770%   0.01762%   0.01762%
 8 bits   0.00026%   0.00026%   0.00026%   0.00026%
16 bits   0.00000%   0.00000%   0.00000%   0.00000%
24 bits   0%         0%         0%         0%
52 bits   0%         0%         0%         0%
===================================================
Table 1: Errors with Moderate Dataset (Double Precision)

Note in Table 1 that both the old and new methods give identical
error rates for data with moderate exponents.  Errors exceeding 16
bits are exceedingly rare.  There are substantial increases in the 1
bit error rates for b1div (the 1 divide / 2 multiplies method) as
compared to b2div (the 2 divides method).  These differences are
minimal for 2 bits and larger error measurements.
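For concreteness, here is one plausible way to compute the "bits of
error" measure used in these tables: the number of low-order bits of
the reference needed to express the difference.  The helper name and
the exact definition are this sketch's assumptions; the actual
measurement code is in the attachment linked at the top.

  /* Count differing low-order bits against a reference.  Link -lm.  */
  #include <math.h>
  #include <stdio.h>

  static int
  error_bits (double got, double ref)
  {
    if (got == ref)
      return 0;
    /* Size of one unit in the last place of the reference.  */
    double ulp = nextafter (fabs (ref), INFINITY) - fabs (ref);
    double n = fabs (got - ref) / ulp;   /* difference in ulps */
    return (int) floor (log2 (n)) + 1;  /* 1 ulp -> 1 bit, 3 ulps -> 2 bits */
  }

  int main (void)
  {
    printf ("%d\n", error_bits (0x1.0000000000001p+0, 1.0));  /* 1 */
    printf ("%d\n", error_bits (0x1.0000000000003p+0, 1.0));  /* 2 */
    return 0;
  }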
===================================================
Errors       Full Dataset
gtr eq     current    b1div      b2div      new
======    ========   ========   ========   ========
 1 bit    2.05%      1.23842%   0.67130%   0.16664%
 2 bits   1.88%      0.51615%   0.50354%   0.00900%
 8 bits   1.77%      0.42856%   0.42168%   0.00011%
16 bits   1.63%      0.33840%   0.32879%   0.00001%
24 bits   1.51%      0.25583%   0.24405%   0.00000%
52 bits   1.13%      0.01886%   0.00350%   0.00000%
===================================================
Table 2: Errors with Full Dataset (Double Precision)

Table 2 shows significant differences in error rates.  First, the
comparison between b1div and b2div shows a significantly higher error
rate for the b1div method, both for single bit errors and well
beyond.  Even at 52 bits, the b1div method gets completely wrong
answers more than 5 times as often as b2div.  To retain accuracy
comparable to the current complex divide results for small exponents,
and because of the increase in errors for large exponents, I chose
the more accurate method of two divides.

The current method has more than 1.6% of cases where the low 24 bits
of the mantissa differ from the correct answer, and more than 1.1% of
cases where the answer is completely wrong.  The new method shows
less than one case in 10,000 with greater than two bits of error and
only one case in 10 million with greater than 16 bits of error.  The
new patch reduces 8 bit errors by a factor of 16,000 and virtually
eliminates completely wrong answers.

As noted above, for architectures with double precision hardware, the
new method uses that hardware for the intermediate calculations
before returning the result in float precision.  Testing of the new
patch found zero errors, as seen in Tables 3 and 4.

Correctness for float

=============================
Errors       Moderate Dataset
gtr eq     current     new
======    =========   ========
 1 bit    28.68070%   0%
 2 bits    0.64386%   0%
 8 bits    0.00401%   0%
16 bits    0.00001%   0%
24 bits    0%         0%
=============================
Table 3: Errors with Moderate Dataset (float)

=============================
Errors       Full Dataset
gtr eq     current     new
======    =========   ========
 1 bit    19.98%      0%
 2 bits    3.20%      0%
 8 bits    1.97%      0%
16 bits    1.08%      0%
24 bits    0.55%      0%
=============================
Table 4: Errors with Full Dataset (float)

As before, the current method shows a troubling rate of extreme
errors.

There are very minor changes in accuracy for half precision since the
code changes from Smith's method to the simple method: 5 out of 1
million test cases show correct answers instead of 1 or 2 bit errors.
libgcc computes half precision functions in float precision, allowing
the existing methods to avoid overflow/underflow issues for the
allowed range of exponents for half precision.

Extended precision (using the x87 80-bit format on x86) and long
double (using IEEE-754 128-bit on x86 and aarch64) both have 15-bit
exponents, as compared to 11-bit exponents in double precision.  We
note that the C standard also allows long double to be implemented
with the equivalent range of double.  The RMIN2 and RMINSCAL
constants are selected to work within the double range as well as
within the extended and 128-bit ranges.  We limit the performance and
accuracy discussion here to the 80-bit and 128-bit formats as seen on
x86.

The extended and long double precision investigations were more
limited.  Aarch64 does not support extended precision but does
support the software implementation of 128-bit long double precision.
For x86, long double defaults to the 80-bit precision, but using the
-mlong-double-128 flag switches to the software implementation of
128-bit precision.  Both 80-bit and 128-bit precisions have the same
exponent range, while the 128-bit format has a wider mantissa.  Since
this change is only aimed at avoiding underflow/overflow for extreme
exponents, I studied the extended precision results on x86 for
100,000 values.  The limited exponent dataset showed no differences.
For the dataset with the full exponent range, the current and new
values showed major differences (greater than 32 bits) in 567 cases
out of 100,000 (0.56%).  In every one of these cases, the ratio of
c/d or d/c (as appropriate) was zero or subnormal, indicating the
advantage of the new method and its continued correctness where
needed.

PERFORMANCE Test results

In order for a library change to be practical, it is necessary to
show that the slowdown is tolerable.  The slowdowns observed are much
less than would be seen by, for example, switching from hardware
double precision to software quad precision, which on the tested
machines causes a slowdown of around 100x.

The actual slowdown depends on the machine architecture.  It also
depends on the nature of the input data.  If underflow/overflow is
rare, then implementations with strong branch prediction will only
slow down by a few cycles.  If underflow/overflow is common, then the
branch predictors will be less accurate and the cost will be higher.

Results from two machines are presented as examples of the overhead
of the new method.  The one labeled x86 is a 5 year old Intel x86
processor and the one labeled aarch64 is a 3 year old arm64
processor.

In the following chart, the times are averaged over a one million
value data set.  All values are scaled to set the time of the current
method to 1.0.  Lower values are better.  A value of less than 1.0
would be faster than the current method and a value greater than 1.0
would be slower than the current method.

================================================
               Moderate set      Full set
              x86    aarch64    x86    aarch64
========     ===============   ===============
float         0.59    0.79      0.45    0.81
double        1.04    1.24      1.38    1.56
long double   1.13    1.24      1.29    1.25
================================================
Table 5: Performance Comparisons (ratio new/current)

The table above omits the timing for the 1 divide / 2 multiply
variant compared with the 2 divide approach.

The float results show a clear performance improvement due to using
the simple method with double precision for intermediate
calculations.

The double results with the new method show less overhead for the
moderate dataset than for the full dataset.  That is because the
moderate dataset never takes the new branches which protect against
under/overflow.  The better the branch predictor, the lower the cost
for these untaken branches.  Both platforms are somewhat dated, with
the x86 having the better branch predictor, which reduces the cost of
the additional branches in the new code.  Of course, the relative
slowdown may be greater for some architectures, especially those with
limited branch prediction combined with a high cost of misprediction.

The long double results are fairly consistent in showing a moderate
additional cost from the extra branches and calculations in all
cases.

The observed cost for all precisions is claimed to be tolerable on
the grounds that:

(a) the cost is worthwhile considering the accuracy improvement shown.
(b) most applications will only spend a small fraction of their time
    calculating complex divide.
(c) it is much less than the cost of extended precision.
(d) users are not forced to use it (as described below).

Those users who find this degree of slowdown unsatisfactory may use
the GCC switch -fcx-fortran-rules, which does not use the library
routine and instead inlines Smith's method without the C99
requirement for dealing with NaN results.  The proposed patch for the
libgcc complex divide does not affect the code generated by
-fcx-fortran-rules.

SUMMARY

When the input data to complex divide has exponents whose absolute
value is less than half of *_MAX_EXP, this patch makes no changes in
accuracy and has only a modest effect on performance.  When the input
data contains values outside those ranges, the patch eliminates more
than 99.9% of major errors with a tolerable cost in performance.  In
comparison to Elen Kalda's method, this patch introduces more
performance overhead but reduces major errors by a factor of greater
than 4000.

REFERENCES

[1] Nelson H.F. Beebe, "The Mathematical-Function Computation
    Handbook."  Springer International Publishing AG, 2017.

[2] Robert L. Smith.  Algorithm 116: Complex division.  Commun. ACM,
    5(8):435, 1962.

[3] Michael Baudin and Robert L. Smith.  "A robust complex division
    in Scilab," October 2012, available at
    http://arxiv.org/abs/1210.4539.

[4] Elen Kalda: Complex division improvements in libgcc.
    https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html

2020-12-08  Patrick McGehearty  <patrick.mcgehearty@oracle.com>

gcc/c-family/
	* c-cppbuiltin.c (c_cpp_builtins): Add supporting macros for new
	complex divide.

libgcc/
	* libgcc2.c (XMTYPE, XCTYPE, RBIG, RMIN, RMIN2, RMINSCAL, RMAX2):
	Define.
	(__divsc3, __divdc3, __divxc3, __divtc3): Improve complex divide.
	* config/rs6000/_divkc3.c (RBIG, RMIN, RMIN2, RMINSCAL, RMAX2):
	Define.
	(__divkc3): Improve complex divide.

gcc/testsuite/
	* gcc.c-torture/execute/ieee/cdivchkd.c: New test.
	* gcc.c-torture/execute/ieee/cdivchkf.c: Likewise.
	* gcc.c-torture/execute/ieee/cdivchkld.c: Likewise.
      /* Prevent underflow when denominator is near max representable.  */
      if (FABS (c) >= RBIG)
        {
          a = a / 2;
          b = b / 2;
          c = c / 2;
          d = d / 2;
        }
      /* Avoid overflow/underflow issues when both c and d are small.
         Scaling up helps avoid some underflows.
         No new overflow possible since both c&d are less than RMIN2.  */
      if (FABS (c) < RMIN2)
        {
          a = a * RMINSCAL;
          b = b * RMINSCAL;
          c = c * RMINSCAL;
          d = d * RMINSCAL;
        }
      else
        {
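          /* Either a or b is subnormal (< RMIN) while the other
             numerator part and c are below RMAX2, so scaling all four
             values by RMINSCAL cannot overflow; |d| <= |c| in this
             branch, so only c needs the test.  */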
          if (((FABS (a) < RMIN) && (FABS (b) < RMAX2) && (FABS (c) < RMAX2))
              || ((FABS (b) < RMIN) && (FABS (a) < RMAX2)
                  && (FABS (c) < RMAX2)))
            {
              a = a * RMINSCAL;
              b = b * RMINSCAL;
              c = c * RMINSCAL;
              d = d * RMINSCAL;
            }
        }
      ratio = d / c;
      denom = (d * ratio) + c;
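      /* Note: denom == c + d * (d/c) == (c*c + d*d) / c, i.e. the
         naive denominator scaled by 1/c, computed without forming
         c*c or d*d and hence without their overflow/underflow.  */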
      /* Choose alternate order of computation if ratio is subnormal.  */
      if (FABS (ratio) > RMIN)
        {
          x = ((b * ratio) + a) / denom;
          y = (b - (a * ratio)) / denom;
        }
      else
        {
          x = (a + (d * (b / c))) / denom;
          y = (b - (d * (a / c))) / denom;
        }
    }
2021-04-28 21:14:48 +02:00
#endif
Practical improvement to libgcc complex divide

Correctness and performance test programs used during development of
this project may be found in the attachment to:
https://www.mail-archive.com/gcc-patches@gcc.gnu.org/msg254210.html

Summary of Purpose

This patch to libgcc/libgcc2.c __divdc3 provides an opportunity to
gain important improvements to the quality of answers for the default
complex divide routine (half, float, double, extended, long double
precisions) when dealing with very large or very small exponents.

The current code correctly implements Smith's method (1962) [2],
further modified by C99's requirements for dealing with NaN (not a
number) results. When working with input values where the exponents
are greater than *_MAX_EXP/2 or less than -(*_MAX_EXP)/2, results are
substantially different from the answers provided by quad precision
more than 1% of the time. This error rate may be unacceptable for many
applications that cannot a priori restrict their computations to the
safe range. The proposed method reduces the frequency of
"substantially different" answers by more than 99% for double
precision at a modest cost of performance.

Differences between current gcc methods and the new method will be
described. Then accuracy and performance differences will be
discussed.

Background

This project started with an investigation related to
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59714. Study of Beebe [1]
provided an overview of past and recent practice for computing complex
divide. The current libgcc implementation is based on Robert Smith's
algorithm [2] from 1962. A Google search found the paper by Baudin and
Smith [3] (same Robert Smith) published in 2012. Elen Kalda's proposed
patch [4] is based on that paper.

I developed two sets of test data by randomly distributing values over
a restricted range and the full range of input values. The current
complex divide handled the restricted range well enough, but failed on
the full range more than 1% of the time. Baudin and Smith's primary
test for "ratio" equals zero reduced the cases with 16 or more error
bits by a factor of 5, but still left too many flawed answers. Adding
debug print out to cases with substantial errors allowed me to see the
intermediate calculations for test values that failed. I noted that
for many of the failures, "ratio" was subnormal. Changing the "ratio"
test from a check for zero to a check for subnormal reduced the 16 bit
error rate by another factor of 12. This single modified test provides
the greatest benefit for the least cost, but the percentage of cases
with greater than 16 bit errors (double precision data) is still
greater than 0.027% (2.7 in 10,000).

Continued examination of remaining errors and their intermediate
computations led to the various tests of input values and scaling to
avoid under/overflow. The current patch does not handle some of the
rare and most extreme combinations of input values, but the random
test data is only showing 1 case in 10 million that has an error of
greater than 12 bits. That case has 18 bits of error and is due to
subtraction cancellation. These results are significantly better than
the results reported by Baudin and Smith.

Support for half, float, double, extended, and long double precision
is included, as all are handled with suitable preprocessor symbols in
a single source routine. Since half precision is computed with float
precision as per current libgcc practice, the enhanced algorithm
provides no benefit for half precision and would cost performance.
Further investigation showed that changing the half precision
algorithm to use the simple formula (real = a*c + b*d,
imag = b*c - a*d) caused no loss of precision and a modest improvement
in performance.

The existing constants for each precision
(float: FLT_MAX, FLT_MIN; double: DBL_MAX, DBL_MIN; extended and/or
long double: LDBL_MAX, LDBL_MIN) are used for avoiding the more common
overflow/underflow cases. This use is made generic by defining
appropriate __LIBGCC2_* macros in c-cppbuiltin.c.

Tests are added for when both parts of the denominator have exponents
small enough that, after shifting any subnormal values to normal
values, all input values could be scaled up without risking overflow.
That gained a clear improvement in accuracy. Similarly, when either
numerator was subnormal and the other numerator and both denominator
values were not too large, scaling could be used to reduce the risk of
computing with subnormals. The test and scaling values used all fit
within the exponent range required by the C standard for each
precision.

Float precision has more difficulty getting correct answers than
double precision. When hardware for double precision floating point
operations is available, float precision is now handled in double
precision intermediate calculations with the simple algorithm, the
same way the half-precision method uses float precision for
intermediate calculations. Using the higher precision yields exact
results for all tested input values (64-bit double, 32-bit float) with
the only performance cost being the requirement to convert the four
input values from float to double. If double precision hardware is not
available, then float complex divide will use the same improved
algorithm as the other precisions, with a similar change in
performance.

Further Improvement

The most common remaining substantial errors are due to accuracy loss
when subtracting nearly equal values. This patch makes no attempt to
improve that situation.

NOTATION

For all of the following, the notation is:
Input complex values:
  a+bi  (a = real part, b = imaginary part)
  c+di
Output complex value:
  e+fi = (a+bi)/(c+di)

For the result tables:
  current = current method (SMITH)
  b1div   = method proposed by Elen Kalda
  b2div   = alternate method considered by Elen Kalda
  new     = new method proposed by this patch

DESCRIPTIONS of different complex divide methods:

NAIVE COMPUTATION (-fcx-limited-range):

  e = (a*c + b*d)/(c*c + d*d)
  f = (b*c - a*d)/(c*c + d*d)

Note that c*c and d*d will overflow or underflow if either c or d is
outside the range 2^-538 to 2^512.

This method is available in gcc when the switch -fcx-limited-range is
used. That switch is also enabled by -ffast-math. Only one who has a
clear understanding of the maximum range of all intermediate values
generated by an application should consider using this switch.

SMITH's METHOD (current libgcc):

  if (fabs (c) < fabs (d))
    {
      r = c/d;
      denom = (c*r) + d;
      e = (a*r + b) / denom;
      f = (b*r - a) / denom;
    }
  else
    {
      r = d/c;
      denom = c + (d*r);
      e = (a + b*r) / denom;
      f = (b - a*r) / denom;
    }

Smith's method is the current default method available with __divdc3.

Elen Kalda's METHOD

Elen Kalda proposed a patch about a year ago, also based on Baudin and
Smith, but not including tests for subnormals:
https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html [4]
It is compared here for accuracy with this patch. This method applies
the most significant part of the algorithm proposed by Baudin & Smith
(2012) in the paper "A Robust Complex Division in Scilab" [3].
Elen's method also replaces two divides by one divide and two
multiplies due to the high cost of divide on aarch64. In the
comparison sections, this method will be labeled b1div. A variation
discussed in that patch which does not replace the two divides will be
labeled b2div.

  inline void improved_internal (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
  {
    r = d/c;
    t = 1.0 / (c + (d * r));
    if (r != 0)
      {
        x = (a + (b * r)) * t;
        y = (b - (a * r)) * t;
      }
    else
      {
        /* Changing the order of operations avoids the underflow of r
           impacting the result.  */
        x = (a + (d * (b / c))) * t;
        y = (b - (d * (a / c))) * t;
      }
  }

  if (FABS (d) < FABS (c))
    {
      improved_internal (a, b, c, d);
    }
  else
    {
      improved_internal (b, a, d, c);
      y = -y;
    }

NEW METHOD (proposed by patch) to replace the current default method:

The proposed method starts with an algorithm proposed by Baudin &
Smith (2012) in the paper "A Robust Complex Division in Scilab" [3].
The patch makes additional modifications to that method for further
reductions in the error rate. The following code shows the #define
values for double precision. See the patch for the #define values used
for other precisions.

  #define RBIG     ((DBL_MAX)/2.0)
  #define RMIN     (DBL_MIN)
  #define RMIN2    (0x1.0p-53)
  #define RMINSCAL (0x1.0p+51)
  #define RMAX2    ((RBIG)*(RMIN2))

  if (FABS (c) < FABS (d))
    {
      /* Prevent overflow when arguments are near max representable.  */
      if ((FABS (d) > RBIG) || (FABS (a) > RBIG) || (FABS (b) > RBIG))
        {
          a = a * 0.5;
          b = b * 0.5;
          c = c * 0.5;
          d = d * 0.5;
        }
      /* Minimize overflow/underflow issues when c and d are small.  */
      else if (FABS (d) < RMIN2)
        {
          a = a * RMINSCAL;
          b = b * RMINSCAL;
          c = c * RMINSCAL;
          d = d * RMINSCAL;
        }
      else if (((FABS (a) < RMIN) && (FABS (b) < RMAX2)
                && (FABS (d) < RMAX2))
               || ((FABS (b) < RMIN) && (FABS (a) < RMAX2)
                   && (FABS (d) < RMAX2)))
        {
          a = a * RMINSCAL;
          b = b * RMINSCAL;
          c = c * RMINSCAL;
          d = d * RMINSCAL;
        }
      r = c/d;
      denom = (c*r) + d;
      if (r > RMIN)
        {
          e = (a*r + b) / denom;
          f = (b*r - a) / denom;
        }
      else
        {
          e = (c * (a/d) + b) / denom;
          f = (c * (b/d) - a) / denom;
        }
    }

  [ Only the fabs(c) < fabs(d) case is presented here; the full code
    is in the patch. ]

Before any computation of the answer, the code checks for any input
values near maximum to allow downscaling to avoid overflow. These
scalings almost never harm the accuracy since they are by a power
of 2. Values that are over RBIG are relatively rare, but it is easy to
test for them, and doing so allows overflow to be avoided.

Testing for RMIN2 reveals when both c and d are less than
[FLT|DBL]_EPSILON. By scaling all values by 1/EPSILON, the code
converts subnormals to normals and avoids loss of accuracy and
underflows in intermediate computations that otherwise might occur. If
scaling a and b by 1/EPSILON causes either to overflow, then the
computation will overflow whatever method is used.

Finally, we test for either a or b being subnormal (RMIN) and, if so,
for the other three values being small enough to allow scaling. We
only need to test a single denominator value since we have already
determined which of c and d is larger.

Next, r (the ratio of c to d) is checked for being near zero. Baudin
and Smith checked r for zero. This code improves on that approach:
checking for values less than DBL_MIN (subnormal) covers roughly
12 times as many cases and substantially improves overall accuracy. If
r is too small, then when it is used in a multiplication, there is a
high chance that the result will underflow to zero, losing significant
accuracy. That underflow is avoided by reordering the computation.
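To see the failure mode concretely, here is a small standalone program
(illustrative only, not part of the patch or its test suite). It
assumes IEEE binary64 double arithmetic, as on x86-64 or aarch64, and
the inputs are chosen artificially so that r = c/d underflows to zero,
at which point Smith's formula silently drops the contribution of a:

  /* Illustration: r = c/d underflows, so a*r == 0 whatever a is.  */
  #include <stdio.h>

  int main (void)
  {
    double a = 0x1.0p+100, b = 0x1.0p-990;
    double c = 0x1.0p-1070, d = 0x1.0p+10;  /* |c| < |d| branch.  */

    double r = c / d;                  /* 2^-1080 underflows to 0.0.  */
    double denom = (c * r) + d;        /* == d here.  */
    double e_smith = (a * r + b) / denom;        /* a's term lost.  */
    double e_reord = (c * (a / d) + b) / denom;  /* a's term kept.  */

    printf ("r     = %a\n", r);        /* 0x0p+0 */
    printf ("smith = %a\n", e_smith);  /* 0x1p-1000: low by ~1024x.  */
    printf ("reord = %a\n", e_reord);  /* 0x1.004p-990: correct.  */
    return 0;
  }

Here the reordered expression matches the mathematically exact real
part, while Smith's formula comes out low by a factor of about 1024.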
When r is subnormal, the code replaces a*r (= a*(c/d)) with ((a/d)*c),
which is mathematically the same but avoids the unnecessary underflow.

TEST DATA

Two sets of data are presented to test these methods. Both sets
contain 10 million pairs of complex values. The exponents and
mantissas are generated using multiple calls to random() and then
combining the results. Only values which give results to complex
divide that are representable in the appropriate precision after being
computed in quad precision are used.

The first data set is labeled "moderate exponents". The exponent range
is limited to -DBL_MAX_EXP/2 to DBL_MAX_EXP/2 for double precision
(use FLT_MAX_EXP or LDBL_MAX_EXP for the other precisions). The second
data set is labeled "full exponents". The exponent range for these
cases is the full exponent range, including subnormals, for a given
precision.

ACCURACY Test results:

Note: The following accuracy tests are based on IEEE-754 arithmetic.

Note: All results reported are based on use of fused multiply-add. If
fused multiply-add is not used, the error rate increases, giving more
1 and 2 bit errors for both current and new complex divide.
Differences of more than 2 bits between using fused multiply-add and
not using it occur in fewer than 1 in a million cases.

The complex divide methods are evaluated by determining the percentage
of values that exceed differences in low order bits. If a "2 bit" test
result shows 1%, that would mean that 1% of 10,000,000 values
(100,000) have either a real or imaginary part that differs from the
quad precision result by more than the last 2 bits. Results are
reported for differences greater than or equal to 1 bit, 2 bits,
8 bits, 16 bits, 24 bits, and 52 bits for double precision.

Even when the patch avoids overflows and underflows, some input values
are expected to have errors due to the potential for catastrophic
roundoff from floating point subtraction. For example, when b*c and
a*d are nearly equal, the result of subtraction may lose several
places of accuracy. This patch does not attempt to detect or minimize
this type of error, but neither does it increase them.

Only the double precision table shows the results for Elen Kalda's
method (with both 1 and 2 divides) alongside the new method. In the
following charts, lower values are better.

  current - current complex divide in libgcc
  b1div   - Elen Kalda's method from Baudin & Smith with one divide
  b2div   - Elen Kalda's method from Baudin & Smith with two divides
  new     - this patch, which uses 2 divides

  ===================================================
  Errors            Moderate Dataset
  gtr eq    current    b1div      b2div      new
  ======   ========   ========   ========   ========
   1 bit   0.24707%   0.92986%   0.24707%   0.24707%
   2 bits  0.01762%   0.01770%   0.01762%   0.01762%
   8 bits  0.00026%   0.00026%   0.00026%   0.00026%
  16 bits  0.00000%   0.00000%   0.00000%   0.00000%
  24 bits  0%         0%         0%         0%
  52 bits  0%         0%         0%         0%
  ===================================================
  Table 1: Errors with Moderate Dataset (Double Precision)

Note in Table 1 that both the old and new methods give identical error
rates for data with moderate exponents. Errors exceeding 16 bits are
exceedingly rare. There are substantial increases in the 1 bit error
rate for b1div (the 1 divide/2 multiplies method) as compared to b2div
(the 2 divides method). These differences are minimal for 2 bits and
larger error measurements.
  ===================================================
  Errors            Full Dataset
  gtr eq    current    b1div      b2div      new
  ======   ========   ========   ========   ========
   1 bit   2.05%      1.23842%   0.67130%   0.16664%
   2 bits  1.88%      0.51615%   0.50354%   0.00900%
   8 bits  1.77%      0.42856%   0.42168%   0.00011%
  16 bits  1.63%      0.33840%   0.32879%   0.00001%
  24 bits  1.51%      0.25583%   0.24405%   0.00000%
  52 bits  1.13%      0.01886%   0.00350%   0.00000%
  ===================================================
  Table 2: Errors with Full Dataset (Double Precision)

Table 2 shows significant differences in error rates. First, the
comparison between b1div and b2div shows a significantly higher error
rate for the b1div method, both for single bit errors and well beyond.
Even for 52 bits, we see the b1div method gets completely wrong
answers more than 5 times as often as b2div. To retain comparable
accuracy with current complex divide results for small exponents, and
because of the increase in errors for large exponents, I chose to use
the more accurate method of two divides.

The current method has more than 1.6% of cases where it gets results
whose low 24 bits of the mantissa differ from the correct answer, and
more than 1.1% of cases where the answer is completely wrong. The new
method shows less than one case in 10,000 with greater than two bits
of error and only one case in 10 million with greater than 16 bits of
error. The new patch reduces 8 bit errors by a factor of 16,000 and
virtually eliminates completely wrong answers.

As noted above, for architectures with double precision hardware, the
new method uses that hardware for the intermediate calculations before
returning the result in float precision. Testing of the new patch has
shown zero errors, as seen in Tables 3 and 4.

Correctness for float:

  =============================
  Errors            Moderate Dataset
  gtr eq    current      new
  ======   =========   ========
   1 bit   28.68070%   0%
   2 bits   0.64386%   0%
   8 bits   0.00401%   0%
  16 bits   0.00001%   0%
  24 bits   0%         0%
  =============================
  Table 3: Errors with Moderate Dataset (float)

  =============================
  Errors            Full Dataset
  gtr eq    current    new
  ======   ========   ========
   1 bit   19.98%     0%
   2 bits   3.20%     0%
   8 bits   1.97%     0%
  16 bits   1.08%     0%
  24 bits   0.55%     0%
  =============================
  Table 4: Errors with Full Dataset (float)

As before, the current method shows a troubling rate of extreme
errors.

There are very minor changes in accuracy for half-precision, since the
code changes from Smith's method to the simple method: 5 out of
1 million test cases show correct answers instead of 1 or 2 bit
errors. libgcc computes half-precision functions in float precision,
allowing the existing methods to avoid overflow/underflow issues for
the allowed range of exponents for half-precision.

Extended precision (using x87 80-bit format on x86) and long double
(using IEEE-754 128-bit on x86 and aarch64) both have 15-bit exponents
as compared to 11-bit exponents in double precision. We note that the
C standard also allows long double to be implemented in the equivalent
range of double. The RMIN2 and RMINSCAL constants are selected to work
within the double range as well as with the extended and 128-bit
ranges. We will limit our performance and accuracy discussions to the
80-bit and 128-bit formats as seen on x86 here.

The extended and long double precision investigations were more
limited. Aarch64 does not support extended precision but does support
the software implementation of 128-bit long double precision.
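As a quick sanity check of those exponent ranges on a given target,
the <float.h> limits can be printed directly (an illustrative snippet;
the commented values are what the IEEE-754 formats discussed here
report):

  #include <float.h>
  #include <stdio.h>

  int main (void)
  {
    printf ("FLT_MAX_EXP  = %d\n", FLT_MAX_EXP);   /* 128 */
    printf ("DBL_MAX_EXP  = %d\n", DBL_MAX_EXP);   /* 1024 (11-bit exp) */
    printf ("LDBL_MAX_EXP = %d\n", LDBL_MAX_EXP);  /* 16384 for both x87
                                                      80-bit and IEEE
                                                      128-bit (15-bit
                                                      exponent) */
    return 0;
  }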
For x86, long double defaults to the 80-bit precision, but using the
-mlong-double-128 flag switches to the software implementation of
128-bit precision. Both 80-bit and 128-bit precisions have the same
exponent range; the 128-bit precision has a wider mantissa. Since this
change is only aimed at avoiding underflow/overflow for extreme
exponents, I studied the extended precision results on x86 for 100,000
values. The limited exponent dataset showed no differences. For the
dataset with full exponent range, the current and new values showed
major differences (greater than 32 bits) in 567 cases out of 100,000
(0.56%). In every one of these cases, the ratio of c/d or d/c (as
appropriate) was zero or subnormal, indicating the advantage of the
new method and its continued correctness where needed.

PERFORMANCE Test results

In order for a library change to be practical, it is necessary to show
the slowdown is tolerable. The slowdowns observed are much less than
would be seen by (for example) switching from hardware double
precision to a software quad precision, which on the tested machines
causes a slowdown of around 100x.

The actual slowdown depends on the machine architecture. It also
depends on the nature of the input data. If underflow/overflow is
rare, then implementations that have strong branch prediction will
only slow down by a few cycles. If underflow/overflow is common, then
the branch predictors will be less accurate and the cost will be
higher.

Results from two machines are presented as examples of the overhead
for the new method. The one labeled x86 is a 5-year-old Intel x86
processor and the one labeled aarch64 is a 3-year-old arm64 processor.

In the following chart, the times are averaged over a one million
value data set. All values are scaled to set the time of the current
method to be 1.0. Lower values are better. A value of less than 1.0
would be faster than the current method and a value greater than 1.0
would be slower than the current method.

  ================================================
                 Moderate set        full set
                 x86    aarch64      x86    aarch64
  ===========    ==============      ==============
  float          0.59    0.79        0.45    0.81
  double         1.04    1.24        1.38    1.56
  long double    1.13    1.24        1.29    1.25
  ================================================
  Table 5: Performance Comparisons (ratio new/current)

The above table omits the timing for the 1 divide and 2 multiply
comparison with the 2 divide approach.

The float results show a clear performance improvement due to using
the simple method with double precision for intermediate calculations.

The double results with the newer method show less overhead for the
moderate dataset than for the full dataset. That's because the
moderate dataset never takes the new branches which protect from
under/overflow. The better the branch predictor, the lower the cost
for these untaken branches. Both platforms are somewhat dated, with
the x86 having a better branch predictor which reduces the cost of the
additional branches in the new code. Of course, the relative slowdown
may be greater for some architectures, especially those with limited
branch prediction combined with a high cost of misprediction.

The long double results are fairly consistent in showing a moderate
additional cost for the extra branches and calculations in all cases.

The observed cost for all precisions is claimed to be tolerable on the
grounds that:

(a) the cost is worthwhile considering the accuracy improvement shown.
(b) most applications will only spend a small fraction of their time
    calculating complex divide.
(c) it is much less than the cost of extended precision.
(d) users are not forced to use it (as described below).

Those users who find this degree of slowdown unsatisfactory may use
the gcc switch -fcx-fortran-rules, which does not use the library
routine, instead inlining Smith's method without the C99 requirement
for dealing with NaN results. The proposed patch for libgcc complex
divide does not affect the code generated by -fcx-fortran-rules.

SUMMARY

When input data to complex divide has exponents whose absolute value
is less than half of *_MAX_EXP, this patch makes no changes in
accuracy and has only a modest effect on performance. When input data
contains values outside those ranges, the patch eliminates more than
99.9% of major errors with a tolerable cost in performance. In
comparison to Elen Kalda's method, this patch introduces more
performance overhead but reduces major errors by a factor of greater
than 4000.

REFERENCES

[1] Nelson H.F. Beebe, "The Mathematical-Function Computation
    Handbook." Springer International Publishing AG, 2017.
[2] Robert L. Smith. Algorithm 116: Complex division. Commun. ACM,
    5(8):435, 1962.
[3] Michael Baudin and Robert L. Smith. "A robust complex division in
    Scilab," October 2012, available at http://arxiv.org/abs/1210.4539.
[4] Elen Kalda: Complex division improvements in libgcc.
    https://gcc.gnu.org/legacy-ml/gcc-patches/2019-08/msg01629.html

2020-12-08  Patrick McGehearty  <patrick.mcgehearty@oracle.com>

gcc/c-family/
	* c-cppbuiltin.c (c_cpp_builtins): Add supporting macros for new
	complex divide.

libgcc/
	* libgcc2.c (XMTYPE, XCTYPE, RBIG, RMIN, RMIN2, RMINSCAL, RMAX2):
	Define.
	(__divsc3, __divdc3, __divxc3, __divtc3): Improve complex divide.
	* config/rs6000/_divkc3.c (RBIG, RMIN, RMIN2, RMINSCAL, RMAX2):
	Define.
	(__divkc3): Improve complex divide.

gcc/testsuite/
	* gcc.c-torture/execute/ieee/cdivchkd.c: New test.
	* gcc.c-torture/execute/ieee/cdivchkf.c: Likewise.
	* gcc.c-torture/execute/ieee/cdivchkld.c: Likewise.
2021-04-28 21:14:48 +02:00
/* Recover infinities and zeros that were computed as NaN+iNaN; the only
cases are nonzero/zero, infinite/finite, and finite/infinite. */
if (isnan (x) && isnan (y))
{
if (c == 0.0 && d == 0.0 && (!isnan (a) || !isnan (b)))
{
x = COPYSIGN (INFINITY, c) * a;
y = COPYSIGN (INFINITY, c) * b;
}
else if ((isinf (a) || isinf (b)) && isfinite (c) && isfinite (d))
{
a = COPYSIGN (isinf (a) ? 1 : 0, a);
b = COPYSIGN (isinf (b) ? 1 : 0, b);
x = INFINITY * (a * c + b * d);
y = INFINITY * (b * c - a * d);
}
else if ((isinf (c) || isinf (d)) && isfinite (a) && isfinite (b))
{
c = COPYSIGN (isinf (c) ? 1 : 0, c);
d = COPYSIGN (isinf (d) ? 1 : 0, d);
x = 0.0 * (a * c + b * d);
y = 0.0 * (b * c - a * d);
}
}
__real__ res = x;
__imag__ res = y;
return res;
}
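/* Illustration (not part of libgcc) of the three recoveries above, as
   seen from C99 _Complex arithmetic that lowers to these routines:

     (1.0 + 1.0*I) / (0.0 + 0.0*I)       nonzero/zero    -> infinite result
     (INFINITY + 0.0*I) / (3.0 + 4.0*I)  infinite/finite -> infinite result
     (3.0 + 4.0*I) / (INFINITY + 0.0*I)  finite/infinite -> zero result

   The COPYSIGN calls and multiplications above serve only to give each
   component of the recovered infinity or zero the correct sign.  */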
#endif /* complex divide */
#endif /* all complex float routines */
/* From here on down, the routines use normal data types. */
#define SItype bogus_type
#define USItype bogus_type
#define DItype bogus_type
#define UDItype bogus_type
#define SFtype bogus_type
#define DFtype bogus_type
#undef Wtype
#undef UWtype
#undef HWtype
#undef UHWtype
#undef DWtype
#undef UDWtype
#undef char
#undef short
#undef int
#undef long
#undef unsigned
#undef float
#undef double
#ifdef L__gcc_bcmp
/* Like bcmp except the sign is meaningful.
1995-05-16 14:39:54 +02:00
Result is negative if S1 is less than S2,
positive if S1 is greater, 0 if S1 and S2 are equal. */
int
__gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
{
while (size > 0)
{
const unsigned char c1 = *s1++, c2 = *s2++;
if (c1 != c2)
return c1 - c2;
size--;
}
return 0;
}
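/* Usage sketch (illustrative): unlike the historical bcmp, the sign of
   the result orders the buffers by their first differing byte:

     static const unsigned char s1[] = { 1, 2, 3 };
     static const unsigned char s2[] = { 1, 2, 5 };

     __gcc_bcmp (s1, s2, 3)   returns 3 - 5 == -2  (negative: s1 < s2)
     __gcc_bcmp (s2, s1, 3)   returns 2            (positive: s2 > s1)
     __gcc_bcmp (s1, s1, 3)   returns 0            (equal)  */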
#endif
/* __eprintf used to be used by GCC's private version of <assert.h>.
We no longer provide that header, but this routine remains in libgcc.a
for binary backward compatibility. Note that it is not included in
the shared version of libgcc. */
#ifdef L_eprintf
#ifndef inhibit_libc
#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
#include <stdio.h>
void
__eprintf (const char *string, const char *expression,
unsigned int line, const char *filename)
{
fprintf (stderr, string, expression, line, filename);
fflush (stderr);
abort ();
}
#endif
1992-01-28 04:44:05 +01:00
#endif
#ifdef L_clear_cache
/* Clear part of an instruction cache. */
void
__clear_cache (void *beg __attribute__((__unused__)),
void *end __attribute__((__unused__)))
1992-01-28 04:44:05 +01:00
{
#ifdef CLEAR_INSN_CACHE
/* Cast the void* pointers to char* as some implementations
of the macro assume the pointers can be subtracted from
one another. */
CLEAR_INSN_CACHE ((char *) beg, (char *) end);
#endif /* CLEAR_INSN_CACHE */
1992-01-28 04:44:05 +01:00
}
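/* Usage sketch (illustrative, not part of libgcc): code that writes
   instructions into a buffer must flush the instruction cache for that
   range before jumping into it:

     emit_code_into (buf, len);                (hypothetical helper)
     __clear_cache (buf, (char *) buf + len);
     ((void (*) (void)) buf) ();

   On targets that do not define CLEAR_INSN_CACHE, the function above
   is a no-op.  */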
#endif /* L_clear_cache */
#ifdef L_trampoline
/* Jump to a trampoline, loading the static chain address. */
MAINTAINERS (mt port): Remove. * MAINTAINERS (mt port): Remove. (sco5, unixware, sco udk): Remove. (Kean Johnston): Add to Write After Approval. fixincludes: * inclhack.def (AAB_svr4_replace_byteorder, AAB_ultrix_ansi_compat, AAB_ultrix_limits, AAB_ultrix_memory, libc1_G_va_list, libc1_ifdefd_memx, nested_motorola, ptx_sys_mc_param_h, sco_regset, sco_static_func, sco_utime, solaris_mutex_init_1, solaris_socket, solaris_unistd, solaris_widec, svr4_krnl, ultrix_atexit_param, ultrix_atof_param, ultrix_const3, ultrix_fix_fixproto, ultrix_ifdef, ultrix_locale, ultrix_math_ifdef, ultrix_nested_ioctl, ultrix_nested_svc, ultrix_stat, ultrix_static, ultrix_stdlib, ultrix_strings, ultrix_strings2, ultrix_sys_time, ultrix_unistd, unicosmk_restrict, uw7_byteorder_fix, windiss_math1, windiss_math2, windiss_valist): Remove. * fixincl.x: Regenerate. * mkfixinc.sh: (arm-semi-aof, hppa1.1-*-osf*, hppa1.1-*-bsd*, i370-*-openedition, i?86-*-moss*, i?86-*-uwin*, powerpc-*-eabiaix*): Remove. * tests/base/math.h: Update. * tests/base/pthread.h: Update. * tests/base/stdio.h: Update. * tests/base/stdlib.h: Update. * tests/base/string.h: Update. * tests/base/strings.h: Update. * tests/base/sys/file.h: Update. * tests/base/sys/limits.h: Update. * tests/base/sys/socket.h: Update. * tests/base/sys/stat.h: Update. * tests/base/sys/time.h: Update. * tests/base/testing.h: Update. * tests/base/unistd.h: Update. * tests/base/_G_config.h: Remove. * tests/base/arpa: Remove directory. * tests/base/fs: Remove directory. * tests/base/locale.h: Remove. * tests/base/machine: Remove directory. * tests/base/rpc/svc.h: Remove. * tests/base/sys/ioctl.h: Remove. * tests/base/sys/regset.h: Remove. * tests/base/sys/times.h: Remove. * tests/base/sys/utsname.h: Remove. * tests/base/widec.h: Remove. gcc: * config.gcc (Obsolete configurations): Remove list of configurations. (Unsupported targets list): Add *-*-linux*aout*, *-*-linux*libc1*, *-*-solaris2.[0-6], *-*-solaris2.[0-6].*, *-*-sysv*. Remove other targets matched by those patterns. (strongarm*-*-*, ep9312*-*-*, xscale-*-*, parisc*-*-*, m680[012]0-*-*, *-*-linux*libc1*, *-*-linux*aout*, alpha*-*-unicosmk*, strongarm*-*-freebsd*, ep9312-*-elf, arm*-*-kaos*, cris-*-aout, parisc*64*-*-linux*, parisc*-*-linux*, hppa1.1-*-pro*, hppa1.1-*-osf*, hppa1.1-*-bsd*, i[34567]86-sequent-ptx4*, i[34567]86-sequent-sysv4*, i[34567]86-*-beoself*, i[34567]86-*-beos*, i[34567]86-*-sco3.2v5*, i[34567]86-*-sysv5*, i[34567]86-*-sysv4*, i[34567]86-*-uwin*, i[34567]86-*-kaos*, m68020-*-elf*, m68010-*-netbsdelf*, mips-wrs-windiss, mt-*-elf, powerpc-*-beos*, powerpc-*-chorusos*, powerpc-wrs-windiss*, powerpcle-*-sysv*, powerpc-*-kaos*, powerpcle-*-kaos*, sh*-*-kaos*, sparc-*-sysv4*, strongarm-*-elf*, strongarm-*-pe, strongarm-*-kaos*, vax-*-bsd*, vax-*-sysv*, vax-*-ultrix*, xscale-*-elf, xscale-*-coff, i[34567]86-*-linux*aout*, i[34567]86-*-linux*libc1): Remove. Make code for Solaris 7 and greater unconditional for Solaris. (ep9312-*-*, parisc1*, m680[012]0-*-*, parisc*-*-*, mt-*-*): Remove --with-* handling. * config/rs6000/sysv4.h (-mwindiss): Remove from all specs. (LIB_WINDISS_SPEC, CPP_OS_WINDISS_SPEC, STARTFILE_WINDISS_SPEC, ENDFILE_WINDISS_SPEC, LINK_START_WINDISS_SPEC, LINK_OS_WINDISS_SPEC): Remove. * config/rs6000/sysv4.opt (mwindiss): Remove. * configure.ac (strongarm*-*-*, xscale*-*-*): Remove. * configure: Regenerate. * doc/cpp.texi: Don't mention BeOS. * doc/extend.texi (interrupt): Don't mention MS1. 
* doc/install.texi: (i386-@var{any}-sysv, m68k-bull-sysv, m68k-hp-hpux, m68000-hp-hpux, m68000-att-sysv, alphaev5-cray-unicosmk*, xscale-*-*, i?86-*-linux*aout, i?86-*-sco3.2v5*, i?86-*-udk, m68k-hp-hpux, powerpc-*-sysv4, powerpc-*-sysv4, powerpcle-*-sysv4, *-*-sysv*, vax-dec-ultrix): Remove. * doc/invoke.texi (MT Options): Remove. (-mwindiss): Remove. (CRIS Options): Remove cris-axis-aout references. (HPPA Options): Don't mention hppa1.1-*-pro. * doc/md.texi: (MorphoTech family): Remove. * libgcc2.c: Don't handle UWIN. * config/alpha/t-unicosmk: Remove. * config/alpha/unicosmk.h: Remove. * config/arm/kaos-arm.h: Remove. * config/arm/kaos-strongarm.h: Remove. * config/arm/strongarm-coff.h: Remove. * config/arm/strongarm-elf.h: Remove. * config/arm/strongarm-pe.h: Remove. * config/arm/t-strongarm-pe: Remove. * config/arm/t-xscale-coff: Remove. * config/arm/t-xscale-elf: Remove. * config/arm/xscale-coff.h: Remove. * config/arm/xscale-elf.h: Remove. * config/chorus.h: Remove. * config/cris/aout.h: Remove. * config/cris/aout.opt: Remove. * config/cris/t-aout: Remove. * config/i386/beos-elf.h: Remove. * config/i386/kaos-i386.h: Remove. * config/i386/ptx4-i.h: Remove. * config/i386/sco5.h: Remove. * config/i386/sco5.opt: Remove. * config/i386/sysv4-cpp.h: Remove. * config/i386/sysv5.h: Remove. * config/i386/t-beos: Remove. * config/i386/t-sco5: Remove. * config/i386/t-uwin: Remove. * config/i386/uwin.asm: Remove. * config/i386/uwin.h: Remove. * config/kaos.h: Remove. * config/mips/windiss.h: Remove. * config/mt: Remove directory. * config/pa/pa-osf.h: Remove. * config/pa/pa-pro-end.h: Remove. * config/pa/t-pro: Remove. * config/ptx4.h: Remove. * config/rs6000/beos.h: Remove. * config/rs6000/kaos-ppc.h: Remove. * config/rs6000/t-beos: Remove. * config/rs6000/windiss.h: Remove. * config/sh/kaos-sh.h: Remove. * config/sol2-6.h: Remove. * config/sparc/sol26-sld.h: Remove. * config/sparc/sysv4-only.h: Remove. * config/vax/bsd.h: Remove. * config/vax/t-memfuncs: Remove. * config/vax/ultrix.h: Remove. * config/vax/vaxv.h: Remove. * config/windiss.h: Remove. gcc/testsuite: * g++.dg/abi/arm_cxa_vec1.C: Don't handle xscale*-*-*. * g++.dg/eh/spbp.C: Don't handle *-*-solaris2.[56]*. * g++.dg/warn/miss-format-1.C: Don't handle Solaris before Solaris 7. * gcc.c-torture/compile/981006-1.c: Don't handle xscale*-*-*, strongarm*-*-* and cris-*-aout*. * gcc.c-torture/execute/941014-1.x: Don't handle xscale*-*-* and strongarm*-*-*. * gcc.dg/20030909-1.c: Don't handle xscale*-*-* and strongarm*-*-*. * gcc.dg/20031108-1.c: Don't handle xscale*-*-* and strongarm*-*-*. * gcc.dg/20040813-1.c: Don't handle *-*-sysv5*. * gcc.dg/arm-asm.c: Don't handle strongarm*-*-* and xscale*-*-*. * gcc.dg/arm-scd42-1.c: Use target arm*-*-*. * gcc.dg/arm-scd42-3.c: Use target arm*-*-*. * gcc.dg/cpp/assert4.c: Don't handle BeOS. * gcc.dg/debug/pr35154.c: Don't handle *-*-sysv5*. * gcc.dg/intmax_t-1.c: Don't handle *-*-solaris2.5.1 and xscale*-*-elf*. * gcc.dg/pragma-align.c: Don't handle i?86-*-sco3.2v5*. * gcc.dg/pthread-init-2.c: Don't handle *-*-solaris2.5.1. * gcc.misc-tests/arm-isr.exp: Use target arm*-*-*. * gcc.target/powerpc/ppc-sdata-1.c: Don't handle powerpc-*-sysv*. * gcc.target/powerpc/ppc-sdata-2.c: Don't handle powerpc-*-sysv*. * gcc.target/powerpc/ppc-stackalign-1.c: Don't handle powerpc-*-sysv*. * gfortran.dg/debug/pr35154-stabs.f: Don't handle *-*-sysv5*. * lib/target-supports.exp: Don't handle strongarm*-*-elf, xscale*-*-elf and *-*-windiss. * obj-c++.dg/dwarf-2.mm: Don't handle *-*-solaris2.[56]*. 
* objc.dg/dwarf-1.m: Don't handle *-*-solaris2.[56]*. * objc.dg/dwarf-2.m: Don't handle *-*-solaris2.[56]*. * gcc.dg/mt-loopi1.c: Remove. gnattools: * configure.ac (xscale*-wrs-vx*, xscale*-wrs-coff): Remove. * configure: Regenerate. libcpp: * configure.ac (parisc*64*-*-*): Remove. * configure: Regenerate. libffi: * configure.ac (parisc*-*-linux*, powerpc-*-sysv*, powerpc-*-beos*): Remove. * configure: Regenerate. libgcc: * config.host (strongarm*-*-*, ep9312*-*-*, xscale-*-*, parisc*-*-*, m680[012]0-*-*, *-*-linux*libc1*, *-*-linux*aout*, alpha*-*-unicosmk*, strongarm*-*-freebsd*, ep9312-*-elf, arm*-*-kaos*, cris-*-aout, parisc*64*-*-linux*, parisc*-*-linux*, hppa1.1-*-pro*, hppa1.1-*-osf*, hppa1.1-*-bsd*, i[34567]86-sequent-ptx4*, i[34567]86-sequent-sysv4*, i[34567]86-*-beoself*, i[34567]86-*-beos*, i[34567]86-*-sco3.2v5*, i[34567]86-*-sysv5*, i[34567]86-*-sysv4*, i[34567]86-*-uwin*, i[34567]86-*-kaos*, m68020-*-elf*, m68010-*-netbsdelf*, mips-wrs-windiss, mt-*-elf, powerpc-*-beos*, powerpc-*-chorusos*, powerpc-wrs-windiss*, powerpcle-*-sysv*, powerpc-*-kaos*, powerpcle-*-kaos*, sh*-*-kaos*, sparc-*-sysv4*, strongarm-*-elf*, strongarm-*-pe, strongarm-*-kaos*, vax-*-bsd*, vax-*-sysv*, vax-*-ultrix*, xscale-*-elf, xscale-*-coff): Remove. libjava: * configure.host (strongarm*-elf, xscale*-elf): Remove. libstdc++-v3: * configure.host (xscale, ep9312, m680[246]0, solaris2.5, solaris2.5.[0-9], solaris2.6, windiss*): Remove. * crossconfig.m4 (*-solaris2.5, *-solaris2.6, *-windiss*): Remove. * configure: Regenerate. * config/os/solaris/solaris2.5: Remove directory. * config/os/solaris/solaris2.6: Remove directory. * config/os/windiss: Remove directory. From-SVN: r136534
2008-06-07 20:00:15 +02:00
#if defined(WINNT) && ! defined(__CYGWIN__)
#include <windows.h>
int getpagesize (void);
int mprotect (char *, int, int);
int
getpagesize (void)
{
#ifdef _ALPHA_
  /* Alpha NT uses 8 KB pages.  */
  return 8192;
#else
  /* Other NT targets (e.g. x86) use 4 KB pages.  */
  return 4096;
#endif
}
int
mprotect (char *addr, int len, int prot)
{
  DWORD np, op;

  /* Map the POSIX protection bits (PROT_READ = 1, PROT_WRITE = 2,
     PROT_EXEC = 4) onto the equivalent Win32 PAGE_* constants.  */
  if (prot == 7)
    np = 0x40;		/* rwx -> PAGE_EXECUTE_READWRITE  */
  else if (prot == 5)
    np = 0x20;		/* r-x -> PAGE_EXECUTE_READ  */
  else if (prot == 4)
    np = 0x10;		/* --x -> PAGE_EXECUTE  */
  else if (prot == 3)
    np = 0x04;		/* rw- -> PAGE_READWRITE  */
  else if (prot == 1)
    np = 0x02;		/* r-- -> PAGE_READONLY  */
  else if (prot == 0)
    np = 0x01;		/* --- -> PAGE_NOACCESS  */
  else
    return -1;
if (VirtualProtect (addr, len, np, &op))
return 0;
else
return -1;
}
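/* Usage sketch (illustrative, not part of libgcc): a caller that has
   written a trampoline at TRAMP could make its page executable with

     char *page = (char *) ((size_t) tramp
                            & ~(size_t) (getpagesize () - 1));
     mprotect (page, getpagesize (), 7);    (7 = PROT_READ|WRITE|EXEC)

   mirroring the POSIX mprotect contract emulated above.  */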
#endif /* WINNT && ! __CYGWIN__ */
#ifdef TRANSFER_FROM_TRAMPOLINE
TRANSFER_FROM_TRAMPOLINE
1992-01-28 04:44:05 +01:00
#endif
#endif /* L_trampoline */
#ifndef __CYGWIN__
1992-01-28 04:44:05 +01:00
#ifdef L__main
#include "gbl-ctors.h"
/* Some systems use __main in a way incompatible with its use in gcc; in these
cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
give the same symbol without quotes for an alternative entry point. You
1996-07-04 00:07:53 +02:00
must define both, or neither. */
#ifndef NAME__MAIN
#define NAME__MAIN "__main"
#define SYMBOL__MAIN __main
#endif
1992-01-28 04:44:05 +01:00
Use -fbuilding-libgcc for more target macros used in libgcc. gcc/c-family: * c-cppbuiltin.c (c_cpp_builtins): Also define __LIBGCC_EH_TABLES_CAN_BE_READ_ONLY__, __LIBGCC_EH_FRAME_SECTION_NAME__, __LIBGCC_JCR_SECTION_NAME__, __LIBGCC_CTORS_SECTION_ASM_OP__, __LIBGCC_DTORS_SECTION_ASM_OP__, __LIBGCC_TEXT_SECTION_ASM_OP__, __LIBGCC_INIT_SECTION_ASM_OP__, __LIBGCC_INIT_ARRAY_SECTION_ASM_OP__, __LIBGCC_STACK_GROWS_DOWNWARD__, __LIBGCC_DONT_USE_BUILTIN_SETJMP__, __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__, __LIBGCC_DWARF_FRAME_REGISTERS__, __LIBGCC_EH_RETURN_STACKADJ_RTX__, __LIBGCC_JMP_BUF_SIZE__, __LIBGCC_STACK_POINTER_REGNUM__ and __LIBGCC_VTABLE_USES_DESCRIPTORS__ for -fbuilding-libgcc. (builtin_define_with_value): Handle backslash-escaping in string macro values. libgcc: * Makefile.in (CRTSTUFF_CFLAGS): Add -fbuilding-libgcc. * config/aarch64/linux-unwind.h (STACK_POINTER_REGNUM): Change all uses to __LIBGCC_STACK_POINTER_REGNUM__. (DWARF_ALT_FRAME_RETURN_COLUMN): Change all uses to __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__. * config/alpha/vms-unwind.h (DWARF_ALT_FRAME_RETURN_COLUMN): Change use to __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__. * config/cr16/unwind-cr16.c (STACK_GROWS_DOWNWARD): Change all uses to __LIBGCC_STACK_GROWS_DOWNWARD__. (DWARF_FRAME_REGISTERS): Change all uses to __LIBGCC_DWARF_FRAME_REGISTERS__. (EH_RETURN_STACKADJ_RTX): Change all uses to __LIBGCC_EH_RETURN_STACKADJ_RTX__. * config/cr16/unwind-dw2.h (DWARF_FRAME_REGISTERS): Change use to __LIBGCC_DWARF_FRAME_REGISTERS__. Remove conditional definition. * config/i386/cygming-crtbegin.c (EH_FRAME_SECTION_NAME): Change use to __LIBGCC_EH_FRAME_SECTION_NAME__. (JCR_SECTION_NAME): Change use to __LIBGCC_JCR_SECTION_NAME__. * config/i386/cygming-crtend.c (EH_FRAME_SECTION_NAME): Change use to __LIBGCC_EH_FRAME_SECTION_NAME__. (JCR_SECTION_NAME): Change use to __LIBGCC_JCR_SECTION_NAME__ * config/mips/linux-unwind.h (STACK_POINTER_REGNUM): Change use to __LIBGCC_STACK_POINTER_REGNUM__. (DWARF_ALT_FRAME_RETURN_COLUMN): Change all uses to __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__. * config/nios2/linux-unwind.h (STACK_POINTER_REGNUM): Change use to __LIBGCC_STACK_POINTER_REGNUM__. * config/pa/hpux-unwind.h (DWARF_ALT_FRAME_RETURN_COLUMN): Change all uses to __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__. * config/pa/linux-unwind.h (DWARF_ALT_FRAME_RETURN_COLUMN): Change all uses to __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__. * config/rs6000/aix-unwind.h (DWARF_ALT_FRAME_RETURN_COLUMN): Change all uses to __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__. (STACK_POINTER_REGNUM): Change all uses to __LIBGCC_STACK_POINTER_REGNUM__. * config/rs6000/darwin-fallback.c (STACK_POINTER_REGNUM): Change use to __LIBGCC_STACK_POINTER_REGNUM__. * config/rs6000/linux-unwind.h (STACK_POINTER_REGNUM): Change all uses to __LIBGCC_STACK_POINTER_REGNUM__. * config/sparc/linux-unwind.h (DWARF_FRAME_REGISTERS): Change use to __LIBGCC_DWARF_FRAME_REGISTERS__. * config/sparc/sol2-unwind.h (DWARF_FRAME_REGISTERS): Change use to __LIBGCC_DWARF_FRAME_REGISTERS__. * config/tilepro/linux-unwind.h (STACK_POINTER_REGNUM): Change use to __LIBGCC_STACK_POINTER_REGNUM__. * config/xtensa/unwind-dw2-xtensa.h (DWARF_FRAME_REGISTERS): Remove conditional definition. * crtstuff.c (TEXT_SECTION_ASM_OP): Change all uses to __LIBGCC_TEXT_SECTION_ASM_OP__. (EH_FRAME_SECTION_NAME): Change all uses to __LIBGCC_EH_FRAME_SECTION_NAME__. (EH_TABLES_CAN_BE_READ_ONLY): Change all uses to __LIBGCC_EH_TABLES_CAN_BE_READ_ONLY__. 
(CTORS_SECTION_ASM_OP): Change all uses to __LIBGCC_CTORS_SECTION_ASM_OP__. (DTORS_SECTION_ASM_OP): Change all uses to __LIBGCC_DTORS_SECTION_ASM_OP__. (JCR_SECTION_NAME): Change all uses to __LIBGCC_JCR_SECTION_NAME__. (INIT_SECTION_ASM_OP): Change all uses to __LIBGCC_INIT_SECTION_ASM_OP__. (INIT_ARRAY_SECTION_ASM_OP): Change all uses to __LIBGCC_INIT_ARRAY_SECTION_ASM_OP__. * generic-morestack.c (STACK_GROWS_DOWNWARD): Change all uses to __LIBGCC_STACK_GROWS_DOWNWARD__. * libgcc2.c (INIT_SECTION_ASM_OP): Change all uses to __LIBGCC_INIT_SECTION_ASM_OP__. (INIT_ARRAY_SECTION_ASM_OP): Change all uses to __LIBGCC_INIT_ARRAY_SECTION_ASM_OP__. (EH_FRAME_SECTION_NAME): Change all uses to __LIBGCC_EH_FRAME_SECTION_NAME__. * libgcov-profiler.c (VTABLE_USES_DESCRIPTORS): Remove conditional definitions. Change all uses to __LIBGCC_VTABLE_USES_DESCRIPTORS__. * unwind-dw2.c (STACK_GROWS_DOWNWARD): Change all uses to __LIBGCC_STACK_GROWS_DOWNWARD__. (DWARF_FRAME_REGISTERS): Change all uses to __LIBGCC_DWARF_FRAME_REGISTERS__. (EH_RETURN_STACKADJ_RTX): Change all uses to __LIBGCC_EH_RETURN_STACKADJ_RTX__. * unwind-dw2.h (DWARF_FRAME_REGISTERS): Remove conditional definition. Change use to __LIBGCC_DWARF_FRAME_REGISTERS__. * unwind-sjlj.c (DONT_USE_BUILTIN_SETJMP): Change all uses to __LIBGCC_DONT_USE_BUILTIN_SETJMP__. (JMP_BUF_SIZE): Change use to __LIBGCC_JMP_BUF_SIZE__. From-SVN: r214954
2014-09-05 14:03:46 +02:00
#if defined (__LIBGCC_INIT_SECTION_ASM_OP__) \
|| defined (__LIBGCC_INIT_ARRAY_SECTION_ASM_OP__)
1996-10-16 22:25:25 +02:00
#undef HAS_INIT_SECTION
#define HAS_INIT_SECTION
#endif
#if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
/* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
code to run constructors. In that case, we need to handle EH here, too.
But MINGW32 is special because it handles CRTSTUFF and EH on its own. */
#ifdef __MINGW32__
#undef __LIBGCC_EH_FRAME_SECTION_NAME__
#endif
#ifdef __LIBGCC_EH_FRAME_SECTION_NAME__
#include "unwind-dw2-fde.h"
extern unsigned char __EH_FRAME_BEGIN__[];
#endif
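
/* __EH_FRAME_BEGIN__ marks the start of this module's .eh_frame data
   and is supplied by the startup objects (typically crtbegin).
   __do_global_ctors registers it with the unwinder below, and
   __do_global_dtors deregisters it on exit.  */
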
/* Run all the global destructors on exit from the program. */
void
__do_global_dtors (void)
{
#ifdef DO_GLOBAL_DTORS_BODY
  DO_GLOBAL_DTORS_BODY;
#else
  /* __DTOR_LIST__[0] holds a count (or -1) rather than a function
     pointer, so the entries start at element 1; the list ends with a
     null pointer.  */
  static func_ptr *p = __DTOR_LIST__ + 1;
  while (*p)
    {
      p++;
      (*(p-1)) ();
    }
#endif
#if defined (__LIBGCC_EH_FRAME_SECTION_NAME__) && !defined (HAS_INIT_SECTION)
  {
    /* Deregister the EH frame info exactly once, even if we are
       re-entered.  */
    static int completed = 0;
    if (! completed)
      {
        completed = 1;
        __deregister_frame_info (__EH_FRAME_BEGIN__);
      }
  }
#endif
}
#endif
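
/* An illustration (hypothetical names, not compiled here): under the
   old link-time "set" vector scheme, the table walked by the loop in
   __do_global_dtors looks roughly like

     func_ptr __DTOR_LIST__[] =
       { (func_ptr) -1, dtor_for_a_o, dtor_for_b_o, 0 };

   that is, a count word (or -1 when the count is left to be found at
   run time), the destructor entries, and a null terminator; collect2
   or the linker emits the real table.  */
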
#ifndef HAS_INIT_SECTION
/* Run all the global constructors on entry to the program. */
void
__do_global_ctors (void)
{
#ifdef __LIBGCC_EH_FRAME_SECTION_NAME__
  {
    /* The unwinder records this registration in `object', which must
       therefore persist for the lifetime of the program.  */
    static struct object object;
    __register_frame_info (__EH_FRAME_BEGIN__, &object);
  }
#endif
  DO_GLOBAL_CTORS_BODY;
  atexit (__do_global_dtors);
}
#endif /* no HAS_INIT_SECTION */
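
/* For reference, when a port does not define DO_GLOBAL_CTORS_BODY
   itself, gbl-ctors.h supplies a fallback that is essentially

     do {
       __SIZE_TYPE__ nptrs = (__SIZE_TYPE__) __CTOR_LIST__[0];
       unsigned i;
       if (nptrs == (__SIZE_TYPE__) -1)
         for (nptrs = 0; __CTOR_LIST__[nptrs + 1] != 0; nptrs++);
       for (i = nptrs; i >= 1; i--)
         __CTOR_LIST__[i] ();
     } while (0);

   so constructors run from the end of the list backwards, and the
   forward walk in __do_global_dtors undoes them in reverse order.  */
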
#if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
/* Subroutine called automatically by `main'.
   Compiling a global function named `main'
   produces an automatic call to this function at the beginning.

   For many systems, this routine calls __do_global_ctors.
   For systems which support a .init section we use the .init section
   to run __do_global_ctors, so we need not do anything here.  */
extern void SYMBOL__MAIN (void);
void
SYMBOL__MAIN (void)
{
  /* Support recursive calls to `main': run initializers just once.  */
  static int initialized;
  if (! initialized)
    {
      initialized = 1;
      __do_global_ctors ();
    }
}
#endif /* no HAS_INIT_SECTION or INVOKE__main */
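
/* A sketch of the effect on user code: on targets that take this
   path, a program's

     int main (void) { return do_work (); }

   is compiled as if it began with a call to the function above
   (conventionally named __main), so the constructors run exactly
   once before any user code and __do_global_dtors is queued with
   atexit.  "do_work" is only a placeholder.  */
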
#endif /* L__main */
#endif /* __CYGWIN__ */
#ifdef L_ctors
#include "gbl-ctors.h"
/* Provide default definitions for the lists of constructors and
   destructors, so that we don't get linker errors.  These symbols are
   intentionally bss symbols, so that gld and/or collect will provide
   the right values.  */

/* We declare the lists here with two elements each,
   so that they are valid empty lists if no other definition is loaded.

   If we are using the old "set" extensions to have the gnu linker
   collect ctors and dtors, then __CTOR_LIST__ and __DTOR_LIST__
   must be in the bss/common section.

   Long term no port should use those extensions.  But many still do.  */
#if !defined(__LIBGCC_INIT_SECTION_ASM_OP__)
#if defined (TARGET_ASM_CONSTRUCTOR) || defined (USE_COLLECT2)
func_ptr __CTOR_LIST__[2] = {0, 0};
func_ptr __DTOR_LIST__[2] = {0, 0};
#else
func_ptr __CTOR_LIST__[2];
func_ptr __DTOR_LIST__[2];
#endif
#endif /* no __LIBGCC_INIT_SECTION_ASM_OP__ */
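
/* With both elements zero, each default list is valid under either
   traversal: element 0 reads as a count of zero for the counted walk,
   and element 1 is the null terminator for the pointer walk.  When
   there are real constructors or destructors, collect2 or the linker
   provides the actual tables in place of these defaults.  */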
#endif /* L_ctors */
#endif /* LIBGCC2_UNITS_PER_WORD <= MIN_UNITS_PER_WORD */