/* Operations with long integers.
   Copyright (C) 2006, 2007, 2009, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
|
|
|
|
#include "system.h"
|
|
|
|
#include "coretypes.h"
|
2010-11-16 14:57:21 +01:00
|
|
|
#include "tm.h" /* For SHIFT_COUNT_TRUNCATED. */
|
2006-03-03 01:38:20 +01:00
|
|
|
#include "tree.h"
|
|
|
|
|
fold-const.c (LOWPART, [...]): Move ...
2010-04-15 Richard Guenther <rguenther@suse.de>
* fold-const.c (LOWPART, HIGHPART, BASE, encode, decode,
fit_double_type, force_fit_type_double, add_double_with_sign,
neg_double, mul_double_with_sign, lshift_double, rshift_double,
lrotate_double, rrotate_double, div_and_round_double): Move ...
* double-int.c: ... here.
* tree.h (force_fit_type_double, fit_double_type, add_double_with_sign,
add_double, neg_double, mul_double_with_sign, mul_double,
lshift_double, rshift_double, lrotate_double, rrotate_double,
div_and_round_double): Move prototypes ...
* double-int.h: ... here.
From-SVN: r158372
2010-04-15 14:45:58 +02:00
|
|
|
/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
   overflow.  Suppose A, B and SUM have the same respective signs as A1, B1,
   and SUM1.  Then this yields nonzero if overflow occurred during the
   addition.

   Overflow occurs if A and B have the same sign, but A and SUM differ in
   sign.  Use `^' to test whether signs differ, and `< 0' to isolate the
   sign.  */

#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)

/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
   number.  The value of the word is LOWPART + HIGHPART * BASE.  */

/* Low half-word of X (the bottom HOST_BITS_PER_WIDE_INT / 2 bits).  */
#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
/* High half-word of X, shifted down to the low position.  */
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
/* Radix of the half-word digits: 2 ** (HOST_BITS_PER_WIDE_INT / 2).  */
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
|
/* Unpack a two-word integer into 4 words.
|
|
|
|
LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
|
|
|
|
WORDS points to the array of HOST_WIDE_INTs. */
|
|
|
|
|
|
|
|
static void
|
|
|
|
encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
|
|
|
|
{
|
|
|
|
words[0] = LOWPART (low);
|
|
|
|
words[1] = HIGHPART (low);
|
|
|
|
words[2] = LOWPART (hi);
|
|
|
|
words[3] = HIGHPART (hi);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Pack an array of 4 words into a two-word integer.
|
|
|
|
WORDS points to the array of words.
|
|
|
|
The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
|
|
|
|
|
|
|
|
static void
|
|
|
|
decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
|
|
|
|
HOST_WIDE_INT *hi)
|
|
|
|
{
|
|
|
|
*low = words[0] + words[1] * BASE;
|
|
|
|
*hi = words[2] + words[3] * BASE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Add two doubleword integers with doubleword result.
|
|
|
|
Return nonzero if the operation overflows according to UNSIGNED_P.
|
|
|
|
Each argument is given as two `HOST_WIDE_INT' pieces.
|
|
|
|
One argument is L1 and H1; the other, L2 and H2.
|
|
|
|
The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
|
|
|
|
|
|
|
|
int
|
|
|
|
add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
|
|
|
|
unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
|
|
|
|
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
|
|
|
|
bool unsigned_p)
|
|
|
|
{
|
|
|
|
unsigned HOST_WIDE_INT l;
|
|
|
|
HOST_WIDE_INT h;
|
|
|
|
|
|
|
|
l = l1 + l2;
|
|
|
|
h = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) h1
|
|
|
|
+ (unsigned HOST_WIDE_INT) h2
|
|
|
|
+ (l < l1));
|
|
|
|
|
|
|
|
*lv = l;
|
|
|
|
*hv = h;
|
|
|
|
|
|
|
|
if (unsigned_p)
|
|
|
|
return ((unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1
|
|
|
|
|| (h == h1
|
|
|
|
&& l < l1));
|
|
|
|
else
|
|
|
|
return OVERFLOW_SUM_SIGN (h1, h2, h);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Negate a doubleword integer with doubleword result.
|
|
|
|
Return nonzero if the operation overflows, assuming it's signed.
|
|
|
|
The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
|
|
|
|
The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
|
|
|
|
|
|
|
|
int
|
|
|
|
neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
|
|
|
|
unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
|
|
|
|
{
|
|
|
|
if (l1 == 0)
|
|
|
|
{
|
|
|
|
*lv = 0;
|
|
|
|
*hv = - h1;
|
|
|
|
return (*hv & h1) < 0;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
*lv = -l1;
|
|
|
|
*hv = ~h1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows according to UNSIGNED_P.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
		      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
		      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
		      bool unsigned_p)
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];	/* full 4-word product of two 2-word values */
  unsigned HOST_WIDE_INT carry;
  int i, j, k;
  unsigned HOST_WIDE_INT toplow, neglow;
  HOST_WIDE_INT tophigh, neghigh;

  /* Split both operands into 4 half-word digits each.  */
  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  memset (prod, 0, sizeof prod);

  /* Grade-school multiplication on the half-word digits; each partial
     product plus carries fits in one full word.  */
  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
	{
	  k = i + j;
	  /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
	  carry += arg1[i] * arg2[j];
	  /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
	  carry += prod[k];
	  prod[k] = LOWPART (carry);
	  carry = HIGHPART (carry);
	}
      prod[i + 4] = carry;
    }

  /* Low doubleword of the product is the result; the high doubleword
     is only needed for overflow detection.  */
  decode (prod, lv, hv);
  decode (prod + 4, &toplow, &tophigh);

  /* Unsigned overflow is immediate.  */
  if (unsigned_p)
    return (toplow | tophigh) != 0;

  /* Check for signed overflow by calculating the signed representation of the
     top half of the result; it should agree with the low half's sign bit.
     The digits were multiplied as unsigned, so for each negative operand
     subtract the other operand from the top half (two's complement
     correction).  */
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  /* No overflow iff the top half is the sign-extension of the low half:
     all zeros for a nonnegative result, all ones for a negative one.  */
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
|
|
|
|
|
|
|
|
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
	       HOST_WIDE_INT count, unsigned int prec,
	       unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, bool arith)
{
  unsigned HOST_WIDE_INT signmask;

  /* A negative count is a right shift.  */
  if (count < 0)
    {
      rshift_double (l1, h1, -count, prec, lv, hv, arith);
      return;
    }

  /* Mimic target semantics when the target truncates shift counts.  */
  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
	 ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
      *lv = 0;
    }
  else
    {
      /* The two-step shift (by width - count - 1, then by 1) avoids an
	 undefined full-width shift when COUNT is zero.  */
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
	     | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
      *lv = l1 << count;
    }

  /* Sign extend all bits that are beyond the precision.  */

  /* All-ones if bit PREC - 1 of the result is set, else all-zeros.  */
  signmask = -((prec > HOST_BITS_PER_WIDE_INT
		? ((unsigned HOST_WIDE_INT) *hv
		   >> (prec - HOST_BITS_PER_WIDE_INT - 1))
		: (*lv >> (prec - 1))) & 1);

  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
      *lv |= signmask << prec;
    }
}
|
|
|
|
|
|
|
|
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  Shift left if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
	       HOST_WIDE_INT count, unsigned int prec,
	       unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
	       bool arith)
{
  unsigned HOST_WIDE_INT signmask;

  /* A negative count is a left shift.  */
  if (count < 0)
    {
      lshift_double (l1, h1, -count, prec, lv, hv, arith);
      return;
    }

  /* All-ones for an arithmetic shift of a negative value, else zero.  */
  signmask = (arith
	      ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
	      : 0);

  /* Mimic target semantics when the target truncates shift counts.  */
  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
	 ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = 0;
      *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      /* The two-step shift (by width - count - 1, then by 1) avoids an
	 undefined full-width shift when COUNT is zero.  */
      *hv = (unsigned HOST_WIDE_INT) h1 >> count;
      *lv = ((l1 >> count)
	     | ((unsigned HOST_WIDE_INT) h1
		<< (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
    }

  /* Zero / sign extend all bits that are beyond the precision.  */

  if (count >= (HOST_WIDE_INT)prec)
    {
      /* The whole value was shifted out: only the sign remains.  */
      *hv = signmask;
      *lv = signmask;
    }
  else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
      *lv |= signmask << (prec - count);
    }
}
|
|
|
|
|
|
|
|
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

int
div_and_round_double (unsigned code, int uns,
		      /* num == numerator == dividend */
		      unsigned HOST_WIDE_INT lnum_orig,
		      HOST_WIDE_INT hnum_orig,
		      /* den == denominator == divisor */
		      unsigned HOST_WIDE_INT lden_orig,
		      HOST_WIDE_INT hden_orig,
		      unsigned HOST_WIDE_INT *lquo,
		      HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
		      HOST_WIDE_INT *hrem)
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];	/* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  int i, j;
  unsigned HOST_WIDE_INT work;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  unsigned HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  /* Division by zero: flag overflow and divide by one instead.  */
  if (hden == 0 && lden == 0)
    overflow = 1, lden = 1;

  /* Calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
	{
	  quo_neg = ~ quo_neg;
	  /* (minimum integer) / (-1) is the only overflow case.  */
	  if (neg_double (lnum, hnum, &lnum, &hnum)
	      && ((HOST_WIDE_INT) lden & hden) == -1)
	    overflow = 1;
	}
      if (hden < 0)
	{
	  quo_neg = ~ quo_neg;
	  neg_double (lden, hden, &lden, &hden);
	}
    }

  if (hnum == 0 && hden == 0)
    {				/* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / lden;
      goto finish_up;
    }

  if (hnum == 0)
    {				/* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  memset (quo, 0, sizeof quo);

  memset (num, 0, sizeof num);	/* to zero 9th element */
  memset (den, 0, sizeof den);

  /* Work on half-word digits, least significant first.  */
  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
    {
      /* hnum != 0 already checked.  Simple short division, one digit
	 of the dividend at a time.  */
      for (i = 4 - 1; i >= 0; i--)
	{
	  work = num[i] + carry * BASE;
	  quo[i] = work / lden;
	  carry = work % lden;
	}
    }
  else
    {
      /* Full double precision division,
	 with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int num_hi_sig, den_hi_sig;
      unsigned HOST_WIDE_INT quo_est, scale;

      /* Find the highest nonzero divisor digit.  */
      for (i = 4 - 1;; i--)
	if (den[i] != 0)
	  {
	    den_hi_sig = i;
	    break;
	  }

      /* Insure that the first digit of the divisor is at least BASE/2.
	 This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1)
	{			/* scale divisor and dividend */
	  carry = 0;
	  for (i = 0; i <= 4 - 1; i++)
	    {
	      work = (num[i] * scale) + carry;
	      num[i] = LOWPART (work);
	      carry = HIGHPART (work);
	    }

	  num[4] = carry;
	  carry = 0;
	  for (i = 0; i <= 4 - 1; i++)
	    {
	      work = (den[i] * scale) + carry;
	      den[i] = LOWPART (work);
	      carry = HIGHPART (work);
	      if (den[i] != 0) den_hi_sig = i;
	    }
	}

      num_hi_sig = 4;

      /* Main loop */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
	{
	  /* Guess the next quotient digit, quo_est, by dividing the first
	     two remaining dividend digits by the high order quotient digit.
	     quo_est is never low and is at most 2 high.  */
	  unsigned HOST_WIDE_INT tmp;

	  num_hi_sig = i + den_hi_sig + 1;
	  work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
	  if (num[num_hi_sig] != den[den_hi_sig])
	    quo_est = work / den[den_hi_sig];
	  else
	    quo_est = BASE - 1;

	  /* Refine quo_est so it's usually correct, and at most one high.  */
	  tmp = work - quo_est * den[den_hi_sig];
	  if (tmp < BASE
	      && (den[den_hi_sig - 1] * quo_est
		  > (tmp * BASE + num[num_hi_sig - 2])))
	    quo_est--;

	  /* Try QUO_EST as the quotient digit, by multiplying the
	     divisor by QUO_EST and subtracting from the remaining dividend.
	     Keep in mind that QUO_EST is the I - 1st digit.  */

	  carry = 0;
	  for (j = 0; j <= den_hi_sig; j++)
	    {
	      work = quo_est * den[j] + carry;
	      carry = HIGHPART (work);
	      work = num[i + j] - LOWPART (work);
	      num[i + j] = LOWPART (work);
	      carry += HIGHPART (work) != 0;
	    }

	  /* If quo_est was high by one, then num[i] went negative and
	     we need to correct things.  */
	  if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
	    {
	      quo_est--;
	      carry = 0;		/* add divisor back in */
	      for (j = 0; j <= den_hi_sig; j++)
		{
		  work = num[i + j] + den[j] + carry;
		  carry = HIGHPART (work);
		  num[i + j] = LOWPART (work);
		}

	      num [num_hi_sig] += carry;
	    }

	  /* Store the quotient digit.  */
	  quo[i] = quo_est;
	}
    }

  decode (quo, lquo, hquo);

 finish_up:
  /* If result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* Compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  /* Apply the requested rounding to the truncated quotient.  */
  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:	/* round toward zero */
    case EXACT_DIV_EXPR:	/* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:	/* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
	{
	  /* quo = quo - 1;  */
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
		      lquo, hquo);
	}
      else
	return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:		/* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
	{
	  /* quo = quo + 1;  */
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
		      lquo, hquo);
	}
      else
	return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:	/* round to closest integer */
      {
	unsigned HOST_WIDE_INT labs_rem = *lrem;
	HOST_WIDE_INT habs_rem = *hrem;
	unsigned HOST_WIDE_INT labs_den = lden, ltwice;
	HOST_WIDE_INT habs_den = hden, htwice;

	/* Get absolute values.  */
	if (*hrem < 0)
	  neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
	if (hden < 0)
	  neg_double (lden, hden, &labs_den, &habs_den);

	/* If (2 * abs (lrem) >= abs (lden)), adjust the quotient.  */
	mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
		    labs_rem, habs_rem, &ltwice, &htwice);

	if (((unsigned HOST_WIDE_INT) habs_den
	     < (unsigned HOST_WIDE_INT) htwice)
	    || (((unsigned HOST_WIDE_INT) habs_den
		 == (unsigned HOST_WIDE_INT) htwice)
		&& (labs_den <= ltwice)))
	  {
	    if (*hquo < 0)
	      /* quo = quo - 1;  */
	      add_double (*lquo, *hquo,
			  (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
	    else
	      /* quo = quo + 1;  */
	      add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
			  lquo, hquo);
	  }
	else
	  return overflow;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
|
|
|
|
|
|
|
|
|
2006-03-03 01:38:20 +01:00
|
|
|
/* Returns mask for PREC bits. */
|
|
|
|
|
re PR tree-optimization/30730 (-Wunsafe-loop-optimizations gives too many warnings)
PR tree-optimization/30730
PR tree-optimization/26900
* tree-ssa-loop-niter.c: Include gmp.h.
(bounds): New type.
(mpz_set_double_int, get_type_bounds, mpz_to_double_int,
split_to_var_and_offset, determine_value_range,
bound_difference_of_offsetted_base, refine_bounds_using_guard,
bound_difference, bounds_add, bounds_negate,
number_of_iterations_ne_max, dump_affine_iv): New functions.
(number_of_iterations_ne, number_of_iterations_lt_to_ne,
assert_loop_rolls_lt, assert_loop_rolls_le): Use bounds on the
difference of initial and final value of control iv to validate
results.
(number_of_iterations_cond): Add loop parameter. Determine bounds
on the difference of the extremes of the control iv. Add dumps.
(expand_simple_operations): Handle phi nodes.
(simplify_using_initial_conditions): Do not record used conditions.
(number_of_iterations_exit): Pass loop to number_of_iterations_cond.
Do not set additional_info.
(implies_nonnegative_p, implies_ge_p): Removed.
(derive_constant_upper_bound): Do not use parameter `additional'.
(record_estimate): Parameter `additional' removed. Parameter
`i_bound' added. Do not call derive_constant_upper_bound.
(record_nonwrapping_iv): Use derive_constant_upper_bound to
bound the number of iterations estimate.
(estimate_numbers_of_iterations_loop): Pass the estimate from
the number of iterations analysis to record_estimate.
* tree.h (multiple_of_p): Declare.
* tree-scalar-evolution.c (expression_expensive_p): Removed.
(scev_const_prop): Do not check expression_expensive_p.
* fold-const.c (multiple_of_p): Exported.
* double-int.c (double_int_mask): Exported.
* double-int.h (double_int_mask): Declare.
* tree-flow.h (struct tree_niter_desc): Removed additional_info
field. Added max field.
* gcc.dg/tree-ssa/loop-26.c: New test.
From-SVN: r122896
2007-03-14 01:38:34 +01:00
|
|
|
double_int
|
2006-03-03 01:38:20 +01:00
|
|
|
double_int_mask (unsigned prec)
|
|
|
|
{
|
|
|
|
unsigned HOST_WIDE_INT m;
|
|
|
|
double_int mask;
|
|
|
|
|
|
|
|
if (prec > HOST_BITS_PER_WIDE_INT)
|
|
|
|
{
|
|
|
|
prec -= HOST_BITS_PER_WIDE_INT;
|
|
|
|
m = ((unsigned HOST_WIDE_INT) 2 << (prec - 1)) - 1;
|
|
|
|
mask.high = (HOST_WIDE_INT) m;
|
|
|
|
mask.low = ALL_ONES;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
mask.high = 0;
|
|
|
|
mask.low = ((unsigned HOST_WIDE_INT) 2 << (prec - 1)) - 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return mask;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Clears the bits of CST over the precision PREC. If UNS is false, the bits
|
|
|
|
outside of the precision are set to the sign bit (i.e., the PREC-th one),
|
|
|
|
otherwise they are set to zero.
|
2009-11-25 11:55:54 +01:00
|
|
|
|
2006-03-03 01:38:20 +01:00
|
|
|
This corresponds to returning the value represented by PREC lowermost bits
|
|
|
|
of CST, with the given signedness. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_ext (double_int cst, unsigned prec, bool uns)
|
|
|
|
{
|
|
|
|
if (uns)
|
|
|
|
return double_int_zext (cst, prec);
|
|
|
|
else
|
|
|
|
return double_int_sext (cst, prec);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The same as double_int_ext with UNS = true. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_zext (double_int cst, unsigned prec)
|
|
|
|
{
|
|
|
|
double_int mask = double_int_mask (prec);
|
|
|
|
double_int r;
|
|
|
|
|
2006-06-15 11:42:03 +02:00
|
|
|
r.low = cst.low & mask.low;
|
|
|
|
r.high = cst.high & mask.high;
|
2006-03-03 01:38:20 +01:00
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The same as double_int_ext with UNS = false. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_sext (double_int cst, unsigned prec)
|
|
|
|
{
|
|
|
|
double_int mask = double_int_mask (prec);
|
|
|
|
double_int r;
|
|
|
|
unsigned HOST_WIDE_INT snum;
|
|
|
|
|
|
|
|
if (prec <= HOST_BITS_PER_WIDE_INT)
|
|
|
|
snum = cst.low;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
prec -= HOST_BITS_PER_WIDE_INT;
|
|
|
|
snum = (unsigned HOST_WIDE_INT) cst.high;
|
|
|
|
}
|
|
|
|
if (((snum >> (prec - 1)) & 1) == 1)
|
|
|
|
{
|
2006-06-15 11:42:03 +02:00
|
|
|
r.low = cst.low | ~mask.low;
|
|
|
|
r.high = cst.high | ~mask.high;
|
2006-03-03 01:38:20 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2006-06-15 11:42:03 +02:00
|
|
|
r.low = cst.low & mask.low;
|
|
|
|
r.high = cst.high & mask.high;
|
2009-11-25 11:55:54 +01:00
|
|
|
}
|
2006-03-03 01:38:20 +01:00
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Returns true if CST fits in signed HOST_WIDE_INT. */
|
|
|
|
|
|
|
|
bool
|
|
|
|
double_int_fits_in_shwi_p (double_int cst)
|
|
|
|
{
|
|
|
|
if (cst.high == 0)
|
|
|
|
return (HOST_WIDE_INT) cst.low >= 0;
|
|
|
|
else if (cst.high == -1)
|
|
|
|
return (HOST_WIDE_INT) cst.low < 0;
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Returns true if CST fits in HOST_WIDE_INT if UNS is false, or in
|
|
|
|
unsigned HOST_WIDE_INT if UNS is true. */
|
|
|
|
|
|
|
|
bool
|
|
|
|
double_int_fits_in_hwi_p (double_int cst, bool uns)
|
|
|
|
{
|
|
|
|
if (uns)
|
|
|
|
return double_int_fits_in_uhwi_p (cst);
|
|
|
|
else
|
|
|
|
return double_int_fits_in_shwi_p (cst);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Returns A * B. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_mul (double_int a, double_int b)
|
|
|
|
{
|
|
|
|
double_int ret;
|
|
|
|
mul_double (a.low, a.high, b.low, b.high, &ret.low, &ret.high);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-11-09 03:01:06 +01:00
|
|
|
/* Returns A * B. If the operation overflows according to UNSIGNED_P,
|
|
|
|
*OVERFLOW is set to nonzero. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_mul_with_sign (double_int a, double_int b,
|
|
|
|
bool unsigned_p, int *overflow)
|
|
|
|
{
|
|
|
|
double_int ret;
|
|
|
|
*overflow = mul_double_with_sign (a.low, a.high, b.low, b.high,
|
|
|
|
&ret.low, &ret.high, unsigned_p);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2006-03-03 01:38:20 +01:00
|
|
|
/* Returns A + B. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_add (double_int a, double_int b)
|
|
|
|
{
|
|
|
|
double_int ret;
|
|
|
|
add_double (a.low, a.high, b.low, b.high, &ret.low, &ret.high);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-07-05 15:06:07 +02:00
|
|
|
/* Returns A - B. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_sub (double_int a, double_int b)
|
|
|
|
{
|
|
|
|
double_int ret;
|
|
|
|
neg_double (b.low, b.high, &b.low, &b.high);
|
|
|
|
add_double (a.low, a.high, b.low, b.high, &ret.low, &ret.high);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2006-03-03 01:38:20 +01:00
|
|
|
/* Returns -A. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_neg (double_int a)
|
|
|
|
{
|
|
|
|
double_int ret;
|
|
|
|
neg_double (a.low, a.high, &ret.low, &ret.high);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Returns A / B (computed as unsigned depending on UNS, and rounded as
|
|
|
|
specified by CODE). CODE is enum tree_code in fact, but double_int.h
|
re PR tree-optimization/28411 ("Illegal instruction" error with -ftrapv)
PR tree-optimization/28411
* double-int.c (double_int_div): Use double_int_divmod.
(double_int_divmod, double_int_sdivmod, double_int_udivmod,
double_int_mod, double_int_smod, double_int_umod): New functions.
* double-int.h (double_int_divmod, double_int_sdivmod,
double_int_udivmod, double_int_mod, double_int_smod, double_int_umod):
Declare.
* tree-ssa-loop-ivopts.c (constant_multiple_of): Returns the result
in double_int.
(get_computation_aff, get_computation_cost_at): Handle double_int
return type of constant_multiple_of.
From-SVN: r116529
2006-08-28 23:15:19 +02:00
|
|
|
must be included before tree.h. The remainder after the division is
|
|
|
|
stored to MOD. */
|
2006-03-03 01:38:20 +01:00
|
|
|
|
|
|
|
double_int
|
re PR tree-optimization/28411 ("Illegal instruction" error with -ftrapv)
PR tree-optimization/28411
* double-int.c (double_int_div): Use double_int_divmod.
(double_int_divmod, double_int_sdivmod, double_int_udivmod,
double_int_mod, double_int_smod, double_int_umod): New functions.
* double-int.h (double_int_divmod, double_int_sdivmod,
double_int_udivmod, double_int_mod, double_int_smod, double_int_umod):
Declare.
* tree-ssa-loop-ivopts.c (constant_multiple_of): Returns the result
in double_int.
(get_computation_aff, get_computation_cost_at): Handle double_int
return type of constant_multiple_of.
From-SVN: r116529
2006-08-28 23:15:19 +02:00
|
|
|
double_int_divmod (double_int a, double_int b, bool uns, unsigned code,
|
|
|
|
double_int *mod)
|
2006-03-03 01:38:20 +01:00
|
|
|
{
|
|
|
|
double_int ret;
|
|
|
|
|
fold-const.c (LOWPART, [...]): Move ...
2010-04-15 Richard Guenther <rguenther@suse.de>
* fold-const.c (LOWPART, HIGHPART, BASE, encode, decode,
fit_double_type, force_fit_type_double, add_double_with_sign,
neg_double, mul_double_with_sign, lshift_double, rshift_double,
lrotate_double, rrotate_double, div_and_round_double): Move ...
* double-int.c: ... here.
* tree.h (force_fit_type_double, fit_double_type, add_double_with_sign,
add_double, neg_double, mul_double_with_sign, mul_double,
lshift_double, rshift_double, lrotate_double, rrotate_double,
div_and_round_double): Move prototypes ...
* double-int.h: ... here.
From-SVN: r158372
2010-04-15 14:45:58 +02:00
|
|
|
div_and_round_double (code, uns, a.low, a.high,
|
Fix enum conversions which are invalid in C++:
gcc/:
Fix enum conversions which are invalid in C++:
* auto-inc-dec.c (attempt_change): Change 0 to SET in function
call.
* calls.c (store_one_arg): Change 0 to EXPAND_NORMAL in function
call.
* cse.c (hash_rtx_cb): Change 0 to VOIDmode in function call.
* dbgcnt.c (dbg_cnt_set_limit_by_name): Add cast to enum type.
* dbxout.c (dbxout_symbol): Change 0 to VOIDmode in function
call.
(dbxout_parms): Likewise.
* df-core.c (df_set_flags): Change changeable_flags parameter to
int.
(df_clear_flags): Likewise.
* df-problems.c (df_rd_bb_local_compute_process_def): Change
top_flag parameter to int.
(df_chain_create_bb_process_use): Likewise.
(df_chain_add_problem): Change chain_flags parameter to unsigned
int. Remove cast.
* df-scan.c (df_ref_create): Change ref_flags parameter to int.
(df_ref_create_structure, df_def_record_1): Likewise.
(df_defs_record, df_uses_record, df_get_call_refs): Likewise.
(df_notes_rescan): Change 0 to VOIDmode in function call.
(df_get_call_refs, df_insn_refs_collect): Likewise.
(df_bb_regs_collect): Likewise.
(df_entry_block_defs_collect): Likewise.
(df_exit_block_uses_collect): Likewise.
* df.h: Update declarations.
* double-int.c (double_int_divmod): Add cast to enum type.
* dse.c (replace_inc_dec): Reverse parameters to gen_int_mode.
* dwarf2out.c (new_reg_loc_descr): Add casts to enum type.
(based_loc_descr): Likewise.
(loc_descriptor_from_tree_1): Change first_op and second_op to
enum dwarf_location_atom. Add cast to enum type.
* expmed.c (init_expmed): Change 0 to SET in function call.
* expr.c (init_expr_target): Change 0 to VOIDmode in function
call.
(expand_expr_real_1): Change 0 to EXPAND_NORMAL in function call.
(do_store_flag): Likewise.
* fixed-value.h (struct fixed_value): Change mode to enum
machine_mode.
* function.c (assign_parms): Change 0 to VOIDmode in function
call.
* genautomata.c (insert_automaton_decl): Change 1 to INSERT in
function call.
(insert_insn_decl, insert_decl, insert_state): Likewise.
(automata_list_finish): Likewise.
* genrecog.c (process_define_predicate): Add cast to enum type.
* gensupport.c (init_predicate_table): Add cast to enum type.
* gimple.c (gimple_build_return): Change 0 to ERROR_MARK in
function call.
(gimple_build_call_1, gimple_build_label): Likewise.
(gimple_build_goto, gimple_build_asm_1): Likewise.
(gimple_build_switch_1, gimple_build_cdt): Likewise.
* gimple.h (GIMPLE_CHECK): Change 0 to ERROR_MARK in function
call.
(enum fallback): Rename from enum fallback_t.
(fallback_t): Typedef as int.
* gimple-low.c (lower_builtin_setjmp): Change TSI_SAME_STMT to
GSI_SAME_STMT in function call.
* ira.c (setup_class_subset_and_memory_move_costs): Add casts to
enum type.
(setup_reg_class_relations): Likewise.
(setup_reg_class_nregs): Change cl to int. Add casts to enum
type.
(setup_prohibited_class_mode_regs): Add cast to enum type.
(setup_prohibited_mode_move_regs): Likewise.
* ira-costs.c (record_reg_classes): Change rclass to enum
reg_class.
(record_address_regs): Change i to enum reg_class.
* lists.c (alloc_EXPR_LIST): Add cast to enum type.
* machmode.h (GET_MODE_CLASS): Cast value to enum mode_class.
(GET_MODE_WIDER_MODE): Cast value to enum machine_mode.
(GET_MODE_2XWIDER_MODE): Likewise.
(GET_CLASS_NARROWEST_MODE): Likewise.
* omp-low.c (expand_omp_for): Add cast to enum type.
* optabs.c (debug_optab_libfuncs): Add casts to enum type.
* opts.c (enable_warning_as_error): Change kind to diagostic_t.
* postreload.c (reload_cse_simplify_operands): Change rclass local
to enum reg_class.
* predict.c (combine_predictions_for_insn): Change best_predictor
and predictor to enum br_predictor.
(combine_predictions_for_bb): Likewise.
(build_predict_expr): Change assignment to PREDICT_EXPR_OUTCOME to
use SET_PREDICT_EXPR_OUTCOME.
* real.c (real_arithmetic): Change icode to code in function
call.
* reginfo.c (init_move_cost): Add casts to enum type.
(init_reg_sets_1, init_fake_stack_mems): Likewise.
* regmove.c (regclass_compatible_p): Change class0 and class1 to
enum reg_class.
* reload.c (find_valid_class): Add casts to enum type.
(push_reload): Change 0 to NO_REGS in function call.
(find_reloads): Change this_alternative to array of enum
reg_class. Remove some now-unnecessary casts.
(make_memloc): Change 0 to VOIDmode in function call.
* reload1.c (reload): Change 0 to VOIDmode in function call.
(eliminate_regs_1, elimination_effects): Likewise.
(eliminate_regs_in_insn): Likewise.
(emit_input_reload_insns): Add cast to enum type.
(delete_output_reload): Change 0 to VOIDmode in function call.
* reorg.c (insn_sets_resource_p): Convert include_delayed_effects
to enum type in function call.
* tree.h (PREDICT_EXPR_OUTCOME): Add cast to enum type.
(SET_PREDICT_EXPR_OUTCOME): Define.
* tree-dump.c (get_dump_file_info): Change phase parameter to
int.
(get_dump_file_name, dump_begin, dump_enabled_p): Likewise.
(dump_initialized_p, dump_flag_name, dump_end): Likewise.
(dump_function): Likewise.
* tree-dump.h: Update declarations.
* tree-pass.h: Update declarations.
* varasm.c (assemble_integer): Change mclass to enum mode_class.
* config/arm/arm.c (thumb_legitimize_reload_address): Add cast to
enum type.
(arm_rtx_costs_1): Correct parenthesization.
(arm_rtx_costs): Add casts to enum type.
(adjacent_mem_locations): Reverse arguments to const_ok_for_op.
(vfp_emit_fstmd): Use add_rg_note.
(emit_multi_reg_push, emit_sfm): Likewise.
(thumb_set_frame_pointer): Likewise.
(arm_expand_prologue): Likewise.
(arm_regno_class): Change return type to enum reg_class.
(thumb1_expand_prologue): Use add_reg_note.
* config/arm/arm-protos.h (arm_regno_class): Update declaration.
* config/arm/arm.h (INITIALIZE_TRAMPOLINE): Change 0 to LCT_NORMAL
in function call.
* config/arm/gentune.sh: Add cast to enum type.
* config/arm/arm-tune.md: Rebuild.
* config/i386/i386.c (ix86_expand_prologue): Use add_reg_note.
(ix86_split_fp_branch, predict_jump): Likewise.
(ix86_expand_multi_arg_builtin): Change sub_code from enum
insn_code to enum rtx_code.
(ix86_builtin_vectorized_function): Add cast to enum type.
* config/i386/i386.md (truncdfsf2): Change slot to enum
ix86_stack_slot.
(truncxf<mode>2, isinf<mode>2): Likewise.
* config/i386/i386-c.c (ix86_pragma_target_parse): Add cast to
enum type.
* config/ia64/ia64.c (ia64_split_tmode_move): Use add_reg_note.
(spill_restore_mem, do_spill, ia64_expand_prologue): Likewise.
(insert_bundle_state): Change 1 to INSERT in function call.
(ia64_add_bundle_selector_before): Likewise.
* config/ia64/ia64.md (cpu attr): Add cast to enum type.
(save_stack_nonlocal): Change 0 to LCT_NORMAL in function call.
(restore_stack_nonlocal): Likewise.
* config/mips/mips.h (MIPS_ICACHE_SYNC): Change 0 to LCT_NORMAL in
function call.
* config/mips/mips.c (mips_binary_cost): Change 0 to SET in
function call.
(mips_rtx_costs): Likewise.
(mips_override_options): Add casts to enum type.
* config/mips/sdemtk.h (MIPS_ICACHE_SYNC): Change 0 to LCT_NORMAL
in function call.
* config/pa/pa.c (legitimize_pic_address): Use add_reg_note.
(store_reg, set_reg_plus_d): Likewise.
(hppa_expand_prologue, hppa_profile_hook): Likewise.
* config/rs6000/rs6000.c (rs6000_init_hard_regno_mode_ok): Add
cast to enum type.
(altivec_expand_vec_set_builtin): Change 0 to EXPAND_NORMAL in
function call.
(emit_unlikely_jump): Use add_reg_note.
(rs6000_emit_allocate_stack): Likewise.
(rs6000_frame_related, rs6000_emit_prologue): Likewise.
(output_toc): Change 1 to INSERT in function call.
(output_profile_hook): Change 0 to LCT_NORMAL in function call.
(rs6000_initialize_trampoline): Likewise.
(rs6000_init_dwarf_reg_sizes_extra): Change 0 to EXPAND_NORMAL in
function call.
* config/s390/s390.c (s390_rtx_costs): Add cast to enum type.
(s390_expand_movmem): Change 0 to OPTAB_DIRECT in function call.
(s390_expand_setmem, s390_expand_cmpmem): Likewise.
(save_gprs): Use add_reg_note.
(s390_emit_prologue): Likewise.
(s390_expand_builtin): Change 0 to EXPAND_NORMAL in function
call.
* config/sparc/sparc.c (sparc_expand_prologue): Use add_reg_note.
(sparc_fold_builtin): Add cast to enum type.
* config/spu/spu.c (spu_emit_branch_or_set): Change ior_code to
enum insn_code.
(spu_expand_prologue): Use add_reg_note.
(expand_builtin_args): Change 0 to EXPAND_NORMAL in function
call.
* c-parser.c (c_parser_attributes): Change VEC back to tree list.
(c_parser_postfix_expression_after_primary): Get VEC for list of
arguments. Get original types of arguments. Call
build_function_call_vec.
(cached_expr_list_1, cached_expr_list_2): New static variables.
(c_parser_expr_list): Change return type to VEC *. Add
p_orig_types parameter. Change all callers.
(c_parser_release_expr): New static function.
(c_parser_vec_to_tree_list): New static function.
* c-typeck.c (build_function_call): Rewrite to build a VEC and
call build_function_call_vec.
(build_function_call_vec): New function, based on old
build_function_call.
(convert_arguments): Remove nargs and argarray parameters. Change
values to a VEC. Add origtypes parameter.
(build_modify_expr): Add rhs_origtype parameter. Change all
callers.
(convert_for_assignment): Add origtype parameter. Change all
callers. If warn_cxx_compat, check for conversion to an enum
type when calling a function.
(store_init_value): Add origtype parameter. Change all callers.
(digest_init): Likewise.
(struct init_node): Add origtype field.
(add_pending_init): Add origtype parameter. Change all callers.
(output_init_element): Likewise.
(output_pending_init_elements): Pass origtype from init_node to
output_init_element.
(process_init_elemnt): Pass origtype from c_expr to
output_init_element.
(c_finish_return): Add origtype parameter. Change all callers.
* c-common.c (sync_resolve_size): Change params to VEC *. Change
caller.
(sync_resolve_params): Likewise.
(sync_resolve_return): Change params to first_param. Change
caller.
(resolve_overloaded_builtins): Change params to VEC *. Change
callers. Save first parameter around call to
build_function_call_vec.
* c-decl.c (finish_decl): Add origtype parameter. Change all
callers. Call build_function_call_vec rather than
build_function_call for cleanup.
* c-tree.h: Update declarations.
* c-common.h: Update declarations.
* stub-objc.c (objc_rewrite_function_call): Change parameter from
params to first_param.
* target.h (struct gcc_target): Change resolve_overloaded_builtin
params parameter from tree to void *.
* config/rs6000/rs6000-c.c (altivec_resolve_overloaded_builtin):
Change arglist parameter to have type void *, and to be a pointer
to a VEC.
* config/rs6000/rs6000-protos.h
(altivec_resolve_overloaded_builtin): Update declaration.
* config/spu/spu-c.c (spu_resolved_overloaded_builtin): Change
fnargs parameter to have type void *, and to be a pointer to a
VEC. Call build_function_call_vec instead of
build_function_call.
* config/spu/spu-protos.h (spu_expand_builtin): Update
declaration.
gcc/cp/:
* typeck.c (build_function_call_vec): New function.
(cp_build_function_call): Only pass first parameter to
objc_rewrite_function_call.
(build_modify_expr): Add rhs_origtype parameter. Change all
callers.
* decl.c (finish_decl): Add origtype parameter. Change all
callers.
* semantics.c (finish_call_expr): Pass VEC to
resolve_overloaded_builtin.
gcc/objc:
* objc-act.c (objc_rewrite_function_call): Change parameter from
params to first_param. Change all callers.
gcc/testsuite:
* gcc.dg/Wcxx-compat-3.c: New testcase.
From-SVN: r146451
2009-04-20 21:35:00 +02:00
|
|
|
b.low, b.high, &ret.low, &ret.high,
|
|
|
|
&mod->low, &mod->high);
|
2006-03-03 01:38:20 +01:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
re PR tree-optimization/28411 ("Illegal instruction" error with -ftrapv)
PR tree-optimization/28411
* double-int.c (double_int_div): Use double_int_divmod.
(double_int_divmod, double_int_sdivmod, double_int_udivmod,
double_int_mod, double_int_smod, double_int_umod): New functions.
* double-int.h (double_int_divmod, double_int_sdivmod,
double_int_udivmod, double_int_mod, double_int_smod, double_int_umod):
Declare.
* tree-ssa-loop-ivopts.c (constant_multiple_of): Returns the result
in double_int.
(get_computation_aff, get_computation_cost_at): Handle double_int
return type of constant_multiple_of.
From-SVN: r116529
2006-08-28 23:15:19 +02:00
|
|
|
/* The same as double_int_divmod with UNS = false. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_sdivmod (double_int a, double_int b, unsigned code, double_int *mod)
|
|
|
|
{
|
|
|
|
return double_int_divmod (a, b, false, code, mod);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The same as double_int_divmod with UNS = true. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_udivmod (double_int a, double_int b, unsigned code, double_int *mod)
|
|
|
|
{
|
|
|
|
return double_int_divmod (a, b, true, code, mod);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Returns A / B (computed as unsigned depending on UNS, and rounded as
|
|
|
|
specified by CODE). CODE is enum tree_code in fact, but double_int.h
|
|
|
|
must be included before tree.h. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_div (double_int a, double_int b, bool uns, unsigned code)
|
|
|
|
{
|
|
|
|
double_int mod;
|
|
|
|
|
|
|
|
return double_int_divmod (a, b, uns, code, &mod);
|
|
|
|
}
|
|
|
|
|
2006-03-03 01:38:20 +01:00
|
|
|
/* The same as double_int_div with UNS = false. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_sdiv (double_int a, double_int b, unsigned code)
|
|
|
|
{
|
|
|
|
return double_int_div (a, b, false, code);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The same as double_int_div with UNS = true. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_udiv (double_int a, double_int b, unsigned code)
|
|
|
|
{
|
|
|
|
return double_int_div (a, b, true, code);
|
|
|
|
}
|
|
|
|
|
re PR tree-optimization/28411 ("Illegal instruction" error with -ftrapv)
PR tree-optimization/28411
* double-int.c (double_int_div): Use double_int_divmod.
(double_int_divmod, double_int_sdivmod, double_int_udivmod,
double_int_mod, double_int_smod, double_int_umod): New functions.
* double-int.h (double_int_divmod, double_int_sdivmod,
double_int_udivmod, double_int_mod, double_int_smod, double_int_umod):
Declare.
* tree-ssa-loop-ivopts.c (constant_multiple_of): Returns the result
in double_int.
(get_computation_aff, get_computation_cost_at): Handle double_int
return type of constant_multiple_of.
From-SVN: r116529
2006-08-28 23:15:19 +02:00
|
|
|
/* Returns A % B (computed as unsigned depending on UNS, and rounded as
|
|
|
|
specified by CODE). CODE is enum tree_code in fact, but double_int.h
|
|
|
|
must be included before tree.h. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_mod (double_int a, double_int b, bool uns, unsigned code)
|
|
|
|
{
|
|
|
|
double_int mod;
|
|
|
|
|
|
|
|
double_int_divmod (a, b, uns, code, &mod);
|
|
|
|
return mod;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The same as double_int_mod with UNS = false. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_smod (double_int a, double_int b, unsigned code)
|
|
|
|
{
|
|
|
|
return double_int_mod (a, b, false, code);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The same as double_int_mod with UNS = true. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_umod (double_int a, double_int b, unsigned code)
|
|
|
|
{
|
|
|
|
return double_int_mod (a, b, true, code);
|
|
|
|
}
|
|
|
|
|
2010-04-20 18:33:46 +02:00
|
|
|
/* Set BITPOS bit in A. */
|
|
|
|
double_int
|
|
|
|
double_int_setbit (double_int a, unsigned bitpos)
|
|
|
|
{
|
|
|
|
if (bitpos < HOST_BITS_PER_WIDE_INT)
|
|
|
|
a.low |= (unsigned HOST_WIDE_INT) 1 << bitpos;
|
|
|
|
else
|
|
|
|
a.high |= (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
|
|
|
|
|
|
|
|
return a;
|
|
|
|
}
|
|
|
|
|
2010-08-04 11:15:51 +02:00
|
|
|
/* Count trailing zeros in A. */
|
|
|
|
int
|
|
|
|
double_int_ctz (double_int a)
|
|
|
|
{
|
|
|
|
unsigned HOST_WIDE_INT w = a.low ? a.low : (unsigned HOST_WIDE_INT) a.high;
|
|
|
|
unsigned bits = a.low ? 0 : HOST_BITS_PER_WIDE_INT;
|
|
|
|
if (!w)
|
|
|
|
return HOST_BITS_PER_DOUBLE_INT;
|
2010-08-05 21:41:31 +02:00
|
|
|
bits += ctz_hwi (w);
|
2010-08-04 11:15:51 +02:00
|
|
|
return bits;
|
|
|
|
}
|
|
|
|
|
2010-04-15 00:05:32 +02:00
|
|
|
/* Shift A left by COUNT places keeping only PREC bits of result. Shift
|
|
|
|
right if COUNT is negative. ARITH true specifies arithmetic shifting;
|
|
|
|
otherwise use logical shift. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_lshift (double_int a, HOST_WIDE_INT count, unsigned int prec, bool arith)
|
|
|
|
{
|
|
|
|
double_int ret;
|
|
|
|
lshift_double (a.low, a.high, count, prec, &ret.low, &ret.high, arith);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Shift A rigth by COUNT places keeping only PREC bits of result. Shift
|
|
|
|
left if COUNT is negative. ARITH true specifies arithmetic shifting;
|
|
|
|
otherwise use logical shift. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_rshift (double_int a, HOST_WIDE_INT count, unsigned int prec, bool arith)
|
|
|
|
{
|
|
|
|
double_int ret;
|
|
|
|
rshift_double (a.low, a.high, count, prec, &ret.low, &ret.high, arith);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
double-int.h (double_int_to_shwi, [...]): Implement as static inline.
* double-int.h (double_int_to_shwi, double_int_to_uhwi,
double_int_fits_in_uhwi_p): Implement as static inline.
(double_int_xor): New inline function.
(double_int_lrotate, double_int_rrotate, double_int_max,
double_int_umax, double_int_smax, double_int_min, double_int_umin,
double_int_smin): Declare.
(lrotate_double, rrotate_double): Remove declaration.
* double-int.c (double_int_fits_in_uhwi_p, double_int_to_shwi,
double_int_to_uhwi, lrotate_double, rrotate_double): Remove function.
(double_int_lrotate, double_int_rrotate, double_int_max,
double_int_umax, double_int_smax, double_int_min, double_int_umin,
double_int_smin): New function.
* fold-const.c (int_const_binop): Clean up, use double_int_*
functions.
* simplify-rtx.c (simplify_const_binary_operation): Clean up, use
double_int_* and immed_double_int_const functions.
From-SVN: r161002
2010-06-18 17:58:48 +02:00
|
|
|
/* Rotate A left by COUNT places keeping only PREC bits of result.
|
|
|
|
Rotate right if COUNT is negative. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_lrotate (double_int a, HOST_WIDE_INT count, unsigned int prec)
|
|
|
|
{
|
|
|
|
double_int t1, t2;
|
|
|
|
|
|
|
|
count %= prec;
|
|
|
|
if (count < 0)
|
|
|
|
count += prec;
|
|
|
|
|
|
|
|
t1 = double_int_lshift (a, count, prec, false);
|
|
|
|
t2 = double_int_rshift (a, prec - count, prec, false);
|
|
|
|
|
|
|
|
return double_int_ior (t1, t2);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Rotate A rigth by COUNT places keeping only PREC bits of result.
|
|
|
|
Rotate right if COUNT is negative. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_rrotate (double_int a, HOST_WIDE_INT count, unsigned int prec)
|
|
|
|
{
|
|
|
|
double_int t1, t2;
|
|
|
|
|
|
|
|
count %= prec;
|
|
|
|
if (count < 0)
|
|
|
|
count += prec;
|
|
|
|
|
|
|
|
t1 = double_int_rshift (a, count, prec, false);
|
|
|
|
t2 = double_int_lshift (a, prec - count, prec, false);
|
|
|
|
|
|
|
|
return double_int_ior (t1, t2);
|
|
|
|
}
|
|
|
|
|
2006-03-03 01:38:20 +01:00
|
|
|
/* Returns -1 if A < B, 0 if A == B and 1 if A > B. Signedness of the
|
|
|
|
comparison is given by UNS. */
|
|
|
|
|
|
|
|
int
|
|
|
|
double_int_cmp (double_int a, double_int b, bool uns)
|
|
|
|
{
|
|
|
|
if (uns)
|
|
|
|
return double_int_ucmp (a, b);
|
|
|
|
else
|
|
|
|
return double_int_scmp (a, b);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Compares two unsigned values A and B. Returns -1 if A < B, 0 if A == B,
|
|
|
|
and 1 if A > B. */
|
|
|
|
|
|
|
|
int
|
|
|
|
double_int_ucmp (double_int a, double_int b)
|
|
|
|
{
|
|
|
|
if ((unsigned HOST_WIDE_INT) a.high < (unsigned HOST_WIDE_INT) b.high)
|
|
|
|
return -1;
|
|
|
|
if ((unsigned HOST_WIDE_INT) a.high > (unsigned HOST_WIDE_INT) b.high)
|
|
|
|
return 1;
|
|
|
|
if (a.low < b.low)
|
|
|
|
return -1;
|
|
|
|
if (a.low > b.low)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Compares two signed values A and B. Returns -1 if A < B, 0 if A == B,
|
|
|
|
and 1 if A > B. */
|
|
|
|
|
|
|
|
int
|
|
|
|
double_int_scmp (double_int a, double_int b)
|
|
|
|
{
|
|
|
|
if (a.high < b.high)
|
|
|
|
return -1;
|
|
|
|
if (a.high > b.high)
|
|
|
|
return 1;
|
2007-08-07 01:18:57 +02:00
|
|
|
if (a.low < b.low)
|
2006-03-03 01:38:20 +01:00
|
|
|
return -1;
|
2007-08-07 01:18:57 +02:00
|
|
|
if (a.low > b.low)
|
2006-03-03 01:38:20 +01:00
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
double-int.h (double_int_to_shwi, [...]): Implement as static inline.
* double-int.h (double_int_to_shwi, double_int_to_uhwi,
double_int_fits_in_uhwi_p): Implement as static inline.
(double_int_xor): New inline function.
(double_int_lrotate, double_int_rrotate, double_int_max,
double_int_umax, double_int_smax, double_int_min, double_int_umin,
double_int_smin): Declare.
(lrotate_double, rrotate_double): Remove declaration.
* double-int.c (double_int_fits_in_uhwi_p, double_int_to_shwi,
double_int_to_uhwi, lrotate_double, rrotate_double): Remove function.
(double_int_lrotate, double_int_rrotate, double_int_max,
double_int_umax, double_int_smax, double_int_min, double_int_umin,
double_int_smin): New function.
* fold-const.c (int_const_binop): Clean up, use double_int_*
functions.
* simplify-rtx.c (simplify_const_binary_operation): Clean up, use
double_int_* and immed_double_int_const functions.
From-SVN: r161002
2010-06-18 17:58:48 +02:00
|
|
|
/* Compares two values A and B. Returns max value. Signedness of the
|
|
|
|
comparison is given by UNS. */
|
|
|
|
|
|
|
|
double_int
|
|
|
|
double_int_max (double_int a, double_int b, bool uns)
|
|
|
|
{
|
|
|
|
return (double_int_cmp (a, b, uns) == 1) ? a : b;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Compares two signed values A and B. Returns max value. */
|
|
|
|
|
|
|
|
double_int double_int_smax (double_int a, double_int b)
|
|
|
|
{
|
|
|
|
return (double_int_scmp (a, b) == 1) ? a : b;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Compares two unsigned values A and B. Returns max value. */
|
|
|
|
|
|
|
|
double_int double_int_umax (double_int a, double_int b)
|
|
|
|
{
|
|
|
|
return (double_int_ucmp (a, b) == 1) ? a : b;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Compares two values A and B. Returns mix value. Signedness of the
|
|
|
|
comparison is given by UNS. */
|
|
|
|
|
|
|
|
double_int double_int_min (double_int a, double_int b, bool uns)
|
|
|
|
{
|
|
|
|
return (double_int_cmp (a, b, uns) == -1) ? a : b;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Compares two signed values A and B. Returns min value. */
|
|
|
|
|
|
|
|
double_int double_int_smin (double_int a, double_int b)
|
|
|
|
{
|
|
|
|
return (double_int_scmp (a, b) == -1) ? a : b;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Compares two unsigned values A and B. Returns min value. */
|
|
|
|
|
|
|
|
double_int double_int_umin (double_int a, double_int b)
|
|
|
|
{
|
|
|
|
return (double_int_ucmp (a, b) == -1) ? a : b;
|
|
|
|
}
|
|
|
|
|
2006-03-03 01:38:20 +01:00
|
|
|
/* Splits last digit of *CST (taken as unsigned) in BASE and returns it. */
|
|
|
|
|
|
|
|
static unsigned
|
|
|
|
double_int_split_digit (double_int *cst, unsigned base)
|
|
|
|
{
|
|
|
|
unsigned HOST_WIDE_INT resl, reml;
|
|
|
|
HOST_WIDE_INT resh, remh;
|
|
|
|
|
|
|
|
div_and_round_double (FLOOR_DIV_EXPR, true, cst->low, cst->high, base, 0,
|
|
|
|
&resl, &resh, &reml, &remh);
|
|
|
|
cst->high = resh;
|
|
|
|
cst->low = resl;
|
|
|
|
|
|
|
|
return reml;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Dumps CST to FILE. If UNS is true, CST is considered to be unsigned,
|
|
|
|
otherwise it is signed. */
|
|
|
|
|
|
|
|
void
|
|
|
|
dump_double_int (FILE *file, double_int cst, bool uns)
|
|
|
|
{
|
|
|
|
unsigned digits[100], n;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (double_int_zero_p (cst))
|
|
|
|
{
|
|
|
|
fprintf (file, "0");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!uns && double_int_negative_p (cst))
|
|
|
|
{
|
|
|
|
fprintf (file, "-");
|
|
|
|
cst = double_int_neg (cst);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (n = 0; !double_int_zero_p (cst); n++)
|
|
|
|
digits[n] = double_int_split_digit (&cst, 10);
|
|
|
|
for (i = n - 1; i >= 0; i--)
|
|
|
|
fprintf (file, "%u", digits[i]);
|
|
|
|
}
|
2007-04-30 21:14:04 +02:00
|
|
|
|
|
|
|
|
|
|
|
/* Sets RESULT to VAL, taken unsigned if UNS is true and as signed
|
|
|
|
otherwise. */
|
|
|
|
|
|
|
|
void
|
|
|
|
mpz_set_double_int (mpz_t result, double_int val, bool uns)
|
|
|
|
{
|
|
|
|
bool negate = false;
|
|
|
|
unsigned HOST_WIDE_INT vp[2];
|
|
|
|
|
|
|
|
if (!uns && double_int_negative_p (val))
|
|
|
|
{
|
|
|
|
negate = true;
|
|
|
|
val = double_int_neg (val);
|
|
|
|
}
|
|
|
|
|
|
|
|
vp[0] = val.low;
|
|
|
|
vp[1] = (unsigned HOST_WIDE_INT) val.high;
|
|
|
|
mpz_import (result, 2, -1, sizeof (HOST_WIDE_INT), 0, 0, vp);
|
|
|
|
|
|
|
|
if (negate)
|
|
|
|
mpz_neg (result, result);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Returns VAL converted to TYPE. If WRAP is true, then out-of-range
|
|
|
|
values of VAL will be wrapped; otherwise, they will be set to the
|
|
|
|
appropriate minimum or maximum TYPE bound. */
|
|
|
|
|
|
|
|
double_int
|
alias.c (component_uses_parent_alias_set): Constify.
* alias.c (component_uses_parent_alias_set): Constify.
* alias.h (component_uses_parent_alias_set): Likewise.
* cfgrtl.c (print_rtl_with_bb): Likewise.
* double-int.c (tree_to_double_int, double_int_fits_to_tree_p,
mpz_get_double_int): Likewise.
* double-int.h (double_int_fits_to_tree_p, tree_to_double_int,
mpz_get_double_int): Likewise.
* expr.c (is_aligning_offset, undefined_operand_subword_p,
mostly_zeros_p, all_zeros_p, safe_from_p, is_aligning_offset):
Likewise.
* expr.h (safe_from_p): Likewise.
* gimple-low.c (try_catch_may_fallthru, block_may_fallthru):
Likewise.
* gimplify.c (should_carry_locus_p, zero_sized_field_decl,
zero_sized_type, goa_lhs_expr_p): Likewise.
* omp-low.c (is_variable_sized, use_pointer_for_field): Likewise.
* rtl.h (print_rtl_with_bb): Likewise.
* sched-vis.c (print_exp, print_value, print_pattern): Likewise.
* tree-cfg.c (const_first_stmt, const_last_stmt): New.
* tree-flow-inline.h (bb_stmt_list): Constify.
(cbsi_start, cbsi_last, cbsi_end_p, cbsi_next, cbsi_prev,
cbsi_stmt): New.
* tree-flow.h (const_block_stmt_iterator, cbsi_start, cbsi_last,
const_first_stmt, const_last_stmt): New.
(block_may_fallthru, empty_block_p): Constify.
* tree-iterator.c (EXPR_FIRST_BODY, EXPR_LAST_BODY,
EXPR_ONLY_BODY): New.
(expr_first, expr_last, expr_only): Use macro for body.
(const_expr_first, const_expr_last, const_expr_only): New.
* tree-iterator.h (const_tree_stmt_iterator, ctsi_start,
ctsi_last, ctsi_end_p, ctsi_one_before_end_p, ctsi_next,
ctsi_prev, ctsi_stmt): New.
* tree-scalar-evolution.c (get_loop_exit_condition): Constify.
* tree-scalar-evolution.h (get_loop_exit_condition): Likewise.
* tree-ssa-loop-niter.c (loop_only_exit_p,
derive_constant_upper_bound): Likewise.
* tree-ssa-phiopt.c (empty_block_p): Likewise.
* tree-ssa-threadupdate.c (redirection_block_p): Likewise.
* tree-vectorizer.c (slpeel_can_duplicate_loop_p): Likewise.
* tree-vectorizer.h (slpeel_can_duplicate_loop_p): Likewise.
* tree-vrp.c (vrp_bitmap_equal_p): Likewise.
* tree.c (get_type_static_bounds): Likewise.
* tree.h (const_expr_first, const_expr_last, const_expr_only): New.
(get_type_static_bounds): Constify.
From-SVN: r127483
2007-08-14 17:18:11 +02:00
|
|
|
mpz_get_double_int (const_tree type, mpz_t val, bool wrap)
|
2007-04-30 21:14:04 +02:00
|
|
|
{
|
|
|
|
unsigned HOST_WIDE_INT *vp;
|
|
|
|
size_t count, numb;
|
|
|
|
double_int res;
|
|
|
|
|
|
|
|
if (!wrap)
|
2009-11-25 11:55:54 +01:00
|
|
|
{
|
2007-04-30 21:14:04 +02:00
|
|
|
mpz_t min, max;
|
|
|
|
|
|
|
|
mpz_init (min);
|
|
|
|
mpz_init (max);
|
|
|
|
get_type_static_bounds (type, min, max);
|
|
|
|
|
|
|
|
if (mpz_cmp (val, min) < 0)
|
|
|
|
mpz_set (val, min);
|
|
|
|
else if (mpz_cmp (val, max) > 0)
|
|
|
|
mpz_set (val, max);
|
|
|
|
|
|
|
|
mpz_clear (min);
|
|
|
|
mpz_clear (max);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Determine the number of unsigned HOST_WIDE_INT that are required
|
|
|
|
for representing the value. The code to calculate count is
|
|
|
|
extracted from the GMP manual, section "Integer Import and Export":
|
|
|
|
http://gmplib.org/manual/Integer-Import-and-Export.html */
|
|
|
|
numb = 8*sizeof(HOST_WIDE_INT);
|
|
|
|
count = (mpz_sizeinbase (val, 2) + numb-1) / numb;
|
|
|
|
if (count < 2)
|
|
|
|
count = 2;
|
|
|
|
vp = (unsigned HOST_WIDE_INT *) alloca (count * sizeof(HOST_WIDE_INT));
|
|
|
|
|
|
|
|
vp[0] = 0;
|
|
|
|
vp[1] = 0;
|
|
|
|
mpz_export (vp, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, val);
|
|
|
|
|
|
|
|
gcc_assert (wrap || count <= 2);
|
|
|
|
|
|
|
|
res.low = vp[0];
|
|
|
|
res.high = (HOST_WIDE_INT) vp[1];
|
|
|
|
|
|
|
|
res = double_int_ext (res, TYPE_PRECISION (type), TYPE_UNSIGNED (type));
|
|
|
|
if (mpz_sgn (val) < 0)
|
|
|
|
res = double_int_neg (res);
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|