gcc/libgcc/config/libbid/bid128_to_uint32.c
H.J. Lu b2a00c8984 Makefile.in (dfp-filenames): Replace decimal_globals...
libgcc/

2007-09-27  H.J. Lu  <hongjiu.lu@intel.com>

	* Makefile.in (dfp-filenames): Replace decimal_globals,
	decimal_data, binarydecimal and convert_data with
	bid_decimal_globals, bid_decimal_data, bid_binarydecimal
	and bid_convert_data, respectively.

libgcc/config/libbid/

2007-09-27  H.J. Lu  <hongjiu.lu@intel.com>

	* bid128_fromstring.c: Removed.

	* bid_dpd.c: New from libbid 2007-09-26.
	* bid128_to_int16.c: Likewise.
	* bid128_to_int8.c: Likewise.
	* bid128_to_uint8.c: Likewise.
	* bid128_to_uint16.c: Likewise.
	* bid64_to_int16.c: Likewise.
	* bid64_to_int8.c: Likewise.
	* bid64_to_uint16.c: Likewise.
	* bid64_to_uint8.c: Likewise.

	* bid128_2_str.h: Updated from libbid 2007-09-26.
	* bid128_2_str_macros.h: Likewise.
	* bid128_2_str_tables.c: Likewise.
	* bid128_add.c: Likewise.
	* bid128.c: Likewise.
	* bid128_compare.c: Likewise.
	* bid128_div.c: Likewise.
	* bid128_fma.c: Likewise.
	* bid128_logb.c: Likewise.
	* bid128_minmax.c: Likewise.
	* bid128_mul.c: Likewise.
	* bid128_next.c: Likewise.
	* bid128_noncomp.c: Likewise.
	* bid128_quantize.c: Likewise.
	* bid128_rem.c: Likewise.
	* bid128_round_integral.c: Likewise.
	* bid128_scalb.c: Likewise.
	* bid128_sqrt.c: Likewise.
	* bid128_string.c: Likewise.
	* bid128_to_int32.c: Likewise.
	* bid128_to_int64.c: Likewise.
	* bid128_to_uint32.c: Likewise.
	* bid128_to_uint64.c: Likewise.
	* bid32_to_bid128.c: Likewise.
	* bid32_to_bid64.c: Likewise.
	* bid64_add.c: Likewise.
	* bid64_compare.c: Likewise.
	* bid64_div.c: Likewise.
	* bid64_fma.c: Likewise.
	* bid64_logb.c: Likewise.
	* bid64_minmax.c: Likewise.
	* bid64_mul.c: Likewise.
	* bid64_next.c: Likewise.
	* bid64_noncomp.c: Likewise.
	* bid64_quantize.c: Likewise.
	* bid64_rem.c: Likewise.
	* bid64_round_integral.c: Likewise.
	* bid64_scalb.c: Likewise.
	* bid64_sqrt.c: Likewise.
	* bid64_string.c: Likewise.
	* bid64_to_bid128.c: Likewise.
	* bid64_to_int32.c: Likewise.
	* bid64_to_int64.c: Likewise.
	* bid64_to_uint32.c: Likewise.
	* bid64_to_uint64.c: Likewise.
	* bid_b2d.h: Likewise.
	* bid_binarydecimal.c: Likewise.
	* bid_conf.h: Likewise.
	* bid_convert_data.c: Likewise.
	* bid_decimal_data.c: Likewise.
	* bid_decimal_globals.c: Likewise.
	* bid_div_macros.h: Likewise.
	* bid_flag_operations.c: Likewise.
	* bid_from_int.c: Likewise.
	* bid_functions.h: Likewise.
	* bid_gcc_intrinsics.h: Likewise.
	* bid_inline_add.h: Likewise.
	* bid_internal.h: Likewise.
	* bid_round.c: Likewise.
	* bid_sqrt_macros.h: Likewise.
	* _addsub_dd.c: Likewise.
	* _addsub_sd.c: Likewise.
	* _addsub_td.c: Likewise.
	* _dd_to_df.c: Likewise.
	* _dd_to_di.c: Likewise.
	* _dd_to_sd.c: Likewise.
	* _dd_to_sf.c: Likewise.
	* _dd_to_si.c: Likewise.
	* _dd_to_td.c: Likewise.
	* _dd_to_tf.c: Likewise.
	* _dd_to_udi.c: Likewise.
	* _dd_to_usi.c: Likewise.
	* _dd_to_xf.c: Likewise.
	* _df_to_dd.c: Likewise.
	* _df_to_sd.c: Likewise.
	* _df_to_td.c: Likewise.
	* _di_to_dd.c: Likewise.
	* _di_to_sd.c: Likewise.
	* _di_to_td.c: Likewise.
	* _div_dd.c: Likewise.
	* _div_sd.c: Likewise.
	* _div_td.c: Likewise.
	* _eq_dd.c: Likewise.
	* _eq_sd.c: Likewise.
	* _eq_td.c: Likewise.
	* _ge_dd.c: Likewise.
	* _ge_sd.c: Likewise.
	* _ge_td.c: Likewise.
	* _gt_dd.c: Likewise.
	* _gt_sd.c: Likewise.
	* _gt_td.c: Likewise.
	* _isinfd128.c: Likewise.
	* _isinfd32.c: Likewise.
	* _isinfd64.c: Likewise.
	* _le_dd.c: Likewise.
	* _le_sd.c: Likewise.
	* _le_td.c: Likewise.
	* _lt_dd.c: Likewise.
	* _lt_sd.c: Likewise.
	* _lt_td.c: Likewise.
	* _mul_dd.c: Likewise.
	* _mul_sd.c: Likewise.
	* _mul_td.c: Likewise.
	* _ne_dd.c: Likewise.
	* _ne_sd.c: Likewise.
	* _ne_td.c: Likewise.
	* _sd_to_dd.c: Likewise.
	* _sd_to_df.c: Likewise.
	* _sd_to_di.c: Likewise.
	* _sd_to_sf.c: Likewise.
	* _sd_to_si.c: Likewise.
	* _sd_to_td.c: Likewise.
	* _sd_to_tf.c: Likewise.
	* _sd_to_udi.c: Likewise.
	* _sd_to_usi.c: Likewise.
	* _sd_to_xf.c: Likewise.
	* _sf_to_dd.c: Likewise.
	* _sf_to_sd.c: Likewise.
	* _sf_to_td.c: Likewise.
	* _si_to_dd.c: Likewise.
	* _si_to_sd.c: Likewise.
	* _si_to_td.c: Likewise.
	* _td_to_dd.c: Likewise.
	* _td_to_df.c: Likewise.
	* _td_to_di.c: Likewise.
	* _td_to_sd.c: Likewise.
	* _td_to_sf.c: Likewise.
	* _td_to_si.c: Likewise.
	* _td_to_tf.c: Likewise.
	* _td_to_udi.c: Likewise.
	* _td_to_usi.c: Likewise.
	* _td_to_xf.c: Likewise.
	* _tf_to_dd.c: Likewise.
	* _tf_to_sd.c: Likewise.
	* _tf_to_td.c: Likewise.
	* _udi_to_dd.c: Likewise.
	* _udi_to_sd.c: Likewise.
	* _udi_to_td.c: Likewise.
	* _unord_dd.c: Likewise.
	* _unord_sd.c: Likewise.
	* _unord_td.c: Likewise.
	* _usi_to_dd.c: Likewise.
	* _usi_to_sd.c: Likewise.
	* _usi_to_td.c: Likewise.
	* _xf_to_dd.c: Likewise.
	* _xf_to_sd.c: Likewise.
	* _xf_to_td.c: Likewise.

2007-09-27  H.J. Lu  <hongjiu.lu@intel.com>

	* b2d.h: Renamed to ...
	* bid_b2d.h: This.

	* bid128_to_string.c: Renamed to ...
	* bid128_string.c: This.

	* bid_intrinsics.h: Renamed to ...
	* bid_gcc_intrinsics.h: This.

	* bid_string.c: Renamed to ...
	* bid64_string.c: This.

	* binarydecimal.c: Renamed to ...
	* bid_binarydecimal.c: This.

	* convert_data.c: Renamed to ...
	* bid_convert_data.c: This.

	* decimal_data.c: Renamed to ...
	* bid_decimal_data.c: This.

	* decimal_globals.c: Renamed to ...
	* bid_decimal_globals.c: This.

	* div_macros.h: Renamed to ...
	* bid_div_macros.h: This.

	* inline_bid_add.h: Renamed to ...
	* bid_inline_add.h: This.

	* sqrt_macros.h: Renamed to ...
	* bid_sqrt_macros.h: This.

From-SVN: r128841
2007-09-27 10:47:23 -07:00


/* Copyright (C) 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
#include "bid_internal.h"
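// Overview (informal): this translation unit provides the BID128 ->
// unsigned 32-bit integer conversions for several rounding modes. The
// routines visible below share one pattern -- unpack the operand, handle
// NaN/infinity and non-canonical encodings, reject values that cannot fit
// in 32 bits, then round through the scaled-reciprocal tables. The 'x'
// variants (e.g. bid128_to_uint32_xrnint, bid128_to_uint32_xfloor) differ
// from their counterparts only in raising the inexact exception.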
/*****************************************************************************
* BID128_to_uint32_rnint
****************************************************************************/
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (unsigned int,
bid128_to_uint32_rnint, x)
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
// Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1, C;
UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
UINT256 fstar;
UINT256 P256;
// unpack x
x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
C1.w[1] = x.w[1] & MASK_COEFF;
C1.w[0] = x.w[0];
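// Worked example (illustrative; assumes the usual BID128 layout, i.e.
// sign in bit 63 of the high word, biased exponent in bits 49-62,
// coefficient in the remaining 113 bits): the value 7 * 10^0 is stored
// with biased exponent 6176 = 0x1820, so
//   x.w[1] = 0x3040000000000000, x.w[0] = 0x0000000000000007
// and the unpacking above yields x_sign = 0, x_exp = 0x3040000000000000
// and C1 = 7.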
// check for NaN or Infinity
if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
// x is special
if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is QNaN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
} else { // x is not a NaN, so it must be infinity
if (!x_sign) { // x is +inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is -inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
}
}
// check for non-canonical values (after the check for special values)
if ((C1.w[1] > 0x0001ed09bead87c0ull)
|| (C1.w[1] == 0x0001ed09bead87c0ull
&& (C1.w[0] > 0x378d8e63ffffffffull))
|| ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
res = 0x00000000;
BID_RETURN (res);
} else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
// x is 0
res = 0x00000000;
BID_RETURN (res);
} else { // x is not special and is not zero
// q = nr. of decimal digits in x
// determine first the nr. of bits in x
if (C1.w[1] == 0) {
if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
} else { // x < 2^32
tmp1.d = (double) (C1.w[0]); // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // if x < 2^53
tmp1.d = (double) C1.w[0]; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
tmp1.d = (double) C1.w[1]; // exact conversion
x_nr_bits =
65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
q = nr_digits[x_nr_bits - 1].digits1;
if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
|| (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
&& C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
q++;
}
exp = (x_exp >> 49) - 6176;
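// Illustration of the digit count (informal): for C1 = 1234567 the code
// above finds x_nr_bits = 21 (2^20 <= 1234567 < 2^21); every 21-bit value
// has exactly 7 decimal digits, so the table entry gives q = 7 directly.
// For x_nr_bits = 20 the range [2^19, 2^20) = [524288, 1048575] spans both
// 6- and 7-digit values, so .digits is 0 and the threshold test (in effect
// a comparison against 10^6) picks q = 6 or q = 7.
// The bias 6176 maps the exponent field back to the unbiased decimal
// exponent, e.g. a stored field of 6175 gives exp = -1.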
if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
} else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
if (x_sign) { // if n < 0 and q + exp = 10
// if n < -1/2 then n cannot be converted to uint32 with RN
// too large if c(0)c(1)...c(9).c(10)...c(q-1) > 1/2
// <=> 0.c(0)c(1)...c(q-1) * 10^11 > 0x05, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 > 0x05ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 > 0x05 <=>
// C > 0x05 * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 1/2 up)
tmp64 = 0x05ull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
} else { // if n > 0 and q + exp = 10
// if n >= 2^32 - 1/2 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32-1/2
// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x9fffffffb, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x9fffffffbull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0x9fffffffb <=>
// C >= 0x9fffffffb * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 2^32-1/2 up)
tmp64 = 0x9fffffffbull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1]
|| (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
}
}
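// For reference, the constants above are the bounds scaled into the
// 11-digit integer c(0)c(1)...c(9)c(10), i.e. multiplied by 10:
//   0x05        = 5           = (1/2) * 10
//   0x9fffffffb = 42949672955 = (2^32 - 1/2) * 10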
// n is not too large to be converted to uint32: -1/2 <= n < 2^32 - 1/2
// Note: some of the cases tested for above fall through to this point
if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
// return 0
res = 0x00000000;
BID_RETURN (res);
} else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
// if 0.c(0)c(1)...c(q-1) <= 0.5 <=> c(0)c(1)...c(q-1) <= 5 * 10^(q-1)
// res = 0
// else if x > 0
// res = +1
// else // if x < 0
// invalid exc
ind = q - 1;
if (ind <= 18) { // 0 <= ind <= 18
if ((C1.w[1] == 0) && (C1.w[0] <= midpoint64[ind])) {
res = 0x00000000; // return 0
} else if (!x_sign) { // n > 0
res = 0x00000001; // return +1
} else {
res = 0x80000000;
*pfpsf |= INVALID_EXCEPTION;
}
} else { // 19 <= ind <= 33
if ((C1.w[1] < midpoint128[ind - 19].w[1])
|| ((C1.w[1] == midpoint128[ind - 19].w[1])
&& (C1.w[0] <= midpoint128[ind - 19].w[0]))) {
res = 0x00000000; // return 0
} else if (!x_sign) { // n > 0
res = 0x00000001; // return +1
} else {
res = 0x80000000;
*pfpsf |= INVALID_EXCEPTION;
}
}
} else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// 1 <= x < 2^32-1/2 so x can be rounded
// to nearest to a 32-bit unsigned integer
if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
C1.w[0] = C1.w[0] + midpoint64[ind - 1];
} else {
C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
}
if (C1.w[0] < tmp64)
C1.w[1]++;
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
// 1 <= x <= 33
// kx = 10^(-x) = ten2mk128[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 118 bits
__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[1] = P256.w[3];
Cstar.w[0] = P256.w[2];
fstar.w[3] = 0;
fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
} else { // 22 <= ind - 1 <= 33
Cstar.w[1] = 0;
Cstar.w[0] = P256.w[3];
fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
fstar.w[2] = P256.w[2];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
}
// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind], e.g.
// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
// if (0 < f* < 10^(-x)) then the result is a midpoint
// if floor(C*) is even then C* = floor(C*) - logical right
// shift; C* has p decimal digits, correct by Prop. 1)
// else if floor(C*) is odd C* = floor(C*)-1 (logical right
// shift; C* has p decimal digits, correct by Pr. 1)
// else
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
// shift right C* by Ex-128 = shiftright128[ind]
shift = shiftright128[ind - 1]; // 0 <= shift <= 102
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[0] =
(Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
// redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
} else { // 22 <= ind - 1 <= 33
Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
}
// if the result was a midpoint it was rounded away from zero, so
// it will need a correction
// check for midpoints
if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
&& (fstar.w[1] || fstar.w[0])
&& (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
// the result is a midpoint; round to nearest
if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
// if floor(C*) is odd C = floor(C*) - 1; the result >= 1
Cstar.w[0]--; // Cstar.w[0] is now even
} // else MP in [ODD, EVEN]
}
res = Cstar.w[0]; // the result is positive
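// Worked example of the rounding above (illustrative): for x = 12.5,
// i.e. C1 = 125 and exp = -1, ind = 1. Adding 1/2 * 10^1 = 5 gives 130;
// multiplying by the (rounded-up) approximation of 10^-1 yields C* just
// above 13 with a tiny fraction f*, so the midpoint test fires; 13 is
// odd, so it is decremented and the result is 12 (ties to even).
// For x = 12.7 (C1 = 127): 127 + 5 = 132, C* ~= 13.2, f* ~= 0.2 is not
// below the midpoint threshold, and the result is 13.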
} else if (exp == 0) {
// 1 <= q <= 10
// res = C (exact)
res = C1.w[0];
} else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = C * 10^exp (exact)
res = C1.w[0] * ten2k64[exp];
}
}
}
BID_RETURN (res);
}
/*****************************************************************************
* BID128_to_uint32_xrnint
****************************************************************************/
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (unsigned int,
bid128_to_uint32_xrnint, x)
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
// Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
UINT64 tmp64, tmp64A;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1, C;
UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
UINT256 fstar;
UINT256 P256;
unsigned int tmp_inexact = 0;
// unpack x
x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
C1.w[1] = x.w[1] & MASK_COEFF;
C1.w[0] = x.w[0];
// check for NaN or Infinity
if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
// x is special
if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is QNaN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
} else { // x is not a NaN, so it must be infinity
if (!x_sign) { // x is +inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is -inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
}
}
// check for non-canonical values (after the check for special values)
if ((C1.w[1] > 0x0001ed09bead87c0ull)
|| (C1.w[1] == 0x0001ed09bead87c0ull
&& (C1.w[0] > 0x378d8e63ffffffffull))
|| ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
res = 0x00000000;
BID_RETURN (res);
} else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
// x is 0
res = 0x00000000;
BID_RETURN (res);
} else { // x is not special and is not zero
// q = nr. of decimal digits in x
// determine first the nr. of bits in x
if (C1.w[1] == 0) {
if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
} else { // x < 2^32
tmp1.d = (double) (C1.w[0]); // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // if x < 2^53
tmp1.d = (double) C1.w[0]; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
tmp1.d = (double) C1.w[1]; // exact conversion
x_nr_bits =
65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
q = nr_digits[x_nr_bits - 1].digits1;
if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
|| (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
&& C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
q++;
}
exp = (x_exp >> 49) - 6176;
if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
} else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
if (x_sign) { // if n < 0 and q + exp = 10
// if n < -1/2 then n cannot be converted to uint32 with RN
// too large if c(0)c(1)...c(9).c(10)...c(q-1) > 1/2
// <=> 0.c(0)c(1)...c(q-1) * 10^11 > 0x05, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 > 0x05ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 > 0x05 <=>
// C > 0x05 * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 1/2 up)
tmp64 = 0x05ull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
} else { // if n > 0 and q + exp = 10
// if n >= 2^32 - 1/2 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32-1/2
// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x9fffffffb, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x9fffffffbull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0x9fffffffb <=>
// C >= 0x9fffffffb * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 2^32-1/2 up)
tmp64 = 0x9fffffffbull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1]
|| (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
}
}
// n is not too large to be converted to uint32: -1/2 <= n < 2^32 - 1/2
// Note: some of the cases tested for above fall through to this point
if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// return 0
res = 0x00000000;
BID_RETURN (res);
} else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
// if 0.c(0)c(1)...c(q-1) <= 0.5 <=> c(0)c(1)...c(q-1) <= 5 * 10^(q-1)
// res = 0
// else if x > 0
// res = +1
// else // if x < 0
// invalid exc
ind = q - 1;
if (ind <= 18) { // 0 <= ind <= 18
if ((C1.w[1] == 0) && (C1.w[0] <= midpoint64[ind])) {
res = 0x00000000; // return 0
} else if (!x_sign) { // n > 0
res = 0x00000001; // return +1
} else {
res = 0x80000000;
*pfpsf |= INVALID_EXCEPTION;
BID_RETURN (res);
}
} else { // 19 <= ind <= 33
if ((C1.w[1] < midpoint128[ind - 19].w[1])
|| ((C1.w[1] == midpoint128[ind - 19].w[1])
&& (C1.w[0] <= midpoint128[ind - 19].w[0]))) {
res = 0x00000000; // return 0
} else if (!x_sign) { // n > 0
res = 0x00000001; // return +1
} else {
res = 0x80000000;
*pfpsf |= INVALID_EXCEPTION;
BID_RETURN (res);
}
}
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
} else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// 1 <= x < 2^32-1/2 so x can be rounded
// to nearest to a 32-bit unsigned integer
if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
C1.w[0] = C1.w[0] + midpoint64[ind - 1];
} else {
C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
}
if (C1.w[0] < tmp64)
C1.w[1]++;
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
// 1 <= x <= 33
// kx = 10^(-x) = ten2mk128[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 118 bits
__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[1] = P256.w[3];
Cstar.w[0] = P256.w[2];
fstar.w[3] = 0;
fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
} else { // 22 <= ind - 1 <= 33
Cstar.w[1] = 0;
Cstar.w[0] = P256.w[3];
fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
fstar.w[2] = P256.w[2];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
}
// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind], e.g.
// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
// if (0 < f* < 10^(-x)) then the result is a midpoint
// if floor(C*) is even then C* = floor(C*) - logical right
// shift; C* has p decimal digits, correct by Prop. 1)
// else if floor(C*) is odd C* = floor(C*)-1 (logical right
// shift; C* has p decimal digits, correct by Pr. 1)
// else
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
// shift right C* by Ex-128 = shiftright128[ind]
shift = shiftright128[ind - 1]; // 0 <= shift <= 102
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[0] =
(Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
// redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
} else { // 22 <= ind - 1 <= 33
Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
}
// determine inexactness of the rounding of C*
// if (0 < f* - 1/2 < 10^(-x)) then
// the result is exact
// else // if (f* - 1/2 > T*) then
// the result is inexact
if (ind - 1 <= 2) {
if (fstar.w[1] > 0x8000000000000000ull ||
(fstar.w[1] == 0x8000000000000000ull
&& fstar.w[0] > 0x0ull)) {
// f* > 1/2 and the result may be exact
tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
if (tmp64 > ten2mk128trunc[ind - 1].w[1]
|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
// *pfpsf |= INEXACT_EXCEPTION;
tmp_inexact = 1;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
// *pfpsf |= INEXACT_EXCEPTION;
tmp_inexact = 1;
}
} else if (ind - 1 <= 21) { // if 3 <= ind <= 21
if (fstar.w[3] > 0x0 ||
(fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
(fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
(fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[2] - onehalf128[ind - 1];
tmp64A = fstar.w[3];
if (tmp64 > fstar.w[2])
tmp64A--;
if (tmp64A || tmp64
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
// *pfpsf |= INEXACT_EXCEPTION;
tmp_inexact = 1;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
// *pfpsf |= INEXACT_EXCEPTION;
tmp_inexact = 1;
}
} else { // if 22 <= ind <= 33
if (fstar.w[3] > onehalf128[ind - 1] ||
(fstar.w[3] == onehalf128[ind - 1] &&
(fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[3] - onehalf128[ind - 1];
if (tmp64 || fstar.w[2]
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
// *pfpsf |= INEXACT_EXCEPTION;
tmp_inexact = 1;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
// *pfpsf |= INEXACT_EXCEPTION;
tmp_inexact = 1;
}
}
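// Informal summary of the test above: since 1/2 * 10^ind was added to C1
// before the multiplication, f* is (original fraction + 1/2) mod 1. If x
// was already an integer, f* sits just above 1/2 by the tiny error of the
// rounded-up 10^(-x) approximation, so f* - 1/2 <= T* and the conversion
// is exact; any real fractional part either pushes f* - 1/2 past T* or
// drops f* to 1/2 or below, and tmp_inexact is set. E.g. x = 12.70
// (C1 = 1270, ind = 2) gives f* ~= 0.20, hence inexact, while x = 12.00
// (C1 = 1200, ind = 2) gives f* ~= 0.50 + epsilon, hence exact.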
// if the result was a midpoint it was rounded away from zero, so
// it will need a correction
// check for midpoints
if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
&& (fstar.w[1] || fstar.w[0])
&& (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
// the result is a midpoint; round to nearest
if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
// if floor(C*) is odd C = floor(C*) - 1; the result >= 1
Cstar.w[0]--; // Cstar.w[0] is now even
} // else MP in [ODD, EVEN]
}
res = Cstar.w[0]; // the result is positive
if (tmp_inexact)
*pfpsf |= INEXACT_EXCEPTION;
} else if (exp == 0) {
// 1 <= q <= 10
// res = C (exact)
res = C1.w[0];
} else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = C * 10^exp (exact)
res = C1.w[0] * ten2k64[exp];
}
}
}
BID_RETURN (res);
}
/*****************************************************************************
* BID128_to_uint32_floor
****************************************************************************/
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (unsigned int,
bid128_to_uint32_floor, x)
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
// Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
UINT64 tmp64, tmp64A;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1, C;
UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
UINT256 fstar;
UINT256 P256;
int is_inexact_gt_midpoint = 0;
int is_midpoint_lt_even = 0;
// unpack x
x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
C1.w[1] = x.w[1] & MASK_COEFF;
C1.w[0] = x.w[0];
// check for NaN or Infinity
if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
// x is special
if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is QNaN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
} else { // x is not a NaN, so it must be infinity
if (!x_sign) { // x is +inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is -inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
}
}
// check for non-canonical values (after the check for special values)
if ((C1.w[1] > 0x0001ed09bead87c0ull)
|| (C1.w[1] == 0x0001ed09bead87c0ull
&& (C1.w[0] > 0x378d8e63ffffffffull))
|| ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
res = 0x00000000;
BID_RETURN (res);
} else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
// x is 0
res = 0x00000000;
BID_RETURN (res);
} else { // x is not special and is not zero
// x < 0 is invalid
if (x_sign) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// x > 0 from this point on
// q = nr. of decimal digits in x
// determine first the nr. of bits in x
if (C1.w[1] == 0) {
if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
} else { // x < 2^32
tmp1.d = (double) (C1.w[0]); // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // if x < 2^53
tmp1.d = (double) C1.w[0]; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
tmp1.d = (double) C1.w[1]; // exact conversion
x_nr_bits =
65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
q = nr_digits[x_nr_bits - 1].digits1;
if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
|| (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
&& C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
q++;
}
exp = (x_exp >> 49) - 6176;
if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
} else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
// n > 0 and q + exp = 10
// if n >= 2^32 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32
// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0xa00000000, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0xa00000000ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0xa00000000 <=>
// C >= 0xa00000000 * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 2^32 up)
tmp64 = 0xa00000000ull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
}
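// For reference: 0xa00000000 = 42949672960 = 2^32 * 10, i.e. the limit
// 2^32 scaled into the 11-digit integer c(0)c(1)...c(9)c(10).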
// n is not too large to be converted to uint32: 0 <= n < 2^32
// Note: some of the cases tested for above fall through to this point
if ((q + exp) <= 0) {
// n = +0.0...c(0)c(1)...c(q-1) or n = +0.c(0)c(1)...c(q-1)
// return 0
res = 0x00000000;
BID_RETURN (res);
} else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
// 1 <= x < 2^32 so x can be rounded down to a 32-bit unsigned integer
if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
C1.w[0] = C1.w[0] + midpoint64[ind - 1];
} else {
C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
}
if (C1.w[0] < tmp64)
C1.w[1]++;
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
// 1 <= x <= 33
// kx = 10^(-x) = ten2mk128[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 118 bits
__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[1] = P256.w[3];
Cstar.w[0] = P256.w[2];
fstar.w[3] = 0;
fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
} else { // 22 <= ind - 1 <= 33
Cstar.w[1] = 0;
Cstar.w[0] = P256.w[3];
fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
fstar.w[2] = P256.w[2];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
}
// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind], e.g.
// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
// if (0 < f* < 10^(-x)) then the result is a midpoint
// if floor(C*) is even then C* = floor(C*) - logical right
// shift; C* has p decimal digits, correct by Prop. 1)
// else if floor(C*) is odd C* = floor(C*)-1 (logical right
// shift; C* has p decimal digits, correct by Pr. 1)
// else
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
// shift right C* by Ex-128 = shiftright128[ind]
shift = shiftright128[ind - 1]; // 0 <= shift <= 102
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[0] =
(Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
// redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
} else { // 22 <= ind - 1 <= 33
Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
}
// determine inexactness of the rounding of C*
// if (0 < f* - 1/2 < 10^(-x)) then
// the result is exact
// else // if (f* - 1/2 > T*) then
// the result is inexact
if (ind - 1 <= 2) {
if (fstar.w[1] > 0x8000000000000000ull ||
(fstar.w[1] == 0x8000000000000000ull
&& fstar.w[0] > 0x0ull)) {
// f* > 1/2 and the result may be exact
tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
if (tmp64 > ten2mk128trunc[ind - 1].w[1]
|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
is_inexact_gt_midpoint = 1;
}
} else if (ind - 1 <= 21) { // if 3 <= ind <= 21
if (fstar.w[3] > 0x0 ||
(fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
(fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
(fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[2] - onehalf128[ind - 1];
tmp64A = fstar.w[3];
if (tmp64 > fstar.w[2])
tmp64A--;
if (tmp64A || tmp64
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
is_inexact_gt_midpoint = 1;
}
} else { // if 22 <= ind <= 33
if (fstar.w[3] > onehalf128[ind - 1] ||
(fstar.w[3] == onehalf128[ind - 1] &&
(fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[3] - onehalf128[ind - 1];
if (tmp64 || fstar.w[2]
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
is_inexact_gt_midpoint = 1;
}
}
// if the result was a midpoint it was rounded away from zero, so
// it will need a correction
// check for midpoints
if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
&& (fstar.w[1] || fstar.w[0])
&& (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
// the result is a midpoint; round to nearest
if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
// if floor(C*) is odd C = floor(C*) - 1; the result >= 1
Cstar.w[0]--; // Cstar.w[0] is now even
is_inexact_gt_midpoint = 0;
} else { // else MP in [ODD, EVEN]
is_midpoint_lt_even = 1;
is_inexact_gt_midpoint = 0;
}
}
// general correction for RM
if (is_midpoint_lt_even || is_inexact_gt_midpoint) {
Cstar.w[0] = Cstar.w[0] - 1;
} else {
; // the result is already correct
}
res = Cstar.w[0];
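// Worked examples of the downward correction (illustrative):
//   x = 12.7: the nearest-integer machinery yields C* = 13 (12.7 is above
//     the midpoint 12.5), is_inexact_gt_midpoint is set, and C* is
//     decremented to 12 = floor(12.7).
//   x = 11.5: ties-to-even yields 12 (the even value above the midpoint),
//     is_midpoint_lt_even is set, and the result becomes 11.
//   x = 12.5: ties-to-even already yields 12, no flag is set, and
//     floor(12.5) = 12 needs no correction.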
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
res = C1.w[0];
} else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
res = C1.w[0] * ten2k64[exp];
}
}
}
BID_RETURN (res);
}
/*****************************************************************************
* BID128_to_uint32_xfloor
****************************************************************************/
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (unsigned int,
bid128_to_uint32_xfloor, x)
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
// Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
UINT64 tmp64, tmp64A;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1, C;
UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
UINT256 fstar;
UINT256 P256;
int is_inexact_gt_midpoint = 0;
int is_midpoint_lt_even = 0;
// unpack x
x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
C1.w[1] = x.w[1] & MASK_COEFF;
C1.w[0] = x.w[0];
// check for NaN or Infinity
if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
// x is special
if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is QNaN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
} else { // x is not a NaN, so it must be infinity
if (!x_sign) { // x is +inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is -inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
}
}
// check for non-canonical values (after the check for special values)
if ((C1.w[1] > 0x0001ed09bead87c0ull)
|| (C1.w[1] == 0x0001ed09bead87c0ull
&& (C1.w[0] > 0x378d8e63ffffffffull))
|| ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
res = 0x00000000;
BID_RETURN (res);
} else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
// x is 0
res = 0x00000000;
BID_RETURN (res);
} else { // x is not special and is not zero
// x < 0 is invalid
if (x_sign) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// x > 0 from this point on
// q = nr. of decimal digits in x
// determine first the nr. of bits in x
if (C1.w[1] == 0) {
if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
} else { // x < 2^32
tmp1.d = (double) (C1.w[0]); // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // if x < 2^53
tmp1.d = (double) C1.w[0]; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
tmp1.d = (double) C1.w[1]; // exact conversion
x_nr_bits =
65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
q = nr_digits[x_nr_bits - 1].digits1;
if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
|| (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
&& C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
q++;
}
exp = (x_exp >> 49) - 6176;
if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
} else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
// n > 0 and q + exp = 10
// if n >= 2^32 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32
// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0xa00000000, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0xa00000000ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0xa00000000 <=>
// C >= 0xa00000000 * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 2^32 up)
tmp64 = 0xa00000000ull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
}
// n is not too large to be converted to uint32: 0 <= n < 2^32
// Note: some of the cases tested for above fall through to this point
if ((q + exp) <= 0) {
// n = +0.0...c(0)c(1)...c(q-1) or n = +0.c(0)c(1)...c(q-1)
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// return 0
res = 0x00000000;
BID_RETURN (res);
} else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
// 1 <= x < 2^32 so x can be rounded down to a 32-bit unsigned integer
if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
C1.w[0] = C1.w[0] + midpoint64[ind - 1];
} else {
C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
}
if (C1.w[0] < tmp64)
C1.w[1]++;
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
// 1 <= x <= 33
// kx = 10^(-x) = ten2mk128[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 118 bits
__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[1] = P256.w[3];
Cstar.w[0] = P256.w[2];
fstar.w[3] = 0;
fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
} else { // 22 <= ind - 1 <= 33
Cstar.w[1] = 0;
Cstar.w[0] = P256.w[3];
fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
fstar.w[2] = P256.w[2];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
}
// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind], e.g.
// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
// if (0 < f* < 10^(-x)) then the result is a midpoint
// if floor(C*) is even then C* = floor(C*) - logical right
// shift; C* has p decimal digits, correct by Prop. 1)
// else if floor(C*) is odd C* = floor(C*)-1 (logical right
// shift; C* has p decimal digits, correct by Pr. 1)
// else
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
// shift right C* by Ex-128 = shiftright128[ind]
shift = shiftright128[ind - 1]; // 0 <= shift <= 102
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[0] =
(Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
// redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
} else { // 22 <= ind - 1 <= 33
Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
}
// determine inexactness of the rounding of C*
// if (0 < f* - 1/2 < 10^(-x)) then
// the result is exact
// else // if (f* - 1/2 > T*) then
// the result is inexact
if (ind - 1 <= 2) {
if (fstar.w[1] > 0x8000000000000000ull ||
(fstar.w[1] == 0x8000000000000000ull
&& fstar.w[0] > 0x0ull)) {
// f* > 1/2 and the result may be exact
tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
if (tmp64 > ten2mk128trunc[ind - 1].w[1]
|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
is_inexact_gt_midpoint = 1;
}
} else if (ind - 1 <= 21) { // if 3 <= ind <= 21
if (fstar.w[3] > 0x0 ||
(fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
(fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
(fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[2] - onehalf128[ind - 1];
tmp64A = fstar.w[3];
if (tmp64 > fstar.w[2])
tmp64A--;
if (tmp64A || tmp64
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
is_inexact_gt_midpoint = 1;
}
} else { // if 22 <= ind <= 33
if (fstar.w[3] > onehalf128[ind - 1] ||
(fstar.w[3] == onehalf128[ind - 1] &&
(fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[3] - onehalf128[ind - 1];
if (tmp64 || fstar.w[2]
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
is_inexact_gt_midpoint = 1;
}
}
// if the result was a midpoint it was rounded away from zero, so
// it will need a correction
// check for midpoints
if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
&& (fstar.w[1] || fstar.w[0])
&& (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
// the result is a midpoint; round to nearest
if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
// if floor(C*) is odd C = floor(C*) - 1; the result >= 1
Cstar.w[0]--; // Cstar.w[0] is now even
is_inexact_gt_midpoint = 0;
} else { // else MP in [ODD, EVEN]
is_midpoint_lt_even = 1;
is_inexact_gt_midpoint = 0;
}
}
// general correction for RM
if (is_midpoint_lt_even || is_inexact_gt_midpoint) {
Cstar.w[0] = Cstar.w[0] - 1;
} else {
; // the result is already correct
}
res = Cstar.w[0];
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
res = C1.w[0];
} else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
res = C1.w[0] * ten2k64[exp];
}
}
}
BID_RETURN (res);
}
/*****************************************************************************
* BID128_to_uint32_ceil
****************************************************************************/
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (unsigned int,
bid128_to_uint32_ceil, x)
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
// Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
UINT64 tmp64, tmp64A;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1, C;
UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
UINT256 fstar;
UINT256 P256;
int is_inexact_lt_midpoint = 0;
int is_midpoint_gt_even = 0;
// unpack x
x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
C1.w[1] = x.w[1] & MASK_COEFF;
C1.w[0] = x.w[0];
// check for NaN or Infinity
if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
// x is special
if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is QNaN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
} else { // x is not a NaN, so it must be infinity
if (!x_sign) { // x is +inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is -inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
}
}
// check for non-canonical values (after the check for special values)
if ((C1.w[1] > 0x0001ed09bead87c0ull)
|| (C1.w[1] == 0x0001ed09bead87c0ull
&& (C1.w[0] > 0x378d8e63ffffffffull))
|| ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
res = 0x00000000;
BID_RETURN (res);
} else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
// x is 0
res = 0x00000000;
BID_RETURN (res);
} else { // x is not special and is not zero
// q = nr. of decimal digits in x
// determine first the nr. of bits in x
if (C1.w[1] == 0) {
if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
} else { // x < 2^32
tmp1.d = (double) (C1.w[0]); // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // if x < 2^53
tmp1.d = (double) C1.w[0]; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
tmp1.d = (double) C1.w[1]; // exact conversion
x_nr_bits =
65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
q = nr_digits[x_nr_bits - 1].digits1;
if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
|| (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
&& C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
q++;
}
exp = (x_exp >> 49) - 6176;
if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
} else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
if (x_sign) { // if n < 0 and q + exp = 10
// if n <= -1 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 1
// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x0a, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x0aull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0x0a <=>
// C >= 0x0a * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 1 up)
tmp64 = 0x0aull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1]
|| (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
} else { // if n > 0 and q + exp = 10
// if n > 2^32 - 1 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^32 - 1
// too large if 0.c(0)c(1)...c(q-1) * 10^11 > 0x9fffffff6, 1<=q<=34
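// (0x9fffffff6 = 42949672950 = 10 * (2^32 - 1), the 11-digit scaling of the
// largest 32-bit unsigned value)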
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 > 0x9fffffff6ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 > 0x9fffffff6 <=>
// C > 0x9fffffff6 * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 2^32 up)
tmp64 = 0x9fffffff6ull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
}
}
// n is not too large to be converted to uint32: n <= 2^32 - 1
// Note: some of the cases tested for above fall through to this point
if ((q + exp) <= 0) {
// n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
// return 0 for negative x (rounds up to 0) or 1 for positive x (rounds up to 1)
if (x_sign)
res = 0x00000000;
else
res = 0x00000001;
BID_RETURN (res);
} else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
// x <= -1 or 1 <= x <= 2^32 - 1, so a positive x can be rounded
// toward positive infinity to a 32-bit unsigned integer
if (x_sign) { // x <= -1 is invalid
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// x > 0 from this point on
if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
C1.w[0] = C1.w[0] + midpoint64[ind - 1];
} else {
C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
}
if (C1.w[0] < tmp64)
C1.w[1]++;
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
// 1 <= x <= 33
// kx = 10^(-x) = ten2mk128[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 118 bits
__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[1] = P256.w[3];
Cstar.w[0] = P256.w[2];
fstar.w[3] = 0;
fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
} else { // 22 <= ind - 1 <= 33
Cstar.w[1] = 0;
Cstar.w[0] = P256.w[3];
fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
fstar.w[2] = P256.w[2];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
}
// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind - 1], e.g.
// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
// if (0 < f* < 10^(-x)) then the result is a midpoint
//   if floor(C*) is even then C* = floor(C*) (logical right
//   shift; C* has p decimal digits, correct by Prop. 1)
//   else if floor(C*) is odd C* = floor(C*)-1 (logical right
//   shift; C* has p decimal digits, correct by Pr. 1)
// else
//   C* = floor(C*) (logical right shift; C* has p decimal digits,
//   correct by Property 1)
// n = C* * 10^(e+x)
// shift right C* by Ex-128 = shiftright128[ind - 1]
shift = shiftright128[ind - 1]; // 0 <= shift <= 102
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[0] =
(Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
// redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
} else { // 22 <= ind - 1 <= 33
Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
}
// determine inexactness of the rounding of C*
// if (0 < f* - 1/2 < 10^(-x)) then
// the result is exact
// else // if (f* - 1/2 > T*) then
// the result is inexact
if (ind - 1 <= 2) {
if (fstar.w[1] > 0x8000000000000000ull ||
(fstar.w[1] == 0x8000000000000000ull
&& fstar.w[0] > 0x0ull)) {
// f* > 1/2 and the result may be exact
tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
if (tmp64 > ten2mk128trunc[ind - 1].w[1]
|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
is_inexact_lt_midpoint = 1;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
;
}
} else if (ind - 1 <= 21) { // if 3 <= ind - 1 <= 21
if (fstar.w[3] > 0x0 ||
(fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
(fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
(fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[2] - onehalf128[ind - 1];
tmp64A = fstar.w[3];
if (tmp64 > fstar.w[2])
tmp64A--;
if (tmp64A || tmp64
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
is_inexact_lt_midpoint = 1;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
}
} else { // if 22 <= ind - 1 <= 33
if (fstar.w[3] > onehalf128[ind - 1] ||
(fstar.w[3] == onehalf128[ind - 1] &&
(fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[3] - onehalf128[ind - 1];
if (tmp64 || fstar.w[2]
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
is_inexact_lt_midpoint = 1;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
;
}
}
// if the result was a midpoint it was rounded away from zero, so
// it will need a correction
// check for midpoints
if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
&& (fstar.w[1] || fstar.w[0])
&& (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
// the result is a midpoint; round to nearest
if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
// if floor(C*) is odd C = floor(C*) - 1; the result >= 1
Cstar.w[0]--; // Cstar.w[0] is now even
is_midpoint_gt_even = 1;
is_inexact_lt_midpoint = 0;
} else { // else MP in [ODD, EVEN]
is_inexact_lt_midpoint = 0;
}
}
// general correction for RP
if (is_midpoint_gt_even || is_inexact_lt_midpoint) {
Cstar.w[0] = Cstar.w[0] + 1;
} else {
; // the result is already correct
}
res = Cstar.w[0];
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
res = C1.w[0];
} else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q <= 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
res = C1.w[0] * ten2k64[exp];
}
}
}
BID_RETURN (res);
}
/*****************************************************************************
* BID128_to_uint32_xceil
****************************************************************************/
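// this variant behaves like bid128_to_uint32_ceil (rounding toward positive
// infinity) but, in addition, signals the inexact exception when the
// conversion result is not exact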
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (unsigned int,
bid128_to_uint32_xceil, x)
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
// Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
UINT64 tmp64, tmp64A;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1, C;
UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
UINT256 fstar;
UINT256 P256;
int is_inexact_lt_midpoint = 0;
int is_midpoint_gt_even = 0;
// unpack x
x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
C1.w[1] = x.w[1] & MASK_COEFF;
C1.w[0] = x.w[0];
// check for NaN or Infinity
if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
// x is special
if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is QNaN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
} else { // x is not a NaN, so it must be infinity
if (!x_sign) { // x is +inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is -inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
}
}
// check for non-canonical values (after the check for special values)
if ((C1.w[1] > 0x0001ed09bead87c0ull)
|| (C1.w[1] == 0x0001ed09bead87c0ull
&& (C1.w[0] > 0x378d8e63ffffffffull))
|| ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
res = 0x00000000;
BID_RETURN (res);
} else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
// x is 0
res = 0x00000000;
BID_RETURN (res);
} else { // x is not special and is not zero
// q = nr. of decimal digits in x
// determine first the nr. of bits in x
if (C1.w[1] == 0) {
if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
} else { // x < 2^32
tmp1.d = (double) (C1.w[0]); // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // if x < 2^53
tmp1.d = (double) C1.w[0]; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
tmp1.d = (double) C1.w[1]; // exact conversion
x_nr_bits =
65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
q = nr_digits[x_nr_bits - 1].digits1;
if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
|| (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
&& C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
q++;
}
exp = (x_exp >> 49) - 6176;
if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
} else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in a 32-bit unsigned int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
if (x_sign) { // if n < 0 and q + exp = 10
// if n <= -1 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 1
// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x0a, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x0aull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0x0a <=>
// C >= 0x0a * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 1 up)
tmp64 = 0x0aull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1]
|| (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
} else { // if n > 0 and q + exp = 10
// if n > 2^32 - 1 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^32 - 1
// too large if 0.c(0)c(1)...c(q-1) * 10^11 > 0x9fffffff6, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 > 0x9fffffff6ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 > 0x9fffffff6 <=>
// C > 0x9fffffff6 * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 2^32 up)
tmp64 = 0x9fffffff6ull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
}
}
// n is not too large to be converted to uint32: n <= 2^32 - 1
// Note: some of the cases tested for above fall through to this point
if ((q + exp) <= 0) {
// n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// return 0 for negative x (rounds up to 0) or 1 for positive x (rounds up to 1)
if (x_sign)
res = 0x00000000;
else
res = 0x00000001;
BID_RETURN (res);
} else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
// x <= -1 or 1 <= x <= 2^32 - 1, so a positive x can be rounded
// toward positive infinity to a 32-bit unsigned integer
if (x_sign) { // x <= -1 is invalid
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// x > 0 from this point on
if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
C1.w[0] = C1.w[0] + midpoint64[ind - 1];
} else {
C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
}
if (C1.w[0] < tmp64)
C1.w[1]++;
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
// 1 <= x <= 33
// kx = 10^(-x) = ten2mk128[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 118 bits
__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[1] = P256.w[3];
Cstar.w[0] = P256.w[2];
fstar.w[3] = 0;
fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
} else { // 22 <= ind - 1 <= 33
Cstar.w[1] = 0;
Cstar.w[0] = P256.w[3];
fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
fstar.w[2] = P256.w[2];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
}
// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind - 1], e.g.
// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
// if (0 < f* < 10^(-x)) then the result is a midpoint
//   if floor(C*) is even then C* = floor(C*) (logical right
//   shift; C* has p decimal digits, correct by Prop. 1)
//   else if floor(C*) is odd C* = floor(C*)-1 (logical right
//   shift; C* has p decimal digits, correct by Pr. 1)
// else
//   C* = floor(C*) (logical right shift; C* has p decimal digits,
//   correct by Property 1)
// n = C* * 10^(e+x)
// shift right C* by Ex-128 = shiftright128[ind - 1]
shift = shiftright128[ind - 1]; // 0 <= shift <= 102
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[0] =
(Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
// redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
} else { // 22 <= ind - 1 <= 33
Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
}
// determine inexactness of the rounding of C*
// if (0 < f* - 1/2 < 10^(-x)) then
// the result is exact
// else // if (f* - 1/2 > T*) then
// the result is inexact
if (ind - 1 <= 2) {
if (fstar.w[1] > 0x8000000000000000ull ||
(fstar.w[1] == 0x8000000000000000ull
&& fstar.w[0] > 0x0ull)) {
// f* > 1/2 and the result may be exact
tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
if (tmp64 > ten2mk128trunc[ind - 1].w[1]
|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
is_inexact_lt_midpoint = 1;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
}
} else if (ind - 1 <= 21) { // if 3 <= ind - 1 <= 21
if (fstar.w[3] > 0x0 ||
(fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
(fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
(fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[2] - onehalf128[ind - 1];
tmp64A = fstar.w[3];
if (tmp64 > fstar.w[2])
tmp64A--;
if (tmp64A || tmp64
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
is_inexact_lt_midpoint = 1;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
}
} else { // if 22 <= ind - 1 <= 33
if (fstar.w[3] > onehalf128[ind - 1] ||
(fstar.w[3] == onehalf128[ind - 1] &&
(fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[3] - onehalf128[ind - 1];
if (tmp64 || fstar.w[2]
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
is_inexact_lt_midpoint = 1;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
}
}
// if the result was a midpoint it was rounded away from zero, so
// it will need a correction
// check for midpoints
if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
&& (fstar.w[1] || fstar.w[0])
&& (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
// the result is a midpoint; round to nearest
if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
// if floor(C*) is odd C = floor(C*) - 1; the result >= 1
Cstar.w[0]--; // Cstar.w[0] is now even
is_midpoint_gt_even = 1;
is_inexact_lt_midpoint = 0;
} else { // else MP in [ODD, EVEN]
is_inexact_lt_midpoint = 0;
}
}
// general correction for RP
if (is_midpoint_gt_even || is_inexact_lt_midpoint) {
Cstar.w[0] = Cstar.w[0] + 1;
} else {
; // the result is already correct
}
res = Cstar.w[0];
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
res = C1.w[0];
} else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q <= 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
res = C1.w[0] * ten2k64[exp];
}
}
}
BID_RETURN (res);
}
/*****************************************************************************
* BID128_to_uint32_int
****************************************************************************/
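// rounds x toward zero (truncation) to a 32-bit unsigned integer; the
// inexact exception is not signaled by this variant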
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (unsigned int,
bid128_to_uint32_int, x)
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
// Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
UINT64 tmp64, tmp64A;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1, C;
UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
UINT256 fstar;
UINT256 P256;
int is_inexact_gt_midpoint = 0;
int is_midpoint_lt_even = 0;
// unpack x
x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
C1.w[1] = x.w[1] & MASK_COEFF;
C1.w[0] = x.w[0];
// check for NaN or Infinity
if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
// x is special
if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is QNaN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
} else { // x is not a NaN, so it must be infinity
if (!x_sign) { // x is +inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is -inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
}
}
// check for non-canonical values (after the check for special values)
if ((C1.w[1] > 0x0001ed09bead87c0ull)
|| (C1.w[1] == 0x0001ed09bead87c0ull
&& (C1.w[0] > 0x378d8e63ffffffffull))
|| ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
res = 0x00000000;
BID_RETURN (res);
} else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
// x is 0
res = 0x00000000;
BID_RETURN (res);
} else { // x is not special and is not zero
// q = nr. of decimal digits in x
// determine first the nr. of bits in x
if (C1.w[1] == 0) {
if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
} else { // x < 2^32
tmp1.d = (double) (C1.w[0]); // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // if x < 2^53
tmp1.d = (double) C1.w[0]; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
tmp1.d = (double) C1.w[1]; // exact conversion
x_nr_bits =
65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
q = nr_digits[x_nr_bits - 1].digits1;
if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
|| (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
&& C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
q++;
}
exp = (x_exp >> 49) - 6176;
if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
} else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in a 32-bit unsigned int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
if (x_sign) { // if n < 0 and q + exp = 10
// if n <= -1 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 1
// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x0a, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x0aull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit uint fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0x0a <=>
// C >= 0x0a * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 1 up)
tmp64 = 0x0aull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1]
|| (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
} else { // if n > 0 and q + exp = 10
// if n >= 2^32 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32
// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0xa00000000, 1<=q<=34
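// (0xa00000000 = 42949672960 = 10 * 2^32)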
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0xa00000000ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit uint fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0xa00000000 <=>
// C >= 0xa00000000 * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 2^32 up)
tmp64 = 0xa00000000ull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1]
|| (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
}
}
// n is not too large to be converted to uint32: -2^32 < n < 2^32
// Note: some of the cases tested for above fall through to this point
if ((q + exp) <= 0) {
// n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
// return 0
res = 0x00000000;
BID_RETURN (res);
} else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
// x = d(0)...d(k).d(k+1)..., k >= 0, d(0) != 0
if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// x > 0 from this point on
// 1 <= x < 2^32 so x can be rounded to zero to a 32-bit unsigned integer
if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
C1.w[0] = C1.w[0] + midpoint64[ind - 1];
} else {
C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
}
if (C1.w[0] < tmp64)
C1.w[1]++;
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
// 1 <= x <= 33
// kx = 10^(-x) = ten2mk128[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 118 bits
__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[1] = P256.w[3];
Cstar.w[0] = P256.w[2];
fstar.w[3] = 0;
fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
} else { // 22 <= ind - 1 <= 33
Cstar.w[1] = 0;
Cstar.w[0] = P256.w[3];
fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
fstar.w[2] = P256.w[2];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
}
// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind - 1], e.g.
// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
// if (0 < f* < 10^(-x)) then the result is a midpoint
//   if floor(C*) is even then C* = floor(C*) (logical right
//   shift; C* has p decimal digits, correct by Prop. 1)
//   else if floor(C*) is odd C* = floor(C*)-1 (logical right
//   shift; C* has p decimal digits, correct by Pr. 1)
// else
//   C* = floor(C*) (logical right shift; C* has p decimal digits,
//   correct by Property 1)
// n = C* * 10^(e+x)
// shift right C* by Ex-128 = shiftright128[ind - 1]
shift = shiftright128[ind - 1]; // 0 <= shift <= 102
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[0] =
(Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
// redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
} else { // 22 <= ind - 1 <= 33
Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
}
// determine inexactness of the rounding of C*
// if (0 < f* - 1/2 < 10^(-x)) then
// the result is exact
// else // if (f* - 1/2 > T*) then
// the result is inexact
if (ind - 1 <= 2) {
if (fstar.w[1] > 0x8000000000000000ull ||
(fstar.w[1] == 0x8000000000000000ull
&& fstar.w[0] > 0x0ull)) {
// f* > 1/2 and the result may be exact
tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
if (tmp64 > ten2mk128trunc[ind - 1].w[1]
|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
is_inexact_gt_midpoint = 1;
}
} else if (ind - 1 <= 21) { // if 3 <= ind - 1 <= 21
if (fstar.w[3] > 0x0 ||
(fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
(fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
(fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[2] - onehalf128[ind - 1];
tmp64A = fstar.w[3];
if (tmp64 > fstar.w[2])
tmp64A--;
if (tmp64A || tmp64
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
is_inexact_gt_midpoint = 1;
}
} else { // if 22 <= ind - 1 <= 33
if (fstar.w[3] > onehalf128[ind - 1] ||
(fstar.w[3] == onehalf128[ind - 1] &&
(fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[3] - onehalf128[ind - 1];
if (tmp64 || fstar.w[2]
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
is_inexact_gt_midpoint = 1;
}
}
// if the result was a midpoint it was rounded away from zero, so
// it will need a correction
// check for midpoints
if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
&& (fstar.w[1] || fstar.w[0])
&& (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
// the result is a midpoint; round to nearest
if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
// if floor(C*) is odd C = floor(C*) - 1; the result >= 1
Cstar.w[0]--; // Cstar.w[0] is now even
is_inexact_gt_midpoint = 0;
} else { // else MP in [ODD, EVEN]
is_midpoint_lt_even = 1;
is_inexact_gt_midpoint = 0;
}
}
// general correction for RZ
if (is_midpoint_lt_even || is_inexact_gt_midpoint) {
Cstar.w[0] = Cstar.w[0] - 1;
} else {
; // exact, the result is already correct
}
res = Cstar.w[0];
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
res = C1.w[0];
} else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q <= 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
res = C1.w[0] * ten2k64[exp];
}
}
}
BID_RETURN (res);
}
/*****************************************************************************
* BID128_to_uint32_xint
****************************************************************************/
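// this variant behaves like bid128_to_uint32_int (rounding toward zero) but,
// in addition, signals the inexact exception when the result is not exact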
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (unsigned int,
bid128_to_uint32_xint, x)
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
// Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
UINT64 tmp64, tmp64A;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1, C;
UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
UINT256 fstar;
UINT256 P256;
int is_inexact_gt_midpoint = 0;
int is_midpoint_lt_even = 0;
// unpack x
x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
C1.w[1] = x.w[1] & MASK_COEFF;
C1.w[0] = x.w[0];
// check for NaN or Infinity
if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
// x is special
if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is QNaN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
} else { // x is not a NaN, so it must be infinity
if (!x_sign) { // x is +inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is -inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
}
}
// check for non-canonical values (after the check for special values)
if ((C1.w[1] > 0x0001ed09bead87c0ull)
|| (C1.w[1] == 0x0001ed09bead87c0ull
&& (C1.w[0] > 0x378d8e63ffffffffull))
|| ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
res = 0x00000000;
BID_RETURN (res);
} else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
// x is 0
res = 0x00000000;
BID_RETURN (res);
} else { // x is not special and is not zero
// q = nr. of decimal digits in x
// determine first the nr. of bits in x
if (C1.w[1] == 0) {
if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
} else { // x < 2^32
tmp1.d = (double) (C1.w[0]); // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // if x < 2^53
tmp1.d = (double) C1.w[0]; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
tmp1.d = (double) C1.w[1]; // exact conversion
x_nr_bits =
65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
q = nr_digits[x_nr_bits - 1].digits1;
if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
|| (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
&& C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
q++;
}
exp = (x_exp >> 49) - 6176;
if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
} else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in a 32-bit unsigned int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
if (x_sign) { // if n < 0 and q + exp = 10
// if n <= -1 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 1
// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x0a, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x0aull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit uint fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0x0a <=>
// C >= 0x0a * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 1 up)
tmp64 = 0x0aull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1]
|| (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
} else { // if n > 0 and q + exp = 10
// if n >= 2^32 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32
// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0xa00000000, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0xa00000000ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit uint fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0xa00000000 <=>
// C >= 0xa00000000 * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 2^32 up)
tmp64 = 0xa00000000ull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1]
|| (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
}
}
// n is not too large to be converted to uint32: -2^32 < n < 2^32
// Note: some of the cases tested for above fall through to this point
if ((q + exp) <= 0) {
// n = +/-0.0...c(0)c(1)...c(q-1) or n = +/-0.c(0)c(1)...c(q-1)
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// return 0
res = 0x00000000;
BID_RETURN (res);
} else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
// x = d(0)...d(k).d(k+1)..., k >= 0, d(0) != 0
if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// x > 0 from this point on
// 1 <= x < 2^32 so x can be rounded to zero to a 32-bit unsigned integer
if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
C1.w[0] = C1.w[0] + midpoint64[ind - 1];
} else {
C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
}
if (C1.w[0] < tmp64)
C1.w[1]++;
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
// 1 <= x <= 33
// kx = 10^(-x) = ten2mk128[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 118 bits
__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[1] = P256.w[3];
Cstar.w[0] = P256.w[2];
fstar.w[3] = 0;
fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
} else { // 22 <= ind - 1 <= 33
Cstar.w[1] = 0;
Cstar.w[0] = P256.w[3];
fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
fstar.w[2] = P256.w[2];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
}
// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind - 1], e.g.
// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
// if (0 < f* < 10^(-x)) then the result is a midpoint
//   if floor(C*) is even then C* = floor(C*) (logical right
//   shift; C* has p decimal digits, correct by Prop. 1)
//   else if floor(C*) is odd C* = floor(C*)-1 (logical right
//   shift; C* has p decimal digits, correct by Pr. 1)
// else
//   C* = floor(C*) (logical right shift; C* has p decimal digits,
//   correct by Property 1)
// n = C* * 10^(e+x)
// shift right C* by Ex-128 = shiftright128[ind - 1]
shift = shiftright128[ind - 1]; // 0 <= shift <= 102
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[0] =
(Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
// redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
} else { // 22 <= ind - 1 <= 33
Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
}
// determine inexactness of the rounding of C*
// if (0 < f* - 1/2 < 10^(-x)) then
// the result is exact
// else // if (f* - 1/2 > T*) then
// the result is inexact
if (ind - 1 <= 2) {
if (fstar.w[1] > 0x8000000000000000ull ||
(fstar.w[1] == 0x8000000000000000ull
&& fstar.w[0] > 0x0ull)) {
// f* > 1/2 and the result may be exact
tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
if (tmp64 > ten2mk128trunc[ind - 1].w[1]
|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
is_inexact_gt_midpoint = 1;
}
} else if (ind - 1 <= 21) { // if 3 <= ind - 1 <= 21
if (fstar.w[3] > 0x0 ||
(fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
(fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
(fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[2] - onehalf128[ind - 1];
tmp64A = fstar.w[3];
if (tmp64 > fstar.w[2])
tmp64A--;
if (tmp64A || tmp64
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
is_inexact_gt_midpoint = 1;
}
} else { // if 22 <= ind - 1 <= 33
if (fstar.w[3] > onehalf128[ind - 1] ||
(fstar.w[3] == onehalf128[ind - 1] &&
(fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[3] - onehalf128[ind - 1];
if (tmp64 || fstar.w[2]
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
is_inexact_gt_midpoint = 1;
}
}
// if the result was a midpoint it was rounded away from zero, so
// it will need a correction
// check for midpoints
if ((fstar.w[3] == 0) && (fstar.w[2] == 0)
&& (fstar.w[1] || fstar.w[0])
&& (fstar.w[1] < ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] <= ten2mk128trunc[ind - 1].w[0]))) {
// the result is a midpoint; round to nearest
if (Cstar.w[0] & 0x01) { // Cstar.w[0] is odd; MP in [EVEN, ODD]
// if floor(C*) is odd C = floor(C*) - 1; the result >= 1
Cstar.w[0]--; // Cstar.w[0] is now even
is_inexact_gt_midpoint = 0;
} else { // else MP in [ODD, EVEN]
is_midpoint_lt_even = 1;
is_inexact_gt_midpoint = 0;
}
}
// general correction for RZ
if (is_midpoint_lt_even || is_inexact_gt_midpoint) {
Cstar.w[0] = Cstar.w[0] - 1;
} else {
; // exact, the result is already correct
}
res = Cstar.w[0];
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
res = C1.w[0];
} else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q <= 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
res = C1.w[0] * ten2k64[exp];
}
}
}
BID_RETURN (res);
}
/*****************************************************************************
* BID128_to_uint32_rninta
****************************************************************************/
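// rounds x to the nearest 32-bit unsigned integer, with ties rounded away
// from zero; the inexact exception is not signaled by this variant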
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (unsigned int,
bid128_to_uint32_rninta, x)
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
// Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1, C;
UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
UINT256 P256;
// unpack x
x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
C1.w[1] = x.w[1] & MASK_COEFF;
C1.w[0] = x.w[0];
// check for NaN or Infinity
if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
// x is special
if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is QNaN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
} else { // x is not a NaN, so it must be infinity
if (!x_sign) { // x is +inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is -inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
}
}
// check for non-canonical values (after the check for special values)
if ((C1.w[1] > 0x0001ed09bead87c0ull)
|| (C1.w[1] == 0x0001ed09bead87c0ull
&& (C1.w[0] > 0x378d8e63ffffffffull))
|| ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
res = 0x00000000;
BID_RETURN (res);
} else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
// x is 0
res = 0x00000000;
BID_RETURN (res);
} else { // x is not special and is not zero
// q = nr. of decimal digits in x
// determine first the nr. of bits in x
if (C1.w[1] == 0) {
if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
} else { // x < 2^32
tmp1.d = (double) (C1.w[0]); // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // if x < 2^53
tmp1.d = (double) C1.w[0]; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
tmp1.d = (double) C1.w[1]; // exact conversion
x_nr_bits =
65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
q = nr_digits[x_nr_bits - 1].digits1;
if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
|| (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
&& C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
q++;
}
exp = (x_exp >> 49) - 6176;
if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
} else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in a 32-bit unsigned int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
if (x_sign) { // if n < 0 and q + exp = 10
// if n <= -1/2 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 1/2
// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x05, 1<=q<=34
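// (0x05 = 5 = 10 * (1/2); for q + exp = 10 and q <= 11 the scaled value
// computed below equals 10 * |n|)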
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x05ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0x05 <=>
// C >= 0x05 * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 1/2 up)
tmp64 = 0x05ull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1]
|| (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
} else { // if n > 0 and q + exp = 10
// if n >= 2^32 - 1/2 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32-1/2
// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x9fffffffb, 1<=q<=34
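// (0x9fffffffb = 42949672955 = 10 * 2^32 - 5, i.e. 10 * (2^32 - 1/2))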
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x9fffffffbull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0x9fffffffb <=>
// C >= 0x9fffffffb * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 2^32-1/2 up)
tmp64 = 0x9fffffffbull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1]
|| (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
}
}
// n is not too large to be converted to uint32: n < 2^32 - 1/2
// Note: some of the cases tested for above fall through to this point
if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
// return 0
res = 0x00000000;
BID_RETURN (res);
} else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
// if 0.c(0)c(1)...c(q-1) < 0.5 <=> c(0)c(1)...c(q-1) < 5 * 10^(q-1)
// res = 0
// else if x > 0
// res = +1
// else // if x < 0
// invalid exc
ind = q - 1;
if (ind <= 18) { // 0 <= ind <= 18
if ((C1.w[1] == 0) && (C1.w[0] < midpoint64[ind])) {
res = 0x00000000; // return 0
} else if (!x_sign) { // n > 0
res = 0x00000001; // return +1
} else {
res = 0x80000000;
*pfpsf |= INVALID_EXCEPTION;
}
} else { // 19 <= ind <= 33
if ((C1.w[1] < midpoint128[ind - 19].w[1])
|| ((C1.w[1] == midpoint128[ind - 19].w[1])
&& (C1.w[0] < midpoint128[ind - 19].w[0]))) {
res = 0x00000000; // return 0
} else if (!x_sign) { // n > 0
res = 0x00000001; // return +1
} else {
res = 0x80000000;
*pfpsf |= INVALID_EXCEPTION;
}
}
} else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// 1 <= x < 2^32 - 1/2 so x can be rounded
// to nearest-away to a 32-bit unsigned integer
if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
C1.w[0] = C1.w[0] + midpoint64[ind - 1];
} else {
C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
}
if (C1.w[0] < tmp64)
C1.w[1]++;
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
// 1 <= x <= 33
// kx = 10^(-x) = ten2mk128[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 118 bits
__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[1] = P256.w[3];
Cstar.w[0] = P256.w[2];
} else { // 22 <= ind - 1 <= 33
Cstar.w[1] = 0;
Cstar.w[0] = P256.w[3];
}
// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind - 1], e.g.
// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
// if (0 < f* < 10^(-x)) then the result is a midpoint
//   if floor(C*) is even then C* = floor(C*) (logical right
//   shift; C* has p decimal digits, correct by Prop. 1)
//   else if floor(C*) is odd C* = floor(C*)-1 (logical right
//   shift; C* has p decimal digits, correct by Pr. 1)
// else
//   C* = floor(C*) (logical right shift; C* has p decimal digits,
//   correct by Property 1)
// n = C* * 10^(e+x)
// shift right C* by Ex-128 = shiftright128[ind - 1]
shift = shiftright128[ind - 1]; // 0 <= shift <= 102
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[0] =
(Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
// redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
} else { // 22 <= ind - 1 <= 33
Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
}
// if the result was a midpoint, it was already rounded away from zero
res = Cstar.w[0]; // always positive
// no need to check for midpoints - already rounded away from zero!
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
res = C1.w[0];
} else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q <= 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
res = C1.w[0] * ten2k64[exp];
}
}
}
BID_RETURN (res);
}
/*****************************************************************************
* BID128_to_uint32_xrninta
****************************************************************************/
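// this variant behaves like bid128_to_uint32_rninta (round to nearest, ties
// away from zero) but also signals the inexact exception when the result is
// not exact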
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (unsigned int,
bid128_to_uint32_xrninta, x)
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
// Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo (all are UINT64)
UINT64 tmp64, tmp64A;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1, C;
UINT128 Cstar; // C* represents up to 34 decimal digits ~ 113 bits
UINT256 fstar;
UINT256 P256;
unsigned int tmp_inexact = 0;
// unpack x
x_sign = x.w[1] & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
C1.w[1] = x.w[1] & MASK_COEFF;
C1.w[0] = x.w[0];
// check for NaN or Infinity
if ((x.w[1] & MASK_SPECIAL) == MASK_SPECIAL) {
// x is special
if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is QNaN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
} else { // x is not a NaN, so it must be infinity
if (!x_sign) { // x is +inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
} else { // x is -inf
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
}
BID_RETURN (res);
}
}
// check for non-canonical values (after the check for special values)
if ((C1.w[1] > 0x0001ed09bead87c0ull)
|| (C1.w[1] == 0x0001ed09bead87c0ull
&& (C1.w[0] > 0x378d8e63ffffffffull))
|| ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull)) {
res = 0x00000000;
BID_RETURN (res);
} else if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
// x is 0
res = 0x00000000;
BID_RETURN (res);
} else { // x is not special and is not zero
// q = nr. of decimal digits in x
// determine first the nr. of bits in x
if (C1.w[1] == 0) {
if (C1.w[0] >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
if (C1.w[0] >= 0x0000000100000000ull) { // x >= 2^32
tmp1.d = (double) (C1.w[0] >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
} else { // x < 2^32
tmp1.d = (double) (C1.w[0]); // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // if x < 2^53
tmp1.d = (double) C1.w[0]; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
} else { // C1.w[1] != 0 => nr. bits = 64 + nr_bits (C1.w[1])
tmp1.d = (double) C1.w[1]; // exact conversion
x_nr_bits =
65 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
q = nr_digits[x_nr_bits - 1].digits1;
if (C1.w[1] > nr_digits[x_nr_bits - 1].threshold_hi
|| (C1.w[1] == nr_digits[x_nr_bits - 1].threshold_hi
&& C1.w[0] >= nr_digits[x_nr_bits - 1].threshold_lo))
q++;
}
exp = (x_exp >> 49) - 6176;
if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
} else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and are handled with the other cases below,
// under '1 <= q + exp <= 10'
if (x_sign) { // if n < 0 and q + exp = 10
// if n <= -1/2 then n is out of range (it rounds away from zero to <= -1)
// out of range if c(0)c(1)...c(9).c(10)...c(q-1) >= 1/2
// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x05, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x05ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0x05 <=>
// C >= 0x05 * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 1/2 up)
tmp64 = 0x05ull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1]
|| (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
} else { // if n > 0 and q + exp = 10
// if n >= 2^32 - 1/2 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32-1/2
// too large if 0.c(0)c(1)...c(q-1) * 10^11 >= 0x9fffffffb, 1<=q<=34
if (q <= 11) {
tmp64 = C1.w[0] * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x9fffffffbull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
} else { // if (q > 11), i.e. 12 <= q <= 34 and so -24 <= exp <= -2
// 0.c(0)c(1)...c(q-1) * 10^11 >= 0x9fffffffb <=>
// C >= 0x9fffffffb * 10^(q-11) where 1 <= q - 11 <= 23
// (scale 2^32-1/2 up)
tmp64 = 0x9fffffffbull;
if (q - 11 <= 19) { // 1 <= q - 11 <= 19; 10^(q-11) requires 64 bits
__mul_64x64_to_128MACH (C, tmp64, ten2k64[q - 11]);
} else { // 20 <= q - 11 <= 23, and 10^(q-11) requires 128 bits
__mul_128x64_to_128 (C, tmp64, ten2k128[q - 31]);
}
if (C1.w[1] > C.w[1]
|| (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// else cases that can be rounded to a 32-bit int fall through
// to '1 <= q + exp <= 10'
}
}
}
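// Worked example (illustrative): for q = 11, exp = -1 the scaling factor is
// ten2k64[0] = 1, so tmp64 = C1; with C1 = 42949672955 (x = 4294967295.5)
// tmp64 = 0x9fffffffb, rounding away from zero would produce 2^32, and the
// conversion is flagged invalid; with C1 = 42949672954 (x = 4294967295.4)
// tmp64 = 0x9fffffffa < 0x9fffffffb, so the value falls through and is
// later rounded to 4294967295 = 0xffffffff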
// n is not too large to be converted to uint32: -1/2 < n < 2^32 - 1/2
// Note: some of the cases tested for above fall through to this point
if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// return 0
res = 0x00000000;
BID_RETURN (res);
} else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
// if 0.c(0)c(1)...c(q-1) < 0.5 <=> c(0)c(1)...c(q-1) < 5 * 10^(q-1)
// res = 0
// else if x > 0
// res = +1
// else // if x < 0
// invalid exc
ind = q - 1;
if (ind <= 18) { // 0 <= ind <= 18
if ((C1.w[1] == 0) && (C1.w[0] < midpoint64[ind])) {
res = 0x00000000; // return 0
} else if (!x_sign) { // n > 0
res = 0x00000001; // return +1
} else {
res = 0x80000000;
*pfpsf |= INVALID_EXCEPTION;
BID_RETURN (res);
}
} else { // 19 <= ind <= 33
if ((C1.w[1] < midpoint128[ind - 19].w[1])
|| ((C1.w[1] == midpoint128[ind - 19].w[1])
&& (C1.w[0] < midpoint128[ind - 19].w[0]))) {
res = 0x00000000; // return 0
} else if (!x_sign) { // n > 0
res = 0x00000001; // return +1
} else {
res = 0x80000000;
*pfpsf |= INVALID_EXCEPTION;
BID_RETURN (res);
}
}
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
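// Worked example (illustrative): x = 0.7 is C1 = 7, q = 1, exp = -1, so
// ind = 0 and midpoint64[0] = 5; since 7 >= 5, res = 1 and the inexact
// flag is set; x = 0.3 gives C1 = 3 < 5, so res = 0 (also inexact);
// x = -0.7 takes the last branch and signals invalid, because rounding
// away from zero would give -1, which is out of range for an unsigned
// result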
} else { // if (1 <= q + exp <= 10, 1 <= q <= 34, -33 <= exp <= 9)
if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// 1 <= x < 2^32-1/2 so x can be rounded
// to nearest-away to a 32-bit unsigned integer
if (exp < 0) { // 2 <= q <= 34, -33 <= exp <= -1, 1 <= q + exp <= 10
ind = -exp; // 1 <= ind <= 33; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
C1.w[0] = C1.w[0] + midpoint64[ind - 1];
} else {
C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
}
if (C1.w[0] < tmp64)
C1.w[1]++;
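// Worked example (illustrative): x = 3.8 stored as 38 x 10^-1 has ind = 1,
// so midpoint64[0] = 5 is added and C1 becomes 43; truncating the later
// product by 10^-1 then yields floor(4.3) = 4, i.e. 3.8 rounded to nearest
// with ties away from zero; the carry check above propagates any wrap of
// the 64-bit low-word addition into C1.w[1]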
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
// 1 <= x <= 33
// kx = 10^(-x) = ten2mk128[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 118 bits
__mul_128x128_to_256 (P256, C1, ten2mk128[ind - 1]);
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[1] = P256.w[3];
Cstar.w[0] = P256.w[2];
fstar.w[3] = 0;
fstar.w[2] = P256.w[2] & maskhigh128[ind - 1];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
} else { // 22 <= ind - 1 <= 33
Cstar.w[1] = 0;
Cstar.w[0] = P256.w[3];
fstar.w[3] = P256.w[3] & maskhigh128[ind - 1];
fstar.w[2] = P256.w[2];
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
}
// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind - 1], e.g.
// if x=1, T*=ten2mk128trunc[0]=0x19999999999999999999999999999999
// if (0 < f* < 10^(-x)) then the original value was a midpoint; because
// 1/2 * 10^x was added to C1 above, floor(C*) is already the midpoint
// rounded away from zero, so no adjustment is needed
// C* = floor(C*) (logical right shift; C* has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
// shift right C* by Ex-128 = shiftright128[ind - 1]
shift = shiftright128[ind - 1]; // 0 <= shift <= 102
if (ind - 1 <= 21) { // 0 <= ind - 1 <= 21
Cstar.w[0] =
(Cstar.w[0] >> shift) | (Cstar.w[1] << (64 - shift));
// redundant, it will be 0! Cstar.w[1] = (Cstar.w[1] >> shift);
} else { // 22 <= ind - 1 <= 33
Cstar.w[0] = (Cstar.w[0] >> (shift - 64)); // 2 <= shift - 64 <= 38
}
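// In both branches above the net effect is C* = P256 >> (128 + shift):
// ten2mk128[ind - 1] appears to hold 10^(-ind) scaled by 2^(128 + shift)
// and rounded up (see the Ex - 128 comment above), so for ind - 1 >= 22
// the integer part lies entirely in P256.w[3] and only a (shift - 64)-bit
// shift of that word is needed; the split merely avoids shift counts of
// 64 or more on a single 64-bit word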
// if the result was a midpoint, it was already rounded away from zero
// determine inexactness of the rounding of C*
// if (0 < f* - 1/2 < 10^(-x)) then
// the result is exact
// else // if (f* - 1/2 > T*) then
// the result is inexact
if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2, i.e. 1 <= ind <= 3
if (fstar.w[1] > 0x8000000000000000ull ||
(fstar.w[1] == 0x8000000000000000ull
&& fstar.w[0] > 0x0ull)) {
// f* > 1/2 and the result may be exact
tmp64 = fstar.w[1] - 0x8000000000000000ull; // f* - 1/2
if (tmp64 > ten2mk128trunc[ind - 1].w[1]
|| (tmp64 == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] >= ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
// *pfpsf |= INEXACT_EXCEPTION;
tmp_inexact = 1;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
// *pfpsf |= INEXACT_EXCEPTION;
tmp_inexact = 1;
}
} else if (ind - 1 <= 21) { // if 3 <= ind - 1 <= 21
if (fstar.w[3] > 0x0 ||
(fstar.w[3] == 0x0 && fstar.w[2] > onehalf128[ind - 1]) ||
(fstar.w[3] == 0x0 && fstar.w[2] == onehalf128[ind - 1] &&
(fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[2] - onehalf128[ind - 1];
tmp64A = fstar.w[3];
if (tmp64 > fstar.w[2])
tmp64A--;
if (tmp64A || tmp64
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
// *pfpsf |= INEXACT_EXCEPTION;
tmp_inexact = 1;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
// *pfpsf |= INEXACT_EXCEPTION;
tmp_inexact = 1;
}
} else { // if 22 <= ind - 1 <= 33
if (fstar.w[3] > onehalf128[ind - 1] ||
(fstar.w[3] == onehalf128[ind - 1] &&
(fstar.w[2] || fstar.w[1] || fstar.w[0]))) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
tmp64 = fstar.w[3] - onehalf128[ind - 1];
if (tmp64 || fstar.w[2]
|| fstar.w[1] > ten2mk128trunc[ind - 1].w[1]
|| (fstar.w[1] == ten2mk128trunc[ind - 1].w[1]
&& fstar.w[0] > ten2mk128trunc[ind - 1].w[0])) {
// set the inexact flag
// *pfpsf |= INEXACT_EXCEPTION;
tmp_inexact = 1;
} // else the result is exact
} else { // the result is inexact; f2* <= 1/2
// set the inexact flag
// *pfpsf |= INEXACT_EXCEPTION;
tmp_inexact = 1;
}
}
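// Worked example (illustrative): continuing x = 3.8 with ind = 1, the
// fraction f* left below C* represents .3 <= 1/2, so tmp_inexact is set
// and the result 4 is reported inexact; for an input that is already an
// integer, e.g. 30 x 10^-1, the true fraction after adding 1/2 * 10^1 is
// exactly 1/2, and because ten2mk128[] is rounded up the computed f*
// exceeds 1/2 by less than T*, so the f* - 1/2 comparison against
// T* = ten2mk128trunc[0] classifies the conversion as exact and no flag
// is raised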
// no need to check for midpoints - already rounded away from zero!
res = Cstar.w[0]; // the result is positive
if (tmp_inexact)
*pfpsf |= INEXACT_EXCEPTION;
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
res = C1.w[0];
} else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q <= 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
res = C1.w[0] * ten2k64[exp];
}
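// Worked example (illustrative): x = 123 x 10^2 has q = 3, exp = 2, so
// q + exp = 5 and the exact path above returns res = 123 * ten2k64[2]
// = 12300 with no status flags set; x = 7 x 10^0 simply returns res = 7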
}
}
BID_RETURN (res);
}