c6abe94dc1
2005-04-13  Thomas Koenig  <Thomas.Koenig@online.de>

	PR libfortran/19106
	* m4/iforeach.c (name`'rtype_qual`_'atype_code): Add TODO
	that setting correct strides is a front end job.
	(`m'name`'rtype_qual`_'atype_code): Likewise. If mask has a
	lowest stride of 0, adjust to 1.
	* m4/ifunction.c (name`'rtype_qual`_'atype_code): Add TODO
	that setting correct strides is a front end job.
	(`m'name`'rtype_qual`_'atype_code): Likewise. If mask has a
	lowest stride of 0, adjust to 1.
	* maxloc0_4_i4.c: Regenerated
	* maxloc0_4_i8.c: Regenerated
	* maxloc0_4_r4.c: Regenerated
	* maxloc0_4_r8.c: Regenerated
	* maxloc0_8_i4.c: Regenerated
	* maxloc0_8_i8.c: Regenerated
	* maxloc0_8_r4.c: Regenerated
	* maxloc0_8_r8.c: Regenerated
	* maxloc1_4_i4.c: Regenerated
	* maxloc1_4_i8.c: Regenerated
	* maxloc1_4_r4.c: Regenerated
	* maxloc1_4_r8.c: Regenerated
	* maxloc1_8_i4.c: Regenerated
	* maxloc1_8_i8.c: Regenerated
	* maxloc1_8_r4.c: Regenerated
	* maxloc1_8_r8.c: Regenerated
	* maxval_i4.c: Regenerated
	* maxval_i8.c: Regenerated
	* maxval_r4.c: Regenerated
	* maxval_r8.c: Regenerated
	* minloc0_4_i4.c: Regenerated
	* minloc0_4_i8.c: Regenerated
	* minloc0_4_r4.c: Regenerated
	* minloc0_4_r8.c: Regenerated
	* minloc0_8_i4.c: Regenerated
	* minloc0_8_i8.c: Regenerated
	* minloc0_8_r4.c: Regenerated
	* minloc0_8_r8.c: Regenerated
	* minloc1_4_i4.c: Regenerated
	* minloc1_4_i8.c: Regenerated
	* minloc1_4_r4.c: Regenerated
	* minloc1_4_r8.c: Regenerated
	* minloc1_8_i4.c: Regenerated
	* minloc1_8_i8.c: Regenerated
	* minloc1_8_r4.c: Regenerated
	* minloc1_8_r8.c: Regenerated
	* minval_i4.c: Regenerated
	* minval_i8.c: Regenerated
	* minval_r4.c: Regenerated
	* minval_r8.c: Regenerated
	* product_c4.c: Regenerated
	* product_c8.c: Regenerated
	* product_i4.c: Regenerated
	* product_i8.c: Regenerated
	* product_r4.c: Regenerated
	* product_r8.c: Regenerated
	* sum_c4.c: Regenerated
	* sum_c8.c: Regenerated
	* sum_i4.c: Regenerated
	* sum_i8.c: Regenerated
	* sum_r4.c: Regenerated
	* sum_r8.c: Regenerated

2005-04-13  Thomas Koenig  <Thomas.Koenig@online.de>

	PR libfortran/19016
	* gfortran.fortran-torture/execute/intrinsic_mmloc.f90:
	Add tests with mask generated by expression.
	* gfortran.fortran-torture/execute/intrinsic_mmval.f90: Likewise.
	* gfortran.fortran-torture/execute/intrinsic_product.f90: Likewise.
	* gfortran.fortran-torture/execute/intrinsic_sum.f90: Likewise.

From-SVN: r98052
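The fix recorded above guards the reduction loops against a descriptor whose lowest-dimension stride is 0, which can happen when the MASK argument is an expression temporary. The snippet below is a minimal, self-contained C sketch of that adjustment in isolation; the dim_info struct and adjust_lowest_stride helper are hypothetical stand-ins for illustration, not libgfortran's real descriptor type or API (the regenerated file that follows applies the same check directly on gfc_array_r8 / gfc_array_l4).

#include <stdio.h>

/* Hypothetical, simplified stand-in for one dimension of an array
   descriptor; the real layout lives in libgfortran.h.  */
typedef struct
{
  long stride;
  long lbound;
  long ubound;
} dim_info;

/* Mirror of the guard added by this commit: if the lowest stride
   arrives as 0, treat the data as contiguous by forcing it to 1.  */
static void
adjust_lowest_stride (dim_info *dim)
{
  if (dim[0].stride == 0)
    dim[0].stride = 1;
}

int
main (void)
{
  /* A mask temporary handed over with stride 0 (made-up values).  */
  dim_info mask_dim[1] = { { 0, 1, 10 } };

  adjust_lowest_stride (mask_dim);
  printf ("lowest stride after adjustment: %ld\n", mask_dim[0].stride);
  return 0;
}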
/* Implementation of the PRODUCT intrinsic
   Copyright 2002 Free Software Foundation, Inc.
   Contributed by Paul Brook <paul@nowt.org>

This file is part of the GNU Fortran 95 runtime library (libgfortran).

Libgfortran is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

Libgfortran is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public
License along with libgfortran; see the file COPYING.  If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include <stdlib.h>
#include <assert.h>
#include "libgfortran.h"

extern void product_r8 (gfc_array_r8 *, gfc_array_r8 *, index_type *);
export_proto(product_r8);

void
product_r8 (gfc_array_r8 *retarray, gfc_array_r8 *array, index_type *pdim)
{
  index_type count[GFC_MAX_DIMENSIONS - 1];
  index_type extent[GFC_MAX_DIMENSIONS - 1];
  index_type sstride[GFC_MAX_DIMENSIONS - 1];
  index_type dstride[GFC_MAX_DIMENSIONS - 1];
  GFC_REAL_8 *base;
  GFC_REAL_8 *dest;
  index_type rank;
  index_type n;
  index_type len;
  index_type delta;
  index_type dim;

  /* Make dim zero based to avoid confusion.  */
  dim = (*pdim) - 1;
  rank = GFC_DESCRIPTOR_RANK (array) - 1;
  if (array->dim[0].stride == 0)
    array->dim[0].stride = 1;

  len = array->dim[dim].ubound + 1 - array->dim[dim].lbound;
  delta = array->dim[dim].stride;

  for (n = 0; n < dim; n++)
    {
      sstride[n] = array->dim[n].stride;
      extent[n] = array->dim[n].ubound + 1 - array->dim[n].lbound;
    }
  for (n = dim; n < rank; n++)
    {
      sstride[n] = array->dim[n + 1].stride;
      extent[n] =
        array->dim[n + 1].ubound + 1 - array->dim[n + 1].lbound;
    }

  if (retarray->data == NULL)
    {
      for (n = 0; n < rank; n++)
        {
          retarray->dim[n].lbound = 0;
          retarray->dim[n].ubound = extent[n]-1;
          if (n == 0)
            retarray->dim[n].stride = 1;
          else
            retarray->dim[n].stride = retarray->dim[n-1].stride * extent[n-1];
        }

      retarray->data
        = internal_malloc_size (sizeof (GFC_REAL_8)
                                * retarray->dim[rank-1].stride
                                * extent[rank-1]);
      retarray->base = 0;
      retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank;
    }
  else
    {
      if (retarray->dim[0].stride == 0)
        retarray->dim[0].stride = 1;

      if (rank != GFC_DESCRIPTOR_RANK (retarray))
        runtime_error ("rank of return array incorrect");
    }

  for (n = 0; n < rank; n++)
    {
      count[n] = 0;
      dstride[n] = retarray->dim[n].stride;
      if (extent[n] <= 0)
        len = 0;
    }

  base = array->data;
  dest = retarray->data;

  while (base)
    {
      GFC_REAL_8 *src;
      GFC_REAL_8 result;
      src = base;
      {

        result = 1;
        if (len <= 0)
          *dest = 1;
        else
          {
            for (n = 0; n < len; n++, src += delta)
              {

                result *= *src;
              }
            *dest = result;
          }
      }
      /* Advance to the next element.  */
      count[0]++;
      base += sstride[0];
      dest += dstride[0];
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension.  */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it.  */
          base -= sstride[n] * extent[n];
          dest -= dstride[n] * extent[n];
          n++;
          if (n == rank)
            {
              /* Break out of the loop.  */
              base = NULL;
              break;
            }
          else
            {
              count[n]++;
              base += sstride[n];
              dest += dstride[n];
            }
        }
    }
}

extern void mproduct_r8 (gfc_array_r8 *, gfc_array_r8 *, index_type *,
                         gfc_array_l4 *);
export_proto(mproduct_r8);

void
mproduct_r8 (gfc_array_r8 * retarray, gfc_array_r8 * array,
             index_type *pdim, gfc_array_l4 * mask)
{
  index_type count[GFC_MAX_DIMENSIONS - 1];
  index_type extent[GFC_MAX_DIMENSIONS - 1];
  index_type sstride[GFC_MAX_DIMENSIONS - 1];
  index_type dstride[GFC_MAX_DIMENSIONS - 1];
  index_type mstride[GFC_MAX_DIMENSIONS - 1];
  GFC_REAL_8 *dest;
  GFC_REAL_8 *base;
  GFC_LOGICAL_4 *mbase;
  int rank;
  int dim;
  index_type n;
  index_type len;
  index_type delta;
  index_type mdelta;

  dim = (*pdim) - 1;
  rank = GFC_DESCRIPTOR_RANK (array) - 1;
  if (array->dim[0].stride == 0)
    array->dim[0].stride = 1;

  if (mask->dim[0].stride == 0)
    mask->dim[0].stride = 1;

  len = array->dim[dim].ubound + 1 - array->dim[dim].lbound;
  if (len <= 0)
    return;
  delta = array->dim[dim].stride;
  mdelta = mask->dim[dim].stride;

  for (n = 0; n < dim; n++)
    {
      sstride[n] = array->dim[n].stride;
      mstride[n] = mask->dim[n].stride;
      extent[n] = array->dim[n].ubound + 1 - array->dim[n].lbound;
    }
  for (n = dim; n < rank; n++)
    {
      sstride[n] = array->dim[n + 1].stride;
      mstride[n] = mask->dim[n + 1].stride;
      extent[n] =
        array->dim[n + 1].ubound + 1 - array->dim[n + 1].lbound;
    }

  if (retarray->data == NULL)
    {
      for (n = 0; n < rank; n++)
        {
          retarray->dim[n].lbound = 0;
          retarray->dim[n].ubound = extent[n]-1;
          if (n == 0)
            retarray->dim[n].stride = 1;
          else
            retarray->dim[n].stride = retarray->dim[n-1].stride * extent[n-1];
        }

      retarray->data
        = internal_malloc_size (sizeof (GFC_REAL_8)
                                * retarray->dim[rank-1].stride
                                * extent[rank-1]);
      retarray->base = 0;
      retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank;
    }
  else
    {
      if (retarray->dim[0].stride == 0)
        retarray->dim[0].stride = 1;

      if (rank != GFC_DESCRIPTOR_RANK (retarray))
        runtime_error ("rank of return array incorrect");
    }

  for (n = 0; n < rank; n++)
    {
      count[n] = 0;
      dstride[n] = retarray->dim[n].stride;
      if (extent[n] <= 0)
        return;
    }

  dest = retarray->data;
  base = array->data;
  mbase = mask->data;

  if (GFC_DESCRIPTOR_SIZE (mask) != 4)
    {
      /* This allows the same loop to be used for all logical types.  */
      assert (GFC_DESCRIPTOR_SIZE (mask) == 8);
      for (n = 0; n < rank; n++)
        mstride[n] <<= 1;
      mdelta <<= 1;
      mbase = (GFOR_POINTER_L8_TO_L4 (mbase));
    }

  while (base)
    {
      GFC_REAL_8 *src;
      GFC_LOGICAL_4 *msrc;
      GFC_REAL_8 result;
      src = base;
      msrc = mbase;
      {

        result = 1;
        if (len <= 0)
          *dest = 1;
        else
          {
            for (n = 0; n < len; n++, src += delta, msrc += mdelta)
              {

                if (*msrc)
                  result *= *src;
              }
            *dest = result;
          }
      }
      /* Advance to the next element.  */
      count[0]++;
      base += sstride[0];
      mbase += mstride[0];
      dest += dstride[0];
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension.  */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it.  */
          base -= sstride[n] * extent[n];
          mbase -= mstride[n] * extent[n];
          dest -= dstride[n] * extent[n];
          n++;
          if (n == rank)
            {
              /* Break out of the loop.  */
              base = NULL;
              break;
            }
          else
            {
              count[n]++;
              base += sstride[n];
              mbase += mstride[n];
              dest += dstride[n];
            }
        }
    }
}
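As an aside, both loops above step through the non-reduced dimensions with the same counter-per-dimension ("odometer") scheme: bump count[0], and whenever a counter reaches its extent, rewind that dimension by stride * extent and carry into the next one until all dimensions are exhausted. The standalone sketch below replays that pattern on a made-up 3x2 array; the data, extents, and strides are illustrative only and are not taken from libgfortran.

#include <stdio.h>

/* Walk a rank-2 array laid out with explicit strides, using the same
   counter-and-carry traversal as the reduction loops above.  */
int
main (void)
{
  double data[6] = { 1, 2, 3, 4, 5, 6 };   /* 3x2 array, column-major */
  long extent[2] = { 3, 2 };
  long stride[2] = { 1, 3 };
  long count[2] = { 0, 0 };
  double *base = data;

  while (base)
    {
      printf ("%g ", *base);

      /* Advance to the next element.  */
      count[0]++;
      base += stride[0];
      long n = 0;
      while (count[n] == extent[n])
        {
          /* End of a dimension: rewind it and carry into the next one.  */
          count[n] = 0;
          base -= stride[n] * extent[n];
          n++;
          if (n == 2)
            {
              base = NULL;  /* traversal complete */
              break;
            }
          count[n]++;
          base += stride[n];
        }
    }
  printf ("\n");   /* prints: 1 2 3 4 5 6 */
  return 0;
}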