5863aacf5b
2008-05-18  Thomas Koenig  <tkoenig@gcc.gnu.org>

        * m4/in_pack.m4 (internal_pack_'rtype_code`): Destination
        pointer is restrict.
        * m4/transpose.m4 (transpose_'rtype_code`): Likewise.
        * m4/pack.m4 (pack_'rtype_code`): Likewise.
        * m4/spread.m4 (spread_'rtype_code`): Likewise.
        (spread_scalar_'rtype_code`): Likewise.
        * m4/iforeach.m4 (name`'rtype_qual`_'atype_code): Likewise.
        * m4/eoshift1.m4 (eoshift1): Likewise.
        * m4/eoshift3.m4 (eoshift3): Likewise.
        * m4/in_unpack.m4 (internal_unpack_'rtype_ccode`): Likewise.
        * m4/unpack.m4 (unpack0_'rtype_code`): Likewise.
        (unpack1_'rtype_code`): Likewise.
        * intrinsics/pack_generic.c (pack_generic.c): Likewise.
        * intrinsics/unpack_generic.c (unpack_internal): Likewise.
        * intrinsics/eoshift0.c (eoshift0): Likewise.
        * intrinsics/eoshift2.c (eoshift2): Likewise.
        * intrinsics/reshape_generic.c (reshape_internal): Likewise.
        * intrinsics/reshape_packed.c (reshape_packed): Likewise.
        * generated/eoshift1_16.c: Regenerated.
        * generated/eoshift1_4.c: Regenerated.
        * generated/eoshift1_8.c: Regenerated.
        * generated/eoshift3_16.c: Regenerated.
        * generated/eoshift3_4.c: Regenerated.
        * generated/eoshift3_8.c: Regenerated.
        * generated/in_pack_c10.c: Regenerated.
        * generated/in_pack_c16.c: Regenerated.
        * generated/in_pack_c4.c: Regenerated.
        * generated/in_pack_c8.c: Regenerated.
        * generated/in_pack_i1.c: Regenerated.
        * generated/in_pack_i16.c: Regenerated.
        * generated/in_pack_i2.c: Regenerated.
        * generated/in_pack_i4.c: Regenerated.
        * generated/in_pack_i8.c: Regenerated.
        * generated/in_pack_r10.c: Regenerated.
        * generated/in_pack_r16.c: Regenerated.
        * generated/in_pack_r4.c: Regenerated.
        * generated/in_pack_r8.c: Regenerated.
        * generated/in_unpack_c10.c: Regenerated.
        * generated/in_unpack_c16.c: Regenerated.
        * generated/in_unpack_c4.c: Regenerated.
        * generated/in_unpack_c8.c: Regenerated.
        * generated/in_unpack_i1.c: Regenerated.
        * generated/in_unpack_i16.c: Regenerated.
        * generated/in_unpack_i2.c: Regenerated.
        * generated/in_unpack_i4.c: Regenerated.
        * generated/in_unpack_i8.c: Regenerated.
        * generated/in_unpack_r10.c: Regenerated.
        * generated/in_unpack_r16.c: Regenerated.
        * generated/in_unpack_r4.c: Regenerated.
        * generated/in_unpack_r8.c: Regenerated.
        * generated/maxloc0_16_i1.c: Regenerated.
        * generated/maxloc0_16_i16.c: Regenerated.
        * generated/maxloc0_16_i2.c: Regenerated.
        * generated/maxloc0_16_i4.c: Regenerated.
        * generated/maxloc0_16_i8.c: Regenerated.
        * generated/maxloc0_16_r10.c: Regenerated.
        * generated/maxloc0_16_r16.c: Regenerated.
        * generated/maxloc0_16_r4.c: Regenerated.
        * generated/maxloc0_16_r8.c: Regenerated.
        * generated/maxloc0_4_i1.c: Regenerated.
        * generated/maxloc0_4_i16.c: Regenerated.
        * generated/maxloc0_4_i2.c: Regenerated.
        * generated/maxloc0_4_i4.c: Regenerated.
        * generated/maxloc0_4_i8.c: Regenerated.
        * generated/maxloc0_4_r10.c: Regenerated.
        * generated/maxloc0_4_r16.c: Regenerated.
        * generated/maxloc0_4_r4.c: Regenerated.
        * generated/maxloc0_4_r8.c: Regenerated.
        * generated/maxloc0_8_i1.c: Regenerated.
        * generated/maxloc0_8_i16.c: Regenerated.
        * generated/maxloc0_8_i2.c: Regenerated.
        * generated/maxloc0_8_i4.c: Regenerated.
        * generated/maxloc0_8_i8.c: Regenerated.
        * generated/maxloc0_8_r10.c: Regenerated.
        * generated/maxloc0_8_r16.c: Regenerated.
        * generated/maxloc0_8_r4.c: Regenerated.
        * generated/maxloc0_8_r8.c: Regenerated.
        * generated/minloc0_16_i1.c: Regenerated.
        * generated/minloc0_16_i16.c: Regenerated.
        * generated/minloc0_16_i2.c: Regenerated.
        * generated/minloc0_16_i4.c: Regenerated.
        * generated/minloc0_16_i8.c: Regenerated.
        * generated/minloc0_16_r10.c: Regenerated.
        * generated/minloc0_16_r16.c: Regenerated.
        * generated/minloc0_16_r4.c: Regenerated.
        * generated/minloc0_16_r8.c: Regenerated.
        * generated/minloc0_4_i1.c: Regenerated.
        * generated/minloc0_4_i16.c: Regenerated.
        * generated/minloc0_4_i2.c: Regenerated.
        * generated/minloc0_4_i4.c: Regenerated.
        * generated/minloc0_4_i8.c: Regenerated.
        * generated/minloc0_4_r10.c: Regenerated.
        * generated/minloc0_4_r16.c: Regenerated.
        * generated/minloc0_4_r4.c: Regenerated.
        * generated/minloc0_4_r8.c: Regenerated.
        * generated/minloc0_8_i1.c: Regenerated.
        * generated/minloc0_8_i16.c: Regenerated.
        * generated/minloc0_8_i2.c: Regenerated.
        * generated/minloc0_8_i4.c: Regenerated.
        * generated/minloc0_8_i8.c: Regenerated.
        * generated/minloc0_8_r10.c: Regenerated.
        * generated/minloc0_8_r16.c: Regenerated.
        * generated/minloc0_8_r4.c: Regenerated.
        * generated/minloc0_8_r8.c: Regenerated.
        * generated/pack_c10.c: Regenerated.
        * generated/pack_c16.c: Regenerated.
        * generated/pack_c4.c: Regenerated.
        * generated/pack_c8.c: Regenerated.
        * generated/pack_i1.c: Regenerated.
        * generated/pack_i16.c: Regenerated.
        * generated/pack_i2.c: Regenerated.
        * generated/pack_i4.c: Regenerated.
        * generated/pack_i8.c: Regenerated.
        * generated/pack_r10.c: Regenerated.
        * generated/pack_r16.c: Regenerated.
        * generated/pack_r4.c: Regenerated.
        * generated/pack_r8.c: Regenerated.
        * generated/spread_c10.c: Regenerated.
        * generated/spread_c16.c: Regenerated.
        * generated/spread_c4.c: Regenerated.
        * generated/spread_c8.c: Regenerated.
        * generated/spread_i1.c: Regenerated.
        * generated/spread_i16.c: Regenerated.
        * generated/spread_i2.c: Regenerated.
        * generated/spread_i4.c: Regenerated.
        * generated/spread_i8.c: Regenerated.
        * generated/spread_r10.c: Regenerated.
        * generated/spread_r16.c: Regenerated.
        * generated/spread_r4.c: Regenerated.
        * generated/spread_r8.c: Regenerated.
        * generated/transpose_c10.c: Regenerated.
        * generated/transpose_c16.c: Regenerated.
        * generated/transpose_c4.c: Regenerated.
        * generated/transpose_c8.c: Regenerated.
        * generated/transpose_i16.c: Regenerated.
        * generated/transpose_i4.c: Regenerated.
        * generated/transpose_i8.c: Regenerated.
        * generated/transpose_r10.c: Regenerated.
        * generated/transpose_r16.c: Regenerated.
        * generated/transpose_r4.c: Regenerated.
        * generated/transpose_r8.c: Regenerated.
        * generated/unpack_c10.c: Regenerated.
        * generated/unpack_c16.c: Regenerated.
        * generated/unpack_c4.c: Regenerated.
        * generated/unpack_c8.c: Regenerated.
        * generated/unpack_i1.c: Regenerated.
        * generated/unpack_i16.c: Regenerated.
        * generated/unpack_i2.c: Regenerated.
        * generated/unpack_i4.c: Regenerated.
        * generated/unpack_i8.c: Regenerated.
        * generated/unpack_r10.c: Regenerated.
        * generated/unpack_r16.c: Regenerated.
        * generated/unpack_r4.c: Regenerated.
        * generated/unpack_r8.c: Regenerated.

From-SVN: r135512
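The change recorded above adds the C99 restrict qualifier to the destination
pointer of each affected routine, promising the compiler that the destination
array does not alias the source or mask arrays, so the element-copy loops can
be optimized (for example, vectorized) without runtime alias checks.  A minimal
sketch of the idea, using a hypothetical copy_elements helper rather than any
actual libgfortran routine:

/* Sketch only: declaring 'dest' restrict asserts that it does not overlap
   'src', so the compiler may vectorize the loop without alias checks.  */
static void
copy_elements (int *restrict dest, const int *src, int n)
{
  int i;
  for (i = 0; i < n; i++)
    dest[i] = src[i];
}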
/* Specific implementation of the UNPACK intrinsic
   Copyright 2008 Free Software Foundation, Inc.
   Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
   unpack_generic.c by Paul Brook <paul@nowt.org>.

This file is part of the GNU Fortran 95 runtime library (libgfortran).

Libgfortran is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

Libgfortran is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public
License along with libgfortran; see the file COPYING.  If not,
write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "libgfortran.h"
|
|
#include <stdlib.h>
|
|
#include <assert.h>
|
|
#include <string.h>
|
|
|
|
|
|
#if defined (HAVE_GFC_INTEGER_16)
|
|
|
|
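/* unpack0_i16 implements UNPACK (VECTOR, MASK, FIELD) for kind=16 integers
   when FIELD is a scalar: the result takes the shape of MASK, elements of
   VECTOR are copied wherever MASK is true, and the single FIELD value is
   used everywhere else.  */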
void
unpack0_i16 (gfc_array_i16 *ret, const gfc_array_i16 *vector,
             const gfc_array_l1 *mask, const GFC_INTEGER_16 *fptr)
{
  /* r.* indicates the return array.  */
  index_type rstride[GFC_MAX_DIMENSIONS];
  index_type rstride0;
  index_type rs;
  GFC_INTEGER_16 * restrict rptr;
  /* v.* indicates the vector array.  */
  index_type vstride0;
  GFC_INTEGER_16 *vptr;
  /* Value for field, this is constant.  */
  const GFC_INTEGER_16 fval = *fptr;
  /* m.* indicates the mask array.  */
  index_type mstride[GFC_MAX_DIMENSIONS];
  index_type mstride0;
  const GFC_LOGICAL_1 *mptr;

  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type n;
  index_type dim;

  int empty;
  int mask_kind;

  empty = 0;

  mptr = mask->data;

  /* Use the same loop for all logical types, by using GFC_LOGICAL_1
     and using shifting to address size and endian issues.  */

  mask_kind = GFC_DESCRIPTOR_SIZE (mask);

  if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
#ifdef HAVE_GFC_LOGICAL_16
      || mask_kind == 16
#endif
      )
    {
      /* Do not convert a NULL pointer as we test for NULL below.  */
      if (mptr)
        mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
    }
  else
    runtime_error ("Funny sized logical array");

  if (ret->data == NULL)
    {
      /* The front end has signalled that we need to populate the
         return array descriptor.  */
      dim = GFC_DESCRIPTOR_RANK (mask);
      rs = 1;
      for (n = 0; n < dim; n++)
        {
          count[n] = 0;
          ret->dim[n].stride = rs;
          ret->dim[n].lbound = 0;
          ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
          extent[n] = ret->dim[n].ubound + 1;
          empty = empty || extent[n] <= 0;
          rstride[n] = ret->dim[n].stride;
          mstride[n] = mask->dim[n].stride * mask_kind;
          rs *= extent[n];
        }
      ret->offset = 0;
      ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_16));
    }
  else
    {
      dim = GFC_DESCRIPTOR_RANK (ret);
      for (n = 0; n < dim; n++)
        {
          count[n] = 0;
          extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
          empty = empty || extent[n] <= 0;
          rstride[n] = ret->dim[n].stride;
          mstride[n] = mask->dim[n].stride * mask_kind;
        }
      if (rstride[0] == 0)
        rstride[0] = 1;
    }

  if (empty)
    return;

  if (mstride[0] == 0)
    mstride[0] = 1;

  vstride0 = vector->dim[0].stride;
  if (vstride0 == 0)
    vstride0 = 1;
  rstride0 = rstride[0];
  mstride0 = mstride[0];
  rptr = ret->data;
  vptr = vector->data;

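  /* Walk the result in array-element order.  count[]/extent[] act as an
     odometer over the dimensions: each step advances the first dimension,
     and when a dimension is exhausted it is reset and the next one is
     incremented.  */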
  while (rptr)
    {
      if (*mptr)
        {
          /* From vector.  */
          *rptr = *vptr;
          vptr += vstride0;
        }
      else
        {
          /* From field.  */
          *rptr = fval;
        }
      /* Advance to the next element.  */
      rptr += rstride0;
      mptr += mstride0;
      count[0]++;
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension.  */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it.  */
          rptr -= rstride[n] * extent[n];
          mptr -= mstride[n] * extent[n];
          n++;
          if (n >= dim)
            {
              /* Break out of the loop.  */
              rptr = NULL;
              break;
            }
          else
            {
              count[n]++;
              rptr += rstride[n];
              mptr += mstride[n];
            }
        }
    }
}

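/* unpack1_i16 is the same as unpack0_i16 except that FIELD is an array:
   where MASK is false the corresponding element of FIELD is copied instead
   of a single scalar value.  */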
void
unpack1_i16 (gfc_array_i16 *ret, const gfc_array_i16 *vector,
             const gfc_array_l1 *mask, const gfc_array_i16 *field)
{
  /* r.* indicates the return array.  */
  index_type rstride[GFC_MAX_DIMENSIONS];
  index_type rstride0;
  index_type rs;
  GFC_INTEGER_16 * restrict rptr;
  /* v.* indicates the vector array.  */
  index_type vstride0;
  GFC_INTEGER_16 *vptr;
  /* f.* indicates the field array.  */
  index_type fstride[GFC_MAX_DIMENSIONS];
  index_type fstride0;
  const GFC_INTEGER_16 *fptr;
  /* m.* indicates the mask array.  */
  index_type mstride[GFC_MAX_DIMENSIONS];
  index_type mstride0;
  const GFC_LOGICAL_1 *mptr;

  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type n;
  index_type dim;

  int empty;
  int mask_kind;

  empty = 0;

  mptr = mask->data;

  /* Use the same loop for all logical types, by using GFC_LOGICAL_1
     and using shifting to address size and endian issues.  */

  mask_kind = GFC_DESCRIPTOR_SIZE (mask);

  if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
#ifdef HAVE_GFC_LOGICAL_16
      || mask_kind == 16
#endif
      )
    {
      /* Do not convert a NULL pointer as we test for NULL below.  */
      if (mptr)
        mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
    }
  else
    runtime_error ("Funny sized logical array");

  if (ret->data == NULL)
    {
      /* The front end has signalled that we need to populate the
         return array descriptor.  */
      dim = GFC_DESCRIPTOR_RANK (mask);
      rs = 1;
      for (n = 0; n < dim; n++)
        {
          count[n] = 0;
          ret->dim[n].stride = rs;
          ret->dim[n].lbound = 0;
          ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
          extent[n] = ret->dim[n].ubound + 1;
          empty = empty || extent[n] <= 0;
          rstride[n] = ret->dim[n].stride;
          fstride[n] = field->dim[n].stride;
          mstride[n] = mask->dim[n].stride * mask_kind;
          rs *= extent[n];
        }
      ret->offset = 0;
      ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_16));
    }
  else
    {
      dim = GFC_DESCRIPTOR_RANK (ret);
      for (n = 0; n < dim; n++)
        {
          count[n] = 0;
          extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
          empty = empty || extent[n] <= 0;
          rstride[n] = ret->dim[n].stride;
          fstride[n] = field->dim[n].stride;
          mstride[n] = mask->dim[n].stride * mask_kind;
        }
      if (rstride[0] == 0)
        rstride[0] = 1;
    }

  if (empty)
    return;

  if (fstride[0] == 0)
    fstride[0] = 1;
  if (mstride[0] == 0)
    mstride[0] = 1;

  vstride0 = vector->dim[0].stride;
  if (vstride0 == 0)
    vstride0 = 1;
  rstride0 = rstride[0];
  fstride0 = fstride[0];
  mstride0 = mstride[0];
  rptr = ret->data;
  fptr = field->data;
  vptr = vector->data;

  while (rptr)
    {
      if (*mptr)
        {
          /* From vector.  */
          *rptr = *vptr;
          vptr += vstride0;
        }
      else
        {
          /* From field.  */
          *rptr = *fptr;
        }
      /* Advance to the next element.  */
      rptr += rstride0;
      fptr += fstride0;
      mptr += mstride0;
      count[0]++;
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension.  */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it.  */
          rptr -= rstride[n] * extent[n];
          fptr -= fstride[n] * extent[n];
          mptr -= mstride[n] * extent[n];
          n++;
          if (n >= dim)
            {
              /* Break out of the loop.  */
              rptr = NULL;
              break;
            }
          else
            {
              count[n]++;
              rptr += rstride[n];
              fptr += fstride[n];
              mptr += mstride[n];
            }
        }
    }
}

#endif