802367d7c9
2008-04-30  Thomas Koenig  <tkoenig@gcc.gnu.org>

        PR libfortran/35993
        * ifunction.m4 (SCALAR_ARRAY_FUNCTION): Use correct implementation
        for multi-dimensional return arrays when the mask is .false.
        * generated/maxloc1_16_i1.c: Regenerated.
        * generated/maxloc1_16_i16.c: Regenerated.
        * generated/maxloc1_16_i2.c: Regenerated.
        * generated/maxloc1_16_i4.c: Regenerated.
        * generated/maxloc1_16_i8.c: Regenerated.
        * generated/maxloc1_16_r10.c: Regenerated.
        * generated/maxloc1_16_r16.c: Regenerated.
        * generated/maxloc1_16_r4.c: Regenerated.
        * generated/maxloc1_16_r8.c: Regenerated.
        * generated/maxloc1_4_i1.c: Regenerated.
        * generated/maxloc1_4_i16.c: Regenerated.
        * generated/maxloc1_4_i2.c: Regenerated.
        * generated/maxloc1_4_i4.c: Regenerated.
        * generated/maxloc1_4_i8.c: Regenerated.
        * generated/maxloc1_4_r10.c: Regenerated.
        * generated/maxloc1_4_r16.c: Regenerated.
        * generated/maxloc1_4_r4.c: Regenerated.
        * generated/maxloc1_4_r8.c: Regenerated.
        * generated/maxloc1_8_i1.c: Regenerated.
        * generated/maxloc1_8_i16.c: Regenerated.
        * generated/maxloc1_8_i2.c: Regenerated.
        * generated/maxloc1_8_i4.c: Regenerated.
        * generated/maxloc1_8_i8.c: Regenerated.
        * generated/maxloc1_8_r10.c: Regenerated.
        * generated/maxloc1_8_r16.c: Regenerated.
        * generated/maxloc1_8_r4.c: Regenerated.
        * generated/maxloc1_8_r8.c: Regenerated.
        * generated/maxval_i1.c: Regenerated.
        * generated/maxval_i16.c: Regenerated.
        * generated/maxval_i2.c: Regenerated.
        * generated/maxval_i4.c: Regenerated.
        * generated/maxval_i8.c: Regenerated.
        * generated/maxval_r10.c: Regenerated.
        * generated/maxval_r16.c: Regenerated.
        * generated/maxval_r4.c: Regenerated.
        * generated/maxval_r8.c: Regenerated.
        * generated/minloc1_16_i1.c: Regenerated.
        * generated/minloc1_16_i16.c: Regenerated.
        * generated/minloc1_16_i2.c: Regenerated.
        * generated/minloc1_16_i4.c: Regenerated.
        * generated/minloc1_16_i8.c: Regenerated.
        * generated/minloc1_16_r10.c: Regenerated.
        * generated/minloc1_16_r16.c: Regenerated.
        * generated/minloc1_16_r4.c: Regenerated.
        * generated/minloc1_16_r8.c: Regenerated.
        * generated/minloc1_4_i1.c: Regenerated.
        * generated/minloc1_4_i16.c: Regenerated.
        * generated/minloc1_4_i2.c: Regenerated.
        * generated/minloc1_4_i4.c: Regenerated.
        * generated/minloc1_4_i8.c: Regenerated.
        * generated/minloc1_4_r10.c: Regenerated.
        * generated/minloc1_4_r16.c: Regenerated.
        * generated/minloc1_4_r4.c: Regenerated.
        * generated/minloc1_4_r8.c: Regenerated.
        * generated/minloc1_8_i1.c: Regenerated.
        * generated/minloc1_8_i16.c: Regenerated.
        * generated/minloc1_8_i2.c: Regenerated.
        * generated/minloc1_8_i4.c: Regenerated.
        * generated/minloc1_8_i8.c: Regenerated.
        * generated/minloc1_8_r10.c: Regenerated.
        * generated/minloc1_8_r16.c: Regenerated.
        * generated/minloc1_8_r4.c: Regenerated.
        * generated/minloc1_8_r8.c: Regenerated.
        * generated/minval_i1.c: Regenerated.
        * generated/minval_i16.c: Regenerated.
        * generated/minval_i2.c: Regenerated.
        * generated/minval_i4.c: Regenerated.
        * generated/minval_i8.c: Regenerated.
        * generated/minval_r10.c: Regenerated.
        * generated/minval_r16.c: Regenerated.
        * generated/minval_r4.c: Regenerated.
        * generated/minval_r8.c: Regenerated.
        * generated/product_c10.c: Regenerated.
        * generated/product_c16.c: Regenerated.
        * generated/product_c4.c: Regenerated.
        * generated/product_c8.c: Regenerated.
        * generated/product_i1.c: Regenerated.
        * generated/product_i16.c: Regenerated.
        * generated/product_i2.c: Regenerated.
        * generated/product_i4.c: Regenerated.
        * generated/product_i8.c: Regenerated.
        * generated/product_r10.c: Regenerated.
        * generated/product_r16.c: Regenerated.
        * generated/product_r4.c: Regenerated.
        * generated/product_r8.c: Regenerated.
        * generated/sum_c10.c: Regenerated.
        * generated/sum_c16.c: Regenerated.
        * generated/sum_c4.c: Regenerated.
        * generated/sum_c8.c: Regenerated.
        * generated/sum_i1.c: Regenerated.
        * generated/sum_i16.c: Regenerated.
        * generated/sum_i2.c: Regenerated.
        * generated/sum_i4.c: Regenerated.
        * generated/sum_i8.c: Regenerated.
        * generated/sum_r10.c: Regenerated.
        * generated/sum_r16.c: Regenerated.
        * generated/sum_r4.c: Regenerated.
        * generated/sum_r8.c: Regenerated.

2008-04-30  Thomas Koenig  <tkoenig@gcc.gnu.org>

        PR libfortran/35993
        * gfortran.dg/intrinsic_product_1.f90: New test case.

From-SVN: r134830
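The ifunction.m4 change affects the scalar-mask entry points (ssum_c10 and friends in the regenerated file below): when the MASK argument is a scalar .false. and DIM leaves a result of rank two or more, every element of the return array must be set to the reduction's identity value. A minimal Fortran sketch of that situation (a hypothetical illustration, not the contents of the committed intrinsic_product_1.f90 test case):

  program pr35993_sketch
    implicit none
    real :: a(2,3,4)
    a = 2.0
    ! DIM=1 on a rank-3 array yields a rank-2 result; with a scalar
    ! .false. mask every element must be the identity of the reduction:
    ! 1 for PRODUCT and 0 for SUM.
    if (any (product (a, dim=1, mask=.false.) /= 1.0)) call abort
    if (any (sum (a, dim=1, mask=.false.) /= 0.0)) call abort
  end program pr35993_sketch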
547 lines, 14 KiB, C
/* Implementation of the SUM intrinsic
   Copyright 2002, 2007 Free Software Foundation, Inc.
   Contributed by Paul Brook <paul@nowt.org>

This file is part of the GNU Fortran 95 runtime library (libgfortran).

Libgfortran is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

Libgfortran is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public
License along with libgfortran; see the file COPYING. If not,
write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */

#include "libgfortran.h"
#include <stdlib.h>
#include <assert.h>

#if defined (HAVE_GFC_COMPLEX_10) && defined (HAVE_GFC_COMPLEX_10)


extern void sum_c10 (gfc_array_c10 * const restrict,
  gfc_array_c10 * const restrict, const index_type * const restrict);
export_proto(sum_c10);

void
sum_c10 (gfc_array_c10 * const restrict retarray,
  gfc_array_c10 * const restrict array,
  const index_type * const restrict pdim)
{
  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type sstride[GFC_MAX_DIMENSIONS];
  index_type dstride[GFC_MAX_DIMENSIONS];
  const GFC_COMPLEX_10 * restrict base;
  GFC_COMPLEX_10 * restrict dest;
  index_type rank;
  index_type n;
  index_type len;
  index_type delta;
  index_type dim;

  /* Make dim zero based to avoid confusion. */
  dim = (*pdim) - 1;
  rank = GFC_DESCRIPTOR_RANK (array) - 1;

  len = array->dim[dim].ubound + 1 - array->dim[dim].lbound;
  delta = array->dim[dim].stride;

  for (n = 0; n < dim; n++)
    {
      sstride[n] = array->dim[n].stride;
      extent[n] = array->dim[n].ubound + 1 - array->dim[n].lbound;

      if (extent[n] < 0)
        extent[n] = 0;
    }
  for (n = dim; n < rank; n++)
    {
      sstride[n] = array->dim[n + 1].stride;
      extent[n] =
        array->dim[n + 1].ubound + 1 - array->dim[n + 1].lbound;

      if (extent[n] < 0)
        extent[n] = 0;
    }

  if (retarray->data == NULL)
    {
      size_t alloc_size;

      for (n = 0; n < rank; n++)
        {
          retarray->dim[n].lbound = 0;
          retarray->dim[n].ubound = extent[n]-1;
          if (n == 0)
            retarray->dim[n].stride = 1;
          else
            retarray->dim[n].stride = retarray->dim[n-1].stride * extent[n-1];
        }

      retarray->offset = 0;
      retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank;

      alloc_size = sizeof (GFC_COMPLEX_10) * retarray->dim[rank-1].stride
                   * extent[rank-1];

      if (alloc_size == 0)
        {
          /* Make sure we have a zero-sized array. */
          retarray->dim[0].lbound = 0;
          retarray->dim[0].ubound = -1;
          return;
        }
      else
        retarray->data = internal_malloc_size (alloc_size);
    }
  else
    {
      if (rank != GFC_DESCRIPTOR_RANK (retarray))
        runtime_error ("rank of return array incorrect in"
                       " SUM intrinsic: is %ld, should be %ld",
                       (long int) (GFC_DESCRIPTOR_RANK (retarray)),
                       (long int) rank);

      if (compile_options.bounds_check)
        {
          for (n=0; n < rank; n++)
            {
              index_type ret_extent;

              ret_extent = retarray->dim[n].ubound + 1
                           - retarray->dim[n].lbound;
              if (extent[n] != ret_extent)
                runtime_error ("Incorrect extent in return value of"
                               " SUM intrinsic in dimension %ld:"
                               " is %ld, should be %ld", (long int) n + 1,
                               (long int) ret_extent, (long int) extent[n]);
            }
        }
    }

  for (n = 0; n < rank; n++)
    {
      count[n] = 0;
      dstride[n] = retarray->dim[n].stride;
      if (extent[n] <= 0)
        len = 0;
    }

  base = array->data;
  dest = retarray->data;

  while (base)
    {
      const GFC_COMPLEX_10 * restrict src;
      GFC_COMPLEX_10 result;
      src = base;
      {

        result = 0;
        if (len <= 0)
          *dest = 0;
        else
          {
            for (n = 0; n < len; n++, src += delta)
              {

                result += *src;
              }
            *dest = result;
          }
      }
      /* Advance to the next element. */
      count[0]++;
      base += sstride[0];
      dest += dstride[0];
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension. */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it. */
          base -= sstride[n] * extent[n];
          dest -= dstride[n] * extent[n];
          n++;
          if (n == rank)
            {
              /* Break out of the loop. */
              base = NULL;
              break;
            }
          else
            {
              count[n]++;
              base += sstride[n];
              dest += dstride[n];
            }
        }
    }
}

extern void msum_c10 (gfc_array_c10 * const restrict,
  gfc_array_c10 * const restrict, const index_type * const restrict,
  gfc_array_l1 * const restrict);
export_proto(msum_c10);

void
msum_c10 (gfc_array_c10 * const restrict retarray,
  gfc_array_c10 * const restrict array,
  const index_type * const restrict pdim,
  gfc_array_l1 * const restrict mask)
{
  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type sstride[GFC_MAX_DIMENSIONS];
  index_type dstride[GFC_MAX_DIMENSIONS];
  index_type mstride[GFC_MAX_DIMENSIONS];
  GFC_COMPLEX_10 * restrict dest;
  const GFC_COMPLEX_10 * restrict base;
  const GFC_LOGICAL_1 * restrict mbase;
  int rank;
  int dim;
  index_type n;
  index_type len;
  index_type delta;
  index_type mdelta;
  int mask_kind;

  dim = (*pdim) - 1;
  rank = GFC_DESCRIPTOR_RANK (array) - 1;

  len = array->dim[dim].ubound + 1 - array->dim[dim].lbound;
  if (len <= 0)
    return;

  mbase = mask->data;

  mask_kind = GFC_DESCRIPTOR_SIZE (mask);

  if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
#ifdef HAVE_GFC_LOGICAL_16
      || mask_kind == 16
#endif
      )
    mbase = GFOR_POINTER_TO_L1 (mbase, mask_kind);
  else
    runtime_error ("Funny sized logical array");

  delta = array->dim[dim].stride;
  mdelta = mask->dim[dim].stride * mask_kind;

  for (n = 0; n < dim; n++)
    {
      sstride[n] = array->dim[n].stride;
      mstride[n] = mask->dim[n].stride * mask_kind;
      extent[n] = array->dim[n].ubound + 1 - array->dim[n].lbound;

      if (extent[n] < 0)
        extent[n] = 0;

    }
  for (n = dim; n < rank; n++)
    {
      sstride[n] = array->dim[n + 1].stride;
      mstride[n] = mask->dim[n + 1].stride * mask_kind;
      extent[n] =
        array->dim[n + 1].ubound + 1 - array->dim[n + 1].lbound;

      if (extent[n] < 0)
        extent[n] = 0;
    }

  if (retarray->data == NULL)
    {
      size_t alloc_size;

      for (n = 0; n < rank; n++)
        {
          retarray->dim[n].lbound = 0;
          retarray->dim[n].ubound = extent[n]-1;
          if (n == 0)
            retarray->dim[n].stride = 1;
          else
            retarray->dim[n].stride = retarray->dim[n-1].stride * extent[n-1];
        }

      alloc_size = sizeof (GFC_COMPLEX_10) * retarray->dim[rank-1].stride
                   * extent[rank-1];

      retarray->offset = 0;
      retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank;

      if (alloc_size == 0)
        {
          /* Make sure we have a zero-sized array. */
          retarray->dim[0].lbound = 0;
          retarray->dim[0].ubound = -1;
          return;
        }
      else
        retarray->data = internal_malloc_size (alloc_size);

    }
  else
    {
      if (rank != GFC_DESCRIPTOR_RANK (retarray))
        runtime_error ("rank of return array incorrect in SUM intrinsic");

      if (compile_options.bounds_check)
        {
          for (n=0; n < rank; n++)
            {
              index_type ret_extent;

              ret_extent = retarray->dim[n].ubound + 1
                           - retarray->dim[n].lbound;
              if (extent[n] != ret_extent)
                runtime_error ("Incorrect extent in return value of"
                               " SUM intrinsic in dimension %ld:"
                               " is %ld, should be %ld", (long int) n + 1,
                               (long int) ret_extent, (long int) extent[n]);
            }
          for (n=0; n<= rank; n++)
            {
              index_type mask_extent, array_extent;

              array_extent = array->dim[n].ubound + 1 - array->dim[n].lbound;
              mask_extent = mask->dim[n].ubound + 1 - mask->dim[n].lbound;
              if (array_extent != mask_extent)
                runtime_error ("Incorrect extent in MASK argument of"
                               " SUM intrinsic in dimension %ld:"
                               " is %ld, should be %ld", (long int) n + 1,
                               (long int) mask_extent, (long int) array_extent);
            }
        }
    }

  for (n = 0; n < rank; n++)
    {
      count[n] = 0;
      dstride[n] = retarray->dim[n].stride;
      if (extent[n] <= 0)
        return;
    }

  dest = retarray->data;
  base = array->data;

  while (base)
    {
      const GFC_COMPLEX_10 * restrict src;
      const GFC_LOGICAL_1 * restrict msrc;
      GFC_COMPLEX_10 result;
      src = base;
      msrc = mbase;
      {

        result = 0;
        if (len <= 0)
          *dest = 0;
        else
          {
            for (n = 0; n < len; n++, src += delta, msrc += mdelta)
              {

                if (*msrc)
                  result += *src;
              }
            *dest = result;
          }
      }
      /* Advance to the next element. */
      count[0]++;
      base += sstride[0];
      mbase += mstride[0];
      dest += dstride[0];
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension. */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it. */
          base -= sstride[n] * extent[n];
          mbase -= mstride[n] * extent[n];
          dest -= dstride[n] * extent[n];
          n++;
          if (n == rank)
            {
              /* Break out of the loop. */
              base = NULL;
              break;
            }
          else
            {
              count[n]++;
              base += sstride[n];
              mbase += mstride[n];
              dest += dstride[n];
            }
        }
    }
}

extern void ssum_c10 (gfc_array_c10 * const restrict,
  gfc_array_c10 * const restrict, const index_type * const restrict,
  GFC_LOGICAL_4 *);
export_proto(ssum_c10);

void
ssum_c10 (gfc_array_c10 * const restrict retarray,
  gfc_array_c10 * const restrict array,
  const index_type * const restrict pdim,
  GFC_LOGICAL_4 * mask)
{
  index_type count[GFC_MAX_DIMENSIONS];
  index_type extent[GFC_MAX_DIMENSIONS];
  index_type sstride[GFC_MAX_DIMENSIONS];
  index_type dstride[GFC_MAX_DIMENSIONS];
  GFC_COMPLEX_10 * restrict dest;
  index_type rank;
  index_type n;
  index_type dim;


  if (*mask)
    {
      sum_c10 (retarray, array, pdim);
      return;
    }
  /* Make dim zero based to avoid confusion. */
  dim = (*pdim) - 1;
  rank = GFC_DESCRIPTOR_RANK (array) - 1;

  for (n = 0; n < dim; n++)
    {
      sstride[n] = array->dim[n].stride;
      extent[n] = array->dim[n].ubound + 1 - array->dim[n].lbound;

      if (extent[n] <= 0)
        extent[n] = 0;
    }

  for (n = dim; n < rank; n++)
    {
      sstride[n] = array->dim[n + 1].stride;
      extent[n] =
        array->dim[n + 1].ubound + 1 - array->dim[n + 1].lbound;

      if (extent[n] <= 0)
        extent[n] = 0;
    }

  if (retarray->data == NULL)
    {
      size_t alloc_size;

      for (n = 0; n < rank; n++)
        {
          retarray->dim[n].lbound = 0;
          retarray->dim[n].ubound = extent[n]-1;
          if (n == 0)
            retarray->dim[n].stride = 1;
          else
            retarray->dim[n].stride = retarray->dim[n-1].stride * extent[n-1];
        }

      retarray->offset = 0;
      retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank;

      alloc_size = sizeof (GFC_COMPLEX_10) * retarray->dim[rank-1].stride
                   * extent[rank-1];

      if (alloc_size == 0)
        {
          /* Make sure we have a zero-sized array. */
          retarray->dim[0].lbound = 0;
          retarray->dim[0].ubound = -1;
          return;
        }
      else
        retarray->data = internal_malloc_size (alloc_size);
    }
  else
    {
      if (rank != GFC_DESCRIPTOR_RANK (retarray))
        runtime_error ("rank of return array incorrect in"
                       " SUM intrinsic: is %ld, should be %ld",
                       (long int) (GFC_DESCRIPTOR_RANK (retarray)),
                       (long int) rank);

      if (compile_options.bounds_check)
        {
          for (n=0; n < rank; n++)
            {
              index_type ret_extent;

              ret_extent = retarray->dim[n].ubound + 1
                           - retarray->dim[n].lbound;
              if (extent[n] != ret_extent)
                runtime_error ("Incorrect extent in return value of"
                               " SUM intrinsic in dimension %ld:"
                               " is %ld, should be %ld", (long int) n + 1,
                               (long int) ret_extent, (long int) extent[n]);
            }
        }
    }

  for (n = 0; n < rank; n++)
    {
      count[n] = 0;
      dstride[n] = retarray->dim[n].stride;
    }

  dest = retarray->data;

  while (1)
    {
      *dest = 0;
      count[0]++;
      dest += dstride[0];
      n = 0;
      while (count[n] == extent[n])
        {
          /* When we get to the end of a dimension, reset it and increment
             the next dimension. */
          count[n] = 0;
          /* We could precalculate these products, but this is a less
             frequently used path so probably not worth it. */
          dest -= dstride[n] * extent[n];
          n++;
          if (n == rank)
            return;
          else
            {
              count[n]++;
              dest += dstride[n];
            }
        }
    }
}

#endif