valarray_array.h (__valarray_get_memory, [...]): New functions.

2000-07-15  Gabriel Dos Reis  <gdr@codesourcery.com>

	* std/valarray_array.h (__valarray_get_memory,
	__valarray_get_storage, __valarray_release_storage): New
	functions.
	(_Array_default_ctor, _Array_init_ctor, _Array_copy_ctor,
	_Array_copier): New traits classes.
	(__valarray_default_construct): New function.  Implements valarray
	default construction.
	(__valarray_fill_construct): New function. Implements valarray
	construction with initializer.
	(__valarray_copy_construct): New function.  Implements valarray
	copy construction.
	(__valarray_destroy_elements): New function.
	(__valarray_copy, __valarray_fill): Tweak.
	(__valarray_sum, __valarray_product): New helper functions.
	(_Array<>::free_data): Remove.
	(_Array<>::_Array): Tweak.

	* std/std_valarray.h (valarray<>::product): Remove.
	(valarray<>::valarray): Use __valarray_get_storage.
	(valarray<>::shift, valarray<>::cshift, valarray<>::resize):
	Tweak.

	* std/cpp_type_traits.h: New file.

	* valarray.cc (multiplies<>, accumulate, valarray<>::product):
	Remove explicit instantiation.
	(__valarray_product): New function.
	(_Indexer::_Indexer): Use.

From-SVN: r35055
This commit is contained in:
Gabriel Dos Reis 2000-07-15 21:54:06 +00:00 committed by Gabriel Dos Reis
parent 557b9df529
commit 5b2ff385d5
5 changed files with 628 additions and 88 deletions

View File

@ -1,3 +1,34 @@
2000-07-15 Gabriel Dos Reis <gdr@codesourcery.com>
* std/valarray_array.h (__valarray_get_memory,
__valarray_get_storage, __valarray_release_storage): New
functions.
(_Array_default_ctor, _Array_init_ctor, _Array_copy_ctor,
_Array_copier): New traits classes.
(__valarray_default_construct): New function. Implements valarray
default construction.
(__valarray_fill_construct): New function. Implements valarray
construction with initializer.
(__valarray_copy_construct): New function. Implements valarray
copy construction.
(__valarray_destroy_elements): New function.
(__valarray_copy, __valarray_fill): Tweak.
(__valarray_sum, __valarray_product): New helper functions.
(_Array<>::free_data): Remove.
(_Array<>::_Array): Tweak.
* std/std_valarray.h (valarray<>::product): Remove.
(valarray<>::valarray): Use __valarray_get_storage.
(valarray<>::shift, valarray<>::cshift, valarray<>::resize):
Tweak.
* std/cpp_type_traits.h: New file.
* valarray.cc (multiplies<>, accumulate, valarray<>::product):
Remove explicit instantiation.
(__valarray_product): New function.
(_Indexer::_Indexer): Use.
2000-07-14 Jean-Francois Panisset <panisset@discreet.com> 2000-07-14 Jean-Francois Panisset <panisset@discreet.com>
* std/bastring.h (basic_string<>::clear): Add function. * std/bastring.h (basic_string<>::clear): Add function.

View File

@ -0,0 +1,299 @@
// The -*- C++ -*- type traits classes for internal use in libstdc++
// Copyright (C) 2000 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING. If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.
// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
// Written by Gabriel Dos Reis <dosreis@cmla.ens-cachan.fr>
#ifndef _CPP_BITS_CPP_TYPE_TRAITS_H
#define _CPP_BITS_CPP_TYPE_TRAITS_H 1
//
// This file provides some compile-time information about various types.
// This information was designed, on purpose, to be constant-expressions
// and not types as found in <stl/bits/type_traits.h>. In particular, they
// can be used in control structures and the optimizer, hopefully, will do
// the obvious thing.
//
// Why integral expressions, and not functions nor types?
// Firstly, these compile-time information entities are used as
// template-arguments so function return values won't work. We
// need compile-time entities. We're left with types and integral constant
// expressions.
// Secondly, from the point of view of ease of use, type-based compile-time
// information is -not- *that* convenient. One has to write lots of
// overloaded functions and to hope that the compiler will select the right
// one. As a net effect, the overall structure isn't very clear at first
// glance.
// Thirdly, partial ordering and overload resolution (of template functions)
// is very costly in terms of compiler-resource. It is a Good Thing to
// keep this resource consumption as low as possible. Please, direct
// any comment to <dosreis@cmla.ens-cachan.fr>.
//
// -- Gaby (dosreis@cmla.ens-cachan.fr) 2000-03-06.
//
extern "C++" {
// Compile-time predicate: is _Tp exactly the type `void'?
// The generic template answers "no"; the single specialization
// below answers "yes".
template<typename _Tp>
struct __is_void
{
  enum { _M_type = 0 };
};

template<>
struct __is_void<void>
{
  enum { _M_type = 1 };
};
//
// Integer types
//
// Compile-time predicate: is _Tp one of the built-in integer types?
// The generic template answers "no"; each integer type gets a
// specialization answering "yes".
template<typename _Tp>
struct __is_integer
{
  enum { _M_type = 0 };
};

// Thirteen specializations (yes there are eleven standard integer
// types; 'long long' and 'unsigned long long' are supported as
// extensions)
template<>
struct __is_integer<bool> { enum { _M_type = 1 }; };

template<>
struct __is_integer<char> { enum { _M_type = 1 }; };

template<>
struct __is_integer<signed char> { enum { _M_type = 1 }; };

template<>
struct __is_integer<unsigned char> { enum { _M_type = 1 }; };

# if 0
template<>
struct __is_integer<wchar_t> { enum { _M_type = 1 }; };
# endif

template<>
struct __is_integer<short> { enum { _M_type = 1 }; };

template<>
struct __is_integer<unsigned short> { enum { _M_type = 1 }; };

template<>
struct __is_integer<int> { enum { _M_type = 1 }; };

template<>
struct __is_integer<unsigned int> { enum { _M_type = 1 }; };

template<>
struct __is_integer<long> { enum { _M_type = 1 }; };

template<>
struct __is_integer<unsigned long> { enum { _M_type = 1 }; };

# if 0
template<>
struct __is_integer<long long> { enum { _M_type = 1 }; };

template<>
struct __is_integer<unsigned long long> { enum { _M_type = 1 }; };
# endif
//
// Floating point types
//
// Compile-time predicate: is _Tp one of the built-in floating
// point types?  The generic template answers "no".
template<typename _Tp>
struct __is_floating
{
  enum { _M_type = 0 };
};

// three specializations (float, double and 'long double')
template<>
struct __is_floating<float> { enum { _M_type = 1 }; };

template<>
struct __is_floating<double> { enum { _M_type = 1 }; };

template<>
struct __is_floating<long double> { enum { _M_type = 1 }; };
//
// An arithmetic type is an integer type or a floating point type
//
template<typename _Tp>
struct __is_arithmetic
{
  enum
  {
    _M_type = __is_integer<_Tp>::_M_type || __is_floating<_Tp>::_M_type
  };
};

//
// A fundamental type is `void' or an arithmetic type
//
template<typename _Tp>
struct __is_fundamental
{
  enum
  {
    _M_type = __is_void<_Tp>::_M_type || __is_arithmetic<_Tp>::_M_type
  };
};

//
// For the immediate use, the following is a good approximation
// of POD-ness (it errs on the safe side for class types).
//
template<typename _Tp>
struct __is_pod
{
  enum
  {
    _M_type = __is_fundamental<_Tp>::_M_type
  };
};
} // extern "C++"
#endif //_CPP_BITS_CPP_TYPE_TRAITS_H

View File

@ -212,10 +212,7 @@ public:
_Tp sum() const; _Tp sum() const;
_Tp min() const; _Tp min() const;
_Tp max() const; _Tp max() const;
// FIXME: Extension
_Tp product () const;
valarray<_Tp> shift (int) const; valarray<_Tp> shift (int) const;
valarray<_Tp> cshift(int) const; valarray<_Tp> cshift(int) const;
_Expr<_ValFunClos<_ValArray,_Tp>,_Tp> apply(_Tp func(_Tp)) const; _Expr<_ValFunClos<_ValArray,_Tp>,_Tp> apply(_Tp func(_Tp)) const;
@ -285,54 +282,69 @@ inline valarray<_Tp>::valarray () : _M_size (0), _M_data (0) {}
template<typename _Tp> template<typename _Tp>
inline valarray<_Tp>::valarray (size_t __n) inline valarray<_Tp>::valarray (size_t __n)
: _M_size (__n), _M_data (new _Tp[__n]) {} : _M_size (__n), _M_data(__valarray_get_storage<_Tp>(__n))
{ __valarray_default_construct(_M_data, _M_data + __n); }
template<typename _Tp> template<typename _Tp>
inline valarray<_Tp>::valarray (const _Tp& __t, size_t __n) inline valarray<_Tp>::valarray (const _Tp& __t, size_t __n)
: _M_size (__n), _M_data (new _Tp[__n]) : _M_size (__n), _M_data(__valarray_get_storage<_Tp>(__n))
{ __valarray_fill (_M_data, _M_size, __t); } { __valarray_fill_construct(_M_data, _M_data + __n, __t); }
template<typename _Tp> template<typename _Tp>
inline valarray<_Tp>::valarray (const _Tp* __restrict__ __pT, size_t __n) inline valarray<_Tp>::valarray (const _Tp* __restrict__ __pT, size_t __n)
: _M_size (__n), _M_data (new _Tp[__n]) : _M_size (__n), _M_data(__valarray_get_storage<_Tp>(__n))
{ __valarray_copy (__pT, __n, _M_data); } { __valarray_copy_construct(__pT, __pT + __n, _M_data); }
template<typename _Tp> template<typename _Tp>
inline valarray<_Tp>::valarray (const valarray<_Tp>& __v) inline valarray<_Tp>::valarray (const valarray<_Tp>& __v)
: _M_size (__v._M_size), _M_data (new _Tp[__v._M_size]) : _M_size (__v._M_size), _M_data(__valarray_get_storage<_Tp>(__v._M_size))
{ __valarray_copy (__v._M_data, _M_size, _M_data); } { __valarray_copy_construct (__v._M_data, __v._M_data + _M_size, _M_data); }
template<typename _Tp> template<typename _Tp>
inline valarray<_Tp>::valarray (const slice_array<_Tp>& __sa) inline valarray<_Tp>::valarray (const slice_array<_Tp>& __sa)
: _M_size (__sa._M_sz), _M_data (new _Tp[__sa._M_sz]) : _M_size (__sa._M_sz), _M_data(__valarray_get_storage<_Tp>(__sa._M_sz))
{ __valarray_copy (__sa._M_array, __sa._M_sz, __sa._M_stride, {
_Array<_Tp>(_M_data)); } __valarray_copy_construct
(__sa._M_array, __sa._M_sz, __sa._M_stride, _Array<_Tp>(_M_data));
}
template<typename _Tp> template<typename _Tp>
inline valarray<_Tp>::valarray (const gslice_array<_Tp>& __ga) inline valarray<_Tp>::valarray (const gslice_array<_Tp>& __ga)
: _M_size (__ga._M_index.size()), _M_data (new _Tp[_M_size]) : _M_size (__ga._M_index.size()),
{ __valarray_copy (__ga._M_array, _Array<size_t>(__ga._M_index), _M_data(__valarray_get_storage<_Tp>(_M_size))
_Array<_Tp>(_M_data), _M_size); } {
__valarray_copy_construct
(__ga._M_array, _Array<size_t>(__ga._M_index),
_Array<_Tp>(_M_data), _M_size);
}
template<typename _Tp> template<typename _Tp>
inline valarray<_Tp>::valarray (const mask_array<_Tp>& __ma) inline valarray<_Tp>::valarray (const mask_array<_Tp>& __ma)
: _M_size (__ma._M_sz), _M_data (new _Tp[__ma._M_sz]) : _M_size (__ma._M_sz), _M_data(__valarray_get_storage<_Tp>(__ma._M_sz))
{ __valarray_copy (__ma._M_array, __ma._M_mask, {
_Array<_Tp>(_M_data), _M_size); } __valarray_copy_construct
(__ma._M_array, __ma._M_mask, _Array<_Tp>(_M_data), _M_size);
}
template<typename _Tp> template<typename _Tp>
inline valarray<_Tp>::valarray (const indirect_array<_Tp>& __ia) inline valarray<_Tp>::valarray (const indirect_array<_Tp>& __ia)
: _M_size (__ia._M_sz), _M_data (new _Tp[__ia._M_sz]) : _M_size (__ia._M_sz), _M_data(__valarray_get_storage<_Tp>(__ia._M_size))
{ __valarray_copy (__ia._M_array, __ia._M_index, {
_Array<_Tp>(_M_data), _M_size); } __valarray_copy_construct
(__ia._M_array, __ia._M_index, _Array<_Tp>(_M_data), _M_size);
}
template<typename _Tp> template<class _Dom> template<typename _Tp> template<class _Dom>
inline valarray<_Tp>::valarray (const _Expr<_Dom, _Tp>& __e) inline valarray<_Tp>::valarray (const _Expr<_Dom, _Tp>& __e)
: _M_size (__e.size ()), _M_data (new _Tp[_M_size]) : _M_size (__e.size ()), _M_data (__valarray_get_storage<_Tp>(_M_size))
{ __valarray_copy (__e, _M_size, _Array<_Tp>(_M_data)); } { __valarray_copy_construct (__e, _M_size, _Array<_Tp>(_M_data)); }
template<typename _Tp> template<typename _Tp>
inline valarray<_Tp>::~valarray () { delete[] _M_data; } inline valarray<_Tp>::~valarray ()
{
__valarray_destroy_elements(_M_data, _M_data + _M_size);
__valarray_release_storage(_M_data);
}
template<typename _Tp> template<typename _Tp>
inline valarray<_Tp>& inline valarray<_Tp>&
@ -472,14 +484,7 @@ template<class _Tp>
inline _Tp inline _Tp
valarray<_Tp>::sum () const valarray<_Tp>::sum () const
{ {
return accumulate (_M_data, _M_data + _M_size, _Tp ()); return __valarray_sum(_M_data, _M_data + _M_size);
}
template<typename _Tp>
inline _Tp
valarray<_Tp>::product () const
{
return accumulate (_M_data, _M_data+_M_size, _Tp(1), multiplies<_Tp> ());
} }
template <class _Tp> template <class _Tp>
@ -488,18 +493,18 @@ valarray<_Tp>::shift (int __n) const
{ {
_Tp* const __a = static_cast<_Tp*> (alloca (sizeof(_Tp) * _M_size)); _Tp* const __a = static_cast<_Tp*> (alloca (sizeof(_Tp) * _M_size));
if (! __n) // __n == 0: no shift if (! __n) // __n == 0: no shift
__valarray_copy (_M_data, _M_size, __a); __valarray_copy_construct (_M_data, _M_size, __a);
else if (__n > 0) { // __n > 0: shift left else if (__n > 0) { // __n > 0: shift left
if (__n > _M_size) if (__n > _M_size)
__valarray_fill(__a, __n, _Tp()); __valarray_default_construct(__a, __a + __n);
else { else {
__valarray_copy (_M_data+__n, _M_size-__n, __a); __valarray_copy_construct (_M_data+__n, _M_size-__n, __a);
__valarray_fill (__a+_M_size-__n, __n, _Tp()); __valarray_default_construct (__a+_M_size-__n, __a + _M_size);
} }
} }
else { // __n < 0: shift right else { // __n < 0: shift right
__valarray_copy (_M_data, _M_size+__n, __a-__n); __valarray_copy_construct (_M_data, _M_data+_M_size+__n, __a-__n);
__valarray_fill(__a, -__n, _Tp()); __valarray_default_construct(__a, __a-__n);
} }
return valarray<_Tp> (__a, _M_size); return valarray<_Tp> (__a, _M_size);
} }
@ -509,15 +514,17 @@ inline valarray<_Tp>
valarray<_Tp>::cshift (int __n) const valarray<_Tp>::cshift (int __n) const
{ {
_Tp* const __a = static_cast<_Tp*> (alloca (sizeof(_Tp) * _M_size)); _Tp* const __a = static_cast<_Tp*> (alloca (sizeof(_Tp) * _M_size));
if (! __n) // __n == 0: no cshift if (__n == 0) // __n == 0: no cshift
__valarray_copy(_M_data, _M_size, __a); __valarray_copy_construct(_M_data, _M_data + _M_size, __a);
else if (__n > 0) { // __n > 0: cshift left else if (__n > 0) { // __n > 0: cshift left
__valarray_copy (_M_data, __n, __a + _M_size-__n); __valarray_copy_construct (_M_data, _M_data + __n, __a + _M_size-__n);
__valarray_copy (_M_data + __n, _M_size-__n, __a); __valarray_copy_construct (_M_data + __n, _M_data + _M_size, __a);
} }
else { // __n < 0: cshift right else { // __n < 0: cshift right
__valarray_copy (_M_data + _M_size + __n, -__n, __a); __valarray_copy_construct
__valarray_copy (_M_data, _M_size + __n, __a - __n); (_M_data + _M_size + __n, _M_data + _M_size, __a);
__valarray_copy_construct
(_M_data, _M_data + _M_size + __n, __a - __n);
} }
return valarray<_Tp> (__a, _M_size); return valarray<_Tp> (__a, _M_size);
} }
@ -526,12 +533,15 @@ template <class _Tp>
inline void inline void
valarray<_Tp>::resize (size_t __n, _Tp __c) valarray<_Tp>::resize (size_t __n, _Tp __c)
{ {
if (_M_size != __n) { // this is so to make valarray<valarray<T> > work
delete[] _M_data; // even though it is not required by the standard.
_M_size = __n; __valarray_destroy_elements(_M_data, _M_data + _M_size);
_M_data = new _Tp[_M_size]; if (_M_size != __n) {
} __valarray_release_storage(_M_data);
__valarray_fill (_M_data, _M_size, __c); _M_size = __n;
_M_data = __valarray_get_storage<_Tp>(__n);
}
__valarray_fill_construct (_M_data, _M_data + _M_size, __c);
} }
template<typename _Tp> template<typename _Tp>

View File

@ -34,39 +34,205 @@
#include <cstdlib> #include <cstdlib>
#include <cstring> #include <cstring>
#include <std/cpp_type_traits.h>
extern "C++" { extern "C++" {
// //
// Helper functions on raw pointers // Helper functions on raw pointers
// //
// Allocate __n bytes of raw, uninitialized memory from the
// global operator new.  The caller owns the returned block and
// must hand it back via __valarray_release_storage.
inline void*
__valarray_get_memory(size_t __n)
{
  return operator new(__n);
}
// fill plain array __a[<__n>] with __t template<typename _Tp>
template<typename _Tp> inline _Tp*__restrict__
inline void __valarray_get_storage(size_t __n)
__valarray_fill (_Tp* __restrict__ __a, size_t __n, const _Tp& __t) {
{ while (__n--) *__a++ = __t; } return static_cast<_Tp*__restrict__>
(__valarray_get_memory(__n * sizeof(_Tp)));
}
// Return memory to the system
// Hands a block obtained from __valarray_get_memory back to the
// global operator delete.
inline void
__valarray_release_storage(void* __p)
{
  operator delete(__p);
}
// fill strided array __a[<__n-1 : __s>] with __t // Turn a raw-memory into an array of _Tp filled with _Tp()
template<typename _Tp> // This is required in 'valarray<T> v(n);'
inline void template<typename _Tp, bool>
__valarray_fill (_Tp* __restrict__ __a, size_t __n, struct _Array_default_ctor
size_t __s, const _Tp& __t) {
{ for (size_t __i=0; __i<__n; ++__i, __a+=__s) *__a = __t; } // Please note that this isn't exception safe. But
// valarrays aren't required to be exception safe.
inline static void
_S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
{ while (__b != __e) new(__b++) _Tp(); }
};
// fill indirect array __a[__i[<__n>]] with __i template<typename _Tp>
template<typename _Tp> struct _Array_default_ctor<_Tp, true>
inline void {
__valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i, // For fundamental types, it suffices to say 'memset()'
size_t __n, const _Tp& __t) inline static void
{ for (size_t __j=0; __j<__n; ++__j, ++__i) __a[*__i] = __t; } _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
{ memset(__b, 0, (__e - __b)*sizeof(_Tp)); }
};
// copy plain array __a[<__n>] in __b[<__n>] template<typename _Tp>
template<typename _Tp> inline void
inline void __valarray_default_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
__valarray_copy (const _Tp* __restrict__ __a, size_t __n, {
_Tp* __restrict__ __b) _Array_default_ctor<_Tp, __is_fundamental<_Tp>::_M_type>::
{ memcpy (__b, __a, __n * sizeof(_Tp)); } _S_do_it(__b, __e);
}
// Turn raw memory into an array of _Tp filled with __t.
// This is required in 'valarray<T> v(n, t)'.  Also
// used in valarray<>::resize().
template<typename _Tp, bool>
struct _Array_init_ctor
{
  // Generic version: placement-new a copy of __t into every slot.
  // Please note that this isn't exception safe.  But
  // valarrays aren't required to be exception safe.
  inline static void
  _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
  {
    for (; __b != __e; ++__b)
      new(__b) _Tp(__t);
  }
};

template<typename _Tp>
struct _Array_init_ctor<_Tp, true>
{
  // For fundamental types plain assignment into the raw memory
  // is enough -- no constructor needs to run.
  inline static void
  _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
  {
    for (; __b != __e; ++__b)
      *__b = __t;
  }
};

// Dispatch on __is_fundamental so fundamental types take the
// assignment path and class types take the placement-new path.
template<typename _Tp>
inline void
__valarray_fill_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e,
                          const _Tp __t)
{
  _Array_init_ctor<_Tp, __is_fundamental<_Tp>::_M_type>::
    _S_do_it(__b, __e, __t);
}
//
// copy-construct raw array [__o, *) from plain array [__b, __e)
// We can't just say 'memcpy()' for arbitrary class types.
//
template<typename _Tp, bool>
struct _Array_copy_ctor
{
  // Generic version: placement-new a copy of every source element.
  // Please note that this isn't exception safe.  But
  // valarrays aren't required to be exception safe.
  inline static void
  _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
           _Tp* __restrict__ __o)
  {
    for (; __b != __e; ++__b, ++__o)
      new(__o) _Tp(*__b);
  }
};

template<typename _Tp>
struct _Array_copy_ctor<_Tp, true>
{
  // Fundamental types can be copied in bulk with 'memcpy()'.
  inline static void
  _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
           _Tp* __restrict__ __o)
  {
    memcpy(__o, __b, (__e - __b)*sizeof(_Tp));
  }
};

// Dispatch on __is_fundamental to pick the cheap bulk copy when
// it is legal.
template<typename _Tp>
inline void
__valarray_copy_construct(const _Tp* __restrict__ __b,
                          const _Tp* __restrict__ __e,
                          _Tp* __restrict__ __o)
{
  _Array_copy_ctor<_Tp, __is_fundamental<_Tp>::_M_type>::
    _S_do_it(__b, __e, __o);
}
// copy-construct raw array [__o, *) from strided array __a[<__n : __s>]
// Fundamental types are assigned; class types are placement-new'ed.
template<typename _Tp>
inline void
__valarray_copy_construct (const _Tp* __restrict__ __a, size_t __n,
                           size_t __s, _Tp* __restrict__ __o)
{
  if (__is_fundamental<_Tp>::_M_type)
    for (size_t __i = 0; __i < __n; ++__i, __a += __s)
      *__o++ = *__a;
  else
    for (size_t __i = 0; __i < __n; ++__i, __a += __s)
      new(__o++) _Tp(*__a);
}
// copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]]
// Fundamental types are assigned; class types are placement-new'ed.
template<typename _Tp>
inline void
__valarray_copy_construct (const _Tp* __restrict__ __a,
                           const size_t* __restrict__ __i,
                           _Tp* __restrict__ __o, size_t __n)
{
  if (__is_fundamental<_Tp>::_M_type)
    for (size_t __j = 0; __j < __n; ++__j)
      *__o++ = __a[__i[__j]];
  else
    for (size_t __j = 0; __j < __n; ++__j)
      new (__o++) _Tp(__a[__i[__j]]);
}
// Do the necessary cleanup when we're done with arrays.
// For fundamental types this is a no-op; for class types every
// element's destructor is invoked explicitly, since the storage
// was populated with placement-new rather than new[].
template<typename _Tp>
inline void
__valarray_destroy_elements(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
{
  if (!__is_fundamental<_Tp>::_M_type)
    for (; __b != __e; ++__b)
      __b->~_Tp();
}
// fill plain array __a[<__n>] with __t
template<typename _Tp>
inline void
__valarray_fill (_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
{
  for (size_t __i = 0; __i < __n; ++__i)
    __a[__i] = __t;
}

// fill strided array __a[<__n-1 : __s>] with __t
template<typename _Tp>
inline void
__valarray_fill (_Tp* __restrict__ __a, size_t __n,
                 size_t __s, const _Tp& __t)
{
  while (__n--)
    {
      *__a = __t;
      __a += __s;
    }
}

// fill indirect array __a[__i[<__n>]] with __t
template<typename _Tp>
inline void
__valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
                size_t __n, const _Tp& __t)
{
  while (__n--)
    __a[*__i++] = __t;
}
// copy plain array __a[<__n>] in __b[<__n>]
// For non-fundamental types, it is wrong to say 'memcpy()'
template<typename _Tp, bool>
struct _Array_copier
{
  // Generic version: element-wise assignment.
  inline static void
  _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
  {
    for (size_t __i = 0; __i < __n; ++__i)
      __b[__i] = __a[__i];
  }
};

template<typename _Tp>
struct _Array_copier<_Tp, true>
{
  // Fundamental types may be copied in bulk.
  inline static void
  _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
  {
    memcpy (__b, __a, __n * sizeof (_Tp));
  }
};

// Dispatch on __is_fundamental to pick the bulk copy when legal.
template<typename _Tp>
inline void
__valarray_copy (const _Tp* __restrict__ __a, size_t __n,
                 _Tp* __restrict__ __b)
{
  _Array_copier<_Tp, __is_fundamental<_Tp>::_M_type>::
    _S_do_it(__a, __n, __b);
}
// copy strided array __a[<__n : __s>] in plain __b[<__n>] // copy strided array __a[<__n : __s>] in plain __b[<__n>]
template<typename _Tp> template<typename _Tp>
@ -97,6 +263,34 @@ __valarray_copy (const _Tp* __restrict__ __a, size_t __n,
_Tp* __restrict__ __b, const size_t* __restrict__ __i) _Tp* __restrict__ __b, const size_t* __restrict__ __i)
{ for (size_t __j=0; __j<__n; ++__j, ++__a, ++__i) __b[*__i] = *__a; } { for (size_t __j=0; __j<__n; ++__j, ++__a, ++__i) __b[*__i] = *__a; }
//
// Compute the sum of elements in range [__f, __l)
// This is a naive algorithm.  It suffers from cancelling.
// In the future try to specialize
// for _Tp = float, double, long double using a more accurate
// algorithm.
//
template<typename _Tp>
inline _Tp
__valarray_sum(const _Tp* __restrict__ __f, const _Tp* __restrict__ __l)
{
  _Tp __r = _Tp();  // value-initialized accumulator
  for (; __f != __l; ++__f)
    __r = __r + *__f;
  return __r;
}
// Compute the product of all elements in range [__f, __l)
// An empty range yields _Tp(1), the multiplicative identity.
template<typename _Tp>
_Tp
__valarray_product(const _Tp* __restrict__ __f,
                   const _Tp* __restrict__ __l)
{
  _Tp __r = _Tp(1);
  for (; __f != __l; ++__f)
    __r = __r * *__f;
  return __r;
}
// //
// Helper class _Array, first layer of valarray abstraction. // Helper class _Array, first layer of valarray abstraction.
// All operations on valarray should be forwarded to this class // All operations on valarray should be forwarded to this class
@ -110,7 +304,6 @@ template<typename _Tp> struct _Array {
explicit _Array (const valarray<_Tp>&); explicit _Array (const valarray<_Tp>&);
_Array (const _Tp* __restrict__, size_t); _Array (const _Tp* __restrict__, size_t);
void free_data() const;
_Tp* begin () const; _Tp* begin () const;
_Tp* const __restrict__ _M_data; _Tp* const __restrict__ _M_data;
@ -161,7 +354,9 @@ __valarray_copy (_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
template<typename _Tp> template<typename _Tp>
inline inline
_Array<_Tp>::_Array (size_t __n) : _M_data (new _Tp[__n]) {} _Array<_Tp>::_Array (size_t __n)
: _M_data (__valarray_get_storage<_Tp>(__n))
{ __valarray_default_construct(_M_data, _M_data + __n); }
template<typename _Tp> template<typename _Tp>
inline inline
@ -174,11 +369,8 @@ inline _Array<_Tp>::_Array (const valarray<_Tp>& __v)
template<typename _Tp> template<typename _Tp>
inline inline
_Array<_Tp>::_Array (const _Tp* __restrict__ __b, size_t __s) _Array<_Tp>::_Array (const _Tp* __restrict__ __b, size_t __s)
: _M_data (new _Tp[__s]) { __valarray_copy (__b, __s, _M_data); } : _M_data (__valarray_get_storage<_Tp>(__s ))
{ __valarray_copy_construct(__b, __s, _M_data); }
template<typename _Tp>
inline void
_Array<_Tp>::free_data() const { delete[] _M_data; }
template<typename _Tp> template<typename _Tp>
inline _Tp* inline _Tp*

View File

@ -1,9 +1,5 @@
#include <std/std_valarray.h> #include <std/std_valarray.h>
// Some Explicit Instanciations.
template class multiplies<size_t>;
template size_t accumulate(size_t*, size_t*, size_t, multiplies<size_t>);
template void template void
__valarray_fill(size_t* __restrict__, size_t, const size_t&); __valarray_fill(size_t* __restrict__, size_t, const size_t&);
@ -15,7 +11,19 @@ template valarray<size_t>::~valarray();
template valarray<size_t>::valarray(const valarray<size_t>&); template valarray<size_t>::valarray(const valarray<size_t>&);
template size_t valarray<size_t>::size() const; template size_t valarray<size_t>::size() const;
template size_t& valarray<size_t>::operator[](size_t); template size_t& valarray<size_t>::operator[](size_t);
template size_t valarray<size_t>::product() const;
// Compute the product of all elements of __a by delegating to the
// pointer-range __valarray_product template.  Replaces the removed
// valarray<>::product() extension for _Indexer's use.
inline size_t
__valarray_product(const valarray<size_t>& __a)
{
  const size_t __n = __a.size();
  // The empty product is 1; returning early also avoids taking
  // &__t[0] on an empty valarray below.
  if (__n == 0)
    return 1;
  // XXX: This ugly cast is necessary because
  // valarray::operator[]() const returns a VALUE!
  // Try to get the committee to correct that gross error.
  valarray<size_t>& __t = const_cast<valarray<size_t>&>(__a);
  return __valarray_product(&__t[0], &__t[0] + __n);
}
void __gslice_to_index(size_t __o, const valarray<size_t>& __l, void __gslice_to_index(size_t __o, const valarray<size_t>& __l,
@ -43,7 +51,7 @@ void __gslice_to_index(size_t __o, const valarray<size_t>& __l,
_Indexer::_Indexer(size_t __o, const valarray<size_t>& __l, _Indexer::_Indexer(size_t __o, const valarray<size_t>& __l,
const valarray<size_t>& __s) const valarray<size_t>& __s)
: _M_count(1), _M_start(__o), _M_size(__l), _M_stride(__s), : _M_count(1), _M_start(__o), _M_size(__l), _M_stride(__s),
_M_index(__l.size() ? __l.product() : 0) _M_index(__l.size() ? __valarray_product(__l) : 0)
{ __gslice_to_index(__o, __l, __s, _M_index); } { __gslice_to_index(__o, __l, __s, _M_index); }