Share memory blocks between pool allocators

gcc/
	* Makefile.in: Add memory-block.cc
	(pool_allocator::initialize): Use fixed block size.
	(pool_allocator::release): Use memory_block_pool.
	(pool_allocator::allocate): Likewise.
	* asan.c (asan_mem_ref_pool): Adjust to use common block size in all
	object pools.
	* cfg.c (initialize_original_copy_tables): Likewise.
	* cselib.c (elt_list_pool, elt_loc_list_pool,
	cselib_val_pool): Likewise.
	* df-problems.c (df_chain_alloc): Likewise.
	* df-scan.c (df_scan_alloc): Likewise.
	* dse.c (cse_store_info_pool, rtx_store_info_pool,
	read_info_type_pool, insn_info_type_pool, bb_info_pool,
	group_info_pool, deferred_change_pool): Likewise.
	* et-forest.c (et_nodes, et_occurrences): Likewise.
	* ipa-cp.c (ipcp_cst_values_pool, ipcp_sources_pool,
	ipcp_agg_lattice_pool): Likewise.
	* ipa-inline-analysis.c (edge_predicate_pool): Likewise.
	* ipa-profile.c (histogram_pool): Likewise.
	* ipa-prop.c (ipa_refdesc_pool): Likewise.
	* ira-build.c (live_range_pool, allocno_pool, object_pool,
	initiate_cost_vectors, pref_pool, copy_pool): Likewise.
	* ira-color.c (update_cost_record_pool): Likewise.
	* lra-lives.c (lra_live_range_pool): Likewise.
	* lra.c (lra_insn_reg_pool, lra_copy_pool): Likewise.
	* memory-block.cc: New file.
	* memory-block.h: New file.
	* regcprop.c (queued_debug_insn_change_pool): Use common block size.
	* sched-deps.c (sched_deps_init): Likewise.
	* sel-sched-ir.c (sched_lists_pool): Likewise.
	* stmt.c (expand_case, expand_sjlj_dispatch_table): Likewise.
	* tree-sra.c (access_pool): Likewise.
	* tree-ssa-math-opts.c (pass_cse_reciprocals::execute): Likewise.
	* tree-ssa-pre.c (pre_expr_pool, bitmap_set_pool): Likewise.
	* tree-ssa-reassoc.c (operand_entry_pool): Likewise.
	* tree-ssa-sccvn.c (allocate_vn_table): Likewise.
	* tree-ssa-strlen.c (strinfo_pool): Likewise.
	* tree-ssa-structalias.c (variable_info_pool): Likewise.
	* var-tracking.c (attrs_def_pool, var_pool, valvar_pool,
	location_chain_pool, shared_hash_pool, loc_exp_dep_pool): Likewise.

gcc/c-family/
	* c-format.c (check_format_arg): Adjust to use common block size in all
	object pools.

From-SVN: r227817
Author: Mikhail Maltsev
Date:   2015-09-16 00:56:54 +00:00
Commit: fcb87c50b0 (parent 5e4e62af0c)
35 changed files with 358 additions and 127 deletions
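
The substance of the change is easiest to see in the allocator declarations: pool constructors no longer take a per-pool block size, because every pool now carves its objects out of shared fixed-size (64 KB) blocks obtained from memory_block_pool. A before/after sketch, using two declarations taken from the hunks below:

/* Before: every pool chose its own block size (number of elements).  */
object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref", 10);
static pool_allocator value_pool ("value", 100, RTX_CODE_SIZE (VALUE));

/* After: only a name (plus the element size for untyped pools); the number
   of elements per block is derived from memory_block_pool::block_size.  */
object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref");
static pool_allocator value_pool ("value", RTX_CODE_SIZE (VALUE));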

gcc/ChangeLog

@@ -1,3 +1,46 @@
2015-09-16 Mikhail Maltsev <maltsevm@gmail.com>
	(entry text identical to the ChangeLog in the commit message above)
2015-09-15 Max Filippov <jcmvbkbc@gmail.com>
* config/xtensa/xtensa.h (DWARF_ALT_FRAME_RETURN_COLUMN): New

gcc/Makefile.in

@@ -1513,7 +1513,7 @@ OBJS = \
# Objects in libcommon.a, potentially used by all host binaries and with
# no target dependencies.
OBJS-libcommon = diagnostic.o diagnostic-color.o pretty-print.o intl.o \
vec.o input.o version.o hash-table.o ggc-none.o
vec.o input.o version.o hash-table.o ggc-none.o memory-block.o
# Objects in libcommon-target.a, used by drivers and by the core
# compiler and containing target-dependent code.

gcc/alloc-pool.h

@@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see
#ifndef ALLOC_POOL_H
#define ALLOC_POOL_H
#include "memory-block.h"
extern void dump_alloc_pool_statistics (void);
@@ -95,18 +96,53 @@ struct pool_usage: public mem_usage
extern mem_alloc_description<pool_usage> pool_allocator_usage;
/* Generic pool allocator. */
class pool_allocator
#if 0
/* If a pool with custom block size is needed, one might use the following
template. An instance of this template can be used as a parameter for
instantiating base_pool_allocator template:
typedef custom_block_allocator <128*1024> huge_block_allocator;
...
static base_pool_allocator <huge_block_allocator>
value_pool ("value", 16384);
Right now it's not used anywhere in the code, and is given here as an
example). */
template <size_t BlockSize>
class custom_block_allocator
{
public:
/* Default constructor for pool allocator called NAME. Each block
has NUM elements. */
pool_allocator (const char *name, size_t num, size_t size CXX_MEM_STAT_INFO);
~pool_allocator ();
static const size_t block_size = BlockSize;
static inline void *
allocate () ATTRIBUTE_MALLOC
{
return XNEWVEC (char, BlockSize);
}
static inline void
release (void *block)
{
XDELETEVEC (block);
}
};
#endif
/* Generic pool allocator. */
template <typename TBlockAllocator>
class base_pool_allocator
{
public:
/* Default constructor for pool allocator called NAME. */
base_pool_allocator (const char *name, size_t size CXX_MEM_STAT_INFO);
~base_pool_allocator ();
void release ();
void release_if_empty ();
void *allocate () ATTRIBUTE_MALLOC;
void remove (void *object);
size_t num_elts_current ();
private:
struct allocation_pool_list
@@ -151,7 +187,7 @@ private:
};
/* Align X to 8. */
size_t
static inline size_t
align_eight (size_t x)
{
return (((x+7) >> 3) << 3);
@@ -180,8 +216,6 @@ private:
size_t m_blocks_allocated;
/* List of blocks that are used to allocate new objects. */
allocation_pool_list *m_block_list;
/* The number of elements in a block. */
size_t m_block_size;
/* Size of a pool elements in bytes. */
size_t m_elt_size;
/* Size in bytes that should be allocated for each element. */
@@ -192,24 +226,24 @@ private:
mem_location m_location;
};
template <typename TBlockAllocator>
inline
pool_allocator::pool_allocator (const char *name, size_t num,
size_t size MEM_STAT_DECL):
m_name (name), m_id (0), m_elts_per_block (num), m_returned_free_list (NULL),
base_pool_allocator <TBlockAllocator>::base_pool_allocator (
const char *name, size_t size MEM_STAT_DECL):
m_name (name), m_id (0), m_elts_per_block (0), m_returned_free_list (NULL),
m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL),
m_block_size (0), m_size (size), m_initialized (false),
m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}
m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL), m_size (size),
m_initialized (false), m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}
/* Initialize a pool allocator. */
template <typename TBlockAllocator>
inline void
pool_allocator::initialize ()
base_pool_allocator <TBlockAllocator>::initialize ()
{
gcc_checking_assert (!m_initialized);
m_initialized = true;
size_t header_size;
size_t size = m_size;
gcc_checking_assert (m_name);
@@ -218,15 +252,12 @@ pool_allocator::initialize ()
if (size < sizeof (allocation_pool_list*))
size = sizeof (allocation_pool_list*);
/* Now align the size to a multiple of 4. */
/* Now align the size to a multiple of 8. */
size = align_eight (size);
/* Add the aligned size of ID. */
size += offsetof (allocation_object, u.data);
/* Um, we can't really allocate 0 elements per block. */
gcc_checking_assert (m_elts_per_block);
m_elt_size = size;
if (GATHER_STATISTICS)
@@ -239,9 +270,10 @@ pool_allocator::initialize ()
}
/* List header size should be a multiple of 8. */
header_size = align_eight (sizeof (allocation_pool_list));
size_t header_size = align_eight (sizeof (allocation_pool_list));
m_block_size = (size * m_elts_per_block) + header_size;
m_elts_per_block = (TBlockAllocator::block_size - header_size) / size;
gcc_checking_assert (m_elts_per_block != 0);
#ifdef ENABLE_CHECKING
/* Increase the last used ID and use it for this pool.
@@ -255,8 +287,9 @@ pool_allocator::initialize ()
}
/* Free all memory allocated for the given memory pool. */
template <typename TBlockAllocator>
inline void
pool_allocator::release ()
base_pool_allocator <TBlockAllocator>::release ()
{
if (!m_initialized)
return;
@@ -267,7 +300,7 @@ pool_allocator::release ()
for (block = m_block_list; block != NULL; block = next_block)
{
next_block = block->next;
free (block);
TBlockAllocator::release (block);
}
if (GATHER_STATISTICS)
@@ -285,21 +318,24 @@ pool_allocator::release ()
m_block_list = NULL;
}
void
inline pool_allocator::release_if_empty ()
template <typename TBlockAllocator>
inline void
base_pool_allocator <TBlockAllocator>::release_if_empty ()
{
if (m_elts_free == m_elts_allocated)
release ();
}
inline pool_allocator::~pool_allocator ()
template <typename TBlockAllocator>
inline base_pool_allocator <TBlockAllocator>::~base_pool_allocator ()
{
release ();
}
/* Allocates one element from the pool specified. */
template <typename TBlockAllocator>
inline void*
pool_allocator::allocate ()
base_pool_allocator <TBlockAllocator>::allocate ()
{
if (!m_initialized)
initialize ();
@@ -327,7 +363,7 @@ pool_allocator::allocate ()
allocation_pool_list *block_header;
/* Make the block. */
block = XNEWVEC (char, m_block_size);
block = reinterpret_cast<char *> (TBlockAllocator::allocate ());
block_header = (allocation_pool_list*) block;
block += align_eight (sizeof (allocation_pool_list));
@@ -378,8 +414,9 @@ pool_allocator::allocate ()
}
/* Puts PTR back on POOL's free list. */
template <typename TBlockAllocator>
inline void
pool_allocator::remove (void *object)
base_pool_allocator <TBlockAllocator>::remove (void *object)
{
gcc_checking_assert (m_initialized);
@@ -412,15 +449,28 @@ pool_allocator::remove (void *object)
}
}
/* Number of elements currently active (not returned to pool). Used for cheap
consistency checks. */
template <typename TBlockAllocator>
inline size_t
base_pool_allocator <TBlockAllocator>::num_elts_current ()
{
return m_elts_allocated - m_elts_free;
}
/* Specialization of base_pool_allocator which should be used in most cases.
Another specialization may be needed, if object size is greater than
memory_block_pool::block_size (64 KB). */
typedef base_pool_allocator <memory_block_pool> pool_allocator;
/* Type based memory pool allocator. */
template <typename T>
class object_allocator
{
public:
/* Default constructor for pool allocator called NAME. Each block
has NUM elements. */
object_allocator (const char *name, size_t num CXX_MEM_STAT_INFO):
m_allocator (name, num, sizeof (T) PASS_MEM_STAT) {}
/* Default constructor for pool allocator called NAME. */
object_allocator (const char *name CXX_MEM_STAT_INFO):
m_allocator (name, sizeof (T) PASS_MEM_STAT) {}
inline void
release ()
@@ -448,6 +498,12 @@ public:
m_allocator.remove (object);
}
inline size_t
num_elts_current ()
{
return m_allocator.num_elts_current ();
}
private:
pool_allocator m_allocator;
};
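
For orientation, a small usage sketch of the revised interface; the pooled type, pool name and function name here are made up for illustration, and only the public object_allocator interface (allocate, remove, num_elts_current, release) is used:

/* A hypothetical pooled object.  */
struct foo_node
{
  int value;
  foo_node *next;
};

/* No block-size argument: elements per block are computed in
   base_pool_allocator::initialize from the shared 64 KB block size.  */
static object_allocator<foo_node> foo_pool ("foo_node pool");

static void
foo_pool_example ()
{
  /* The first allocation grabs a block from memory_block_pool.  */
  foo_node *n = foo_pool.allocate ();
  n->value = 42;

  /* Returning the object puts it on the pool's free list.  */
  foo_pool.remove (n);
  gcc_checking_assert (foo_pool.num_elts_current () == 0);

  /* Blocks go back to the shared pool for other allocators to reuse.  */
  foo_pool.release ();
}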

gcc/asan.c

@@ -350,7 +350,7 @@ struct asan_mem_ref
HOST_WIDE_INT access_size;
};
object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref", 10);
object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref");
/* Initializes an instance of asan_mem_ref. */

gcc/c-family/ChangeLog

@@ -1,3 +1,8 @@
2015-09-16 Mikhail Maltsev <maltsevm@gmail.com>
* c-format.c (check_format_arg): Adjust to use common block size in all
object pools.
2015-09-15 David Malcolm <dmalcolm@redhat.com>
* c-format.c (location_from_offset): Update for change in

gcc/c-family/c-format.c

@@ -1687,8 +1687,7 @@ check_format_arg (void *ctx, tree format_tree,
will decrement it if it finds there are extra arguments, but this way
need not adjust it for every return. */
res->number_other++;
object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool",
10);
object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool");
check_format_info_main (res, info, format_chars, format_length,
params, arg_num, fwt_pool);
}

gcc/cfg.c

@@ -1052,7 +1052,7 @@ void
initialize_original_copy_tables (void)
{
original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
("original_copy", 10);
("original_copy");
bb_original = new hash_table<bb_copy_hasher> (10);
bb_copy = new hash_table<bb_copy_hasher> (10);
loop_copy = new hash_table<bb_copy_hasher> (10);

gcc/coretypes.h

@@ -225,9 +225,16 @@ struct basic_block_def;
typedef struct basic_block_def *basic_block;
typedef const struct basic_block_def *const_basic_block;
#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
#define OBSTACK_CHUNK_SIZE 0
#if !defined (GENERATOR_FILE)
# define OBSTACK_CHUNK_SIZE memory_block_pool::block_size
# define obstack_chunk_alloc mempool_obstack_chunk_alloc
# define obstack_chunk_free mempool_obstack_chunk_free
#else
# define OBSTACK_CHUNK_SIZE 0
# define obstack_chunk_alloc xmalloc
# define obstack_chunk_free free
#endif
#define gcc_obstack_init(OBSTACK) \
obstack_specify_allocation ((OBSTACK), OBSTACK_CHUNK_SIZE, 0, \
obstack_chunk_alloc, \
@@ -328,6 +335,7 @@ typedef unsigned char uchar;
#include "hash-set.h"
#include "input.h"
#include "is-a.h"
#include "memory-block.h"
#endif /* GENERATOR_FILE && !USED_FOR_TARGET */
#endif /* coretypes.h */
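
Because OBSTACK_CHUNK_SIZE is now memory_block_pool::block_size for non-generator files, any obstack set up through gcc_obstack_init allocates and frees its chunks through the shared block pool. Roughly, as an illustrative sketch (the obstack and function names are invented, not part of the patch):

static struct obstack tmp_obstack;

static void
obstack_example ()
{
  /* Chunks are requested via mempool_obstack_chunk_alloc; a 64 KB request
     is served from memory_block_pool's free list when possible.  */
  gcc_obstack_init (&tmp_obstack);

  char *buf = XOBNEWVEC (&tmp_obstack, char, 128);
  memset (buf, 0, 128);

  /* Freed chunks of exactly block_size bytes are pushed back onto the
     shared free list by mempool_obstack_chunk_free.  */
  obstack_free (&tmp_obstack, NULL);
}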

gcc/cselib.c

@@ -246,11 +246,11 @@ static unsigned int cfa_base_preserved_regno = INVALID_REGNUM;
each time memory is invalidated. */
static cselib_val *first_containing_mem = &dummy_val;
static object_allocator<elt_list> elt_list_pool ("elt_list", 10);
static object_allocator<elt_loc_list> elt_loc_list_pool ("elt_loc_list", 10);
static object_allocator<cselib_val> cselib_val_pool ("cselib_val_list", 10);
static object_allocator<elt_list> elt_list_pool ("elt_list");
static object_allocator<elt_loc_list> elt_loc_list_pool ("elt_loc_list");
static object_allocator<cselib_val> cselib_val_pool ("cselib_val_list");
static pool_allocator value_pool ("value", 100, RTX_CODE_SIZE (VALUE));
static pool_allocator value_pool ("value", RTX_CODE_SIZE (VALUE));
/* If nonnull, cselib will call this function before freeing useless
VALUEs. A VALUE is deemed useless if its "locs" field is null. */

gcc/df-problems.c

@@ -1997,8 +1997,7 @@ static void
df_chain_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
df_chain_remove_problem ();
df_chain->block_pool = new object_allocator<df_link> ("df_chain_block pool",
50);
df_chain->block_pool = new object_allocator<df_link> ("df_chain_block pool");
df_chain->optional_p = true;
}

gcc/df-scan.c

@@ -133,8 +133,6 @@ static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses
it gets run. It also has no need for the iterative solver.
----------------------------------------------------------------------------*/
#define SCAN_PROBLEM_DATA_BLOCK_SIZE 512
/* Problem data for the scanning dataflow function. */
struct df_scan_problem_data
{
@@ -253,17 +251,17 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
df_scan->computed = true;
problem_data->ref_base_pool = new object_allocator<df_base_ref>
("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
("df_scan ref base");
problem_data->ref_artificial_pool = new object_allocator<df_artificial_ref>
("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
("df_scan ref artificial");
problem_data->ref_regular_pool = new object_allocator<df_regular_ref>
("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
("df_scan ref regular");
problem_data->insn_pool = new object_allocator<df_insn_info>
("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
("df_scan insn");
problem_data->reg_pool = new object_allocator<df_reg_info>
("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
("df_scan reg");
problem_data->mw_reg_pool = new object_allocator<df_mw_hardreg>
("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
("df_scan mw_reg");
bitmap_obstack_initialize (&problem_data->reg_bitmaps);
bitmap_obstack_initialize (&problem_data->insn_bitmaps);

gcc/dse.c

@@ -307,11 +307,9 @@ lowpart_bitmask (int n)
return mask >> (HOST_BITS_PER_WIDE_INT - n);
}
static object_allocator<store_info> cse_store_info_pool ("cse_store_info_pool",
100);
static object_allocator<store_info> cse_store_info_pool ("cse_store_info_pool");
static object_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool",
100);
static object_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool");
/* This structure holds information about a load. These are only
built for rtx bases. */
@@ -336,8 +334,7 @@ struct read_info_type
};
typedef struct read_info_type *read_info_t;
static object_allocator<read_info_type> read_info_type_pool
("read_info_pool", 100);
static object_allocator<read_info_type> read_info_type_pool ("read_info_pool");
/* One of these records is created for each insn. */
@@ -426,8 +423,7 @@ struct insn_info_type
};
typedef struct insn_info_type *insn_info_t;
static object_allocator<insn_info_type> insn_info_type_pool
("insn_info_pool", 100);
static object_allocator<insn_info_type> insn_info_type_pool ("insn_info_pool");
/* The linked list of stores that are under consideration in this
basic block. */
@@ -494,7 +490,7 @@ struct dse_bb_info_type
typedef struct dse_bb_info_type *bb_info_t;
static object_allocator<dse_bb_info_type> dse_bb_info_type_pool
("bb_info_pool", 100);
("bb_info_pool");
/* Table to hold all bb_infos. */
static bb_info_t *bb_table;
@@ -564,8 +560,7 @@ struct group_info
int offset_map_size_n, offset_map_size_p;
};
static object_allocator<group_info> group_info_pool
("rtx_group_info_pool", 100);
static object_allocator<group_info> group_info_pool ("rtx_group_info_pool");
/* Index into the rtx_group_vec. */
static int rtx_group_next_id;
@@ -589,7 +584,7 @@ struct deferred_change
};
static object_allocator<deferred_change> deferred_change_pool
("deferred_change_pool", 10);
("deferred_change_pool");
static deferred_change *deferred_change_list = NULL;

gcc/et-forest.c

@@ -54,8 +54,8 @@ struct et_occ
depth. */
};
static object_allocator<et_node> et_nodes ("et_nodes pool", 300);
static object_allocator<et_occ> et_occurrences ("et_occ pool", 300);
static object_allocator<et_node> et_nodes ("et_nodes pool");
static object_allocator<et_occ> et_occurrences ("et_occ pool");
/* Changes depth of OCC to D. */

gcc/ipa-cp.c

@@ -276,16 +276,16 @@ public:
/* Allocation pools for values and their sources in ipa-cp. */
object_allocator<ipcp_value<tree> > ipcp_cst_values_pool
("IPA-CP constant values", 32);
("IPA-CP constant values");
object_allocator<ipcp_value<ipa_polymorphic_call_context> >
ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts", 32);
ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts");
object_allocator<ipcp_value_source<tree> > ipcp_sources_pool
("IPA-CP value sources", 64);
("IPA-CP value sources");
object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
("IPA_CP aggregate lattices", 32);
("IPA_CP aggregate lattices");
/* Maximal count found in program. */

gcc/ipa-inline-analysis.c

@@ -143,7 +143,7 @@ vec<inline_edge_summary_t> inline_edge_summary_vec;
vec<edge_growth_cache_entry> edge_growth_cache;
/* Edge predicates goes here. */
static object_allocator<predicate> edge_predicate_pool ("edge predicates", 10);
static object_allocator<predicate> edge_predicate_pool ("edge predicates");
/* Return true predicate (tautology).
We represent it by empty list of clauses. */

gcc/ipa-profile.c

@@ -87,8 +87,7 @@ struct histogram_entry
duplicate entries. */
vec<histogram_entry *> histogram;
static object_allocator<histogram_entry> histogram_pool
("IPA histogram", 10);
static object_allocator<histogram_entry> histogram_pool ("IPA histogram");
/* Hashtable support for storing SSA names hashed by their SSA_NAME_VAR. */

gcc/ipa-prop.c

@@ -95,7 +95,7 @@ struct ipa_cst_ref_desc
/* Allocation pool for reference descriptions. */
static object_allocator<ipa_cst_ref_desc> ipa_refdesc_pool
("IPA-PROP ref descriptions", 32);
("IPA-PROP ref descriptions");
/* Return true if DECL_FUNCTION_SPECIFIC_OPTIMIZATION of the decl associated
with NODE should prevent us from analyzing it for the purposes of IPA-CP. */

gcc/ira-build.c

@@ -420,9 +420,9 @@ rebuild_regno_allocno_maps (void)
/* Pools for allocnos, allocno live ranges and objects. */
static object_allocator<live_range> live_range_pool ("live ranges", 100);
static object_allocator<ira_allocno> allocno_pool ("allocnos", 100);
static object_allocator<ira_object> object_pool ("objects", 100);
static object_allocator<live_range> live_range_pool ("live ranges");
static object_allocator<ira_allocno> allocno_pool ("allocnos");
static object_allocator<ira_object> object_pool ("objects");
/* Vec containing references to all created allocnos. It is a
container of array allocnos. */
@@ -1170,7 +1170,7 @@ finish_allocnos (void)
/* Pools for allocno preferences. */
static object_allocator <ira_allocno_pref> pref_pool ("prefs", 100);
static object_allocator <ira_allocno_pref> pref_pool ("prefs");
/* Vec containing references to all created preferences. It is a
container of array ira_prefs. */
@@ -1357,7 +1357,7 @@ finish_prefs (void)
/* Pools for copies. */
static object_allocator<ira_allocno_copy> copy_pool ("copies", 100);
static object_allocator<ira_allocno_copy> copy_pool ("copies");
/* Vec containing references to all created copies. It is a
container of array ira_copies. */
@@ -1630,8 +1630,7 @@ initiate_cost_vectors (void)
{
aclass = ira_allocno_classes[i];
cost_vector_pool[aclass] = new pool_allocator
("cost vectors", 100,
sizeof (int) * (ira_class_hard_regs_num[aclass]));
("cost vectors", sizeof (int) * (ira_class_hard_regs_num[aclass]));
}
}

gcc/ira-color.c

@@ -1157,7 +1157,7 @@ setup_profitable_hard_regs (void)
/* Pool for update cost records. */
static object_allocator<update_cost_record> update_cost_record_pool
("update cost records", 100);
("update cost records");
/* Return new update cost record with given params. */
static struct update_cost_record *

gcc/lra-lives.c

@@ -107,8 +107,7 @@ static sparseset unused_set, dead_set;
static bitmap_head temp_bitmap;
/* Pool for pseudo live ranges. */
static object_allocator<lra_live_range> lra_live_range_pool
("live ranges", 100);
static object_allocator<lra_live_range> lra_live_range_pool ("live ranges");
/* Free live range list LR. */
static void

gcc/lra.c

@@ -533,7 +533,7 @@ lra_update_dups (lra_insn_recog_data_t id, signed char *nops)
insns. */
/* Pools for insn reg info. */
object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs", 100);
object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs");
/* Create LRA insn related info about a reference to REGNO in INSN with
TYPE (in/out/inout), biggest reference mode MODE, flag that it is
@@ -744,7 +744,7 @@ free_insn_recog_data (lra_insn_recog_data_t data)
}
/* Pools for copies. */
static object_allocator<lra_copy> lra_copy_pool ("lra copies", 100);
static object_allocator<lra_copy> lra_copy_pool ("lra copies");
/* Finish LRA data about all insns. */
static void

gcc/memory-block.cc (new file)

@@ -0,0 +1,64 @@
/* Shared pool of memory blocks for pool allocators.
Copyright (C) 2015 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memory-block.h"
#include "obstack.h"
/* Global singleton-like instance. */
memory_block_pool memory_block_pool::instance;
memory_block_pool::memory_block_pool () : m_blocks (NULL) {}
/* Return all blocks from free list to the OS. */
void
memory_block_pool::clear_free_list ()
{
while (m_blocks)
{
block_list *next = m_blocks->m_next;
XDELETEVEC (m_blocks);
m_blocks = next;
}
}
/* Allocate a chunk for obstack. Use the pool if requested chunk size matches
the size of blocks in the pool. */
void *
mempool_obstack_chunk_alloc (size_t size)
{
if (size == memory_block_pool::block_size)
return memory_block_pool::allocate ();
else
return XNEWVEC (char, size);
}
/* Free previously allocated obstack chunk. */
void
mempool_obstack_chunk_free (void *chunk)
{
size_t size = (reinterpret_cast<_obstack_chunk *> (chunk)->limit
- reinterpret_cast<char *> (chunk));
if (size == memory_block_pool::block_size)
memory_block_pool::release (chunk);
else
XDELETEVEC (chunk);
}

gcc/memory-block.h (new file)

@@ -0,0 +1,75 @@
/* Shared pool of memory blocks for pool allocators.
Copyright (C) 2015 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef MEMORY_BLOCK_H
#define MEMORY_BLOCK_H
/* Shared pool which allows other memory pools to reuse each others' allocated
memory blocks instead of calling free/malloc again. */
class memory_block_pool
{
public:
/* Blocks have fixed size. This is necessary for sharing. */
static const size_t block_size = 64 * 1024;
memory_block_pool ();
static inline void *allocate () ATTRIBUTE_MALLOC;
static inline void release (void *);
void clear_free_list ();
private:
/* memory_block_pool singleton instance, defined in memory-block.cc. */
static memory_block_pool instance;
struct block_list
{
block_list *m_next;
};
/* Free list. */
block_list *m_blocks;
};
/* Allocate a single block. Reuse a previously returned block, if possible. */
inline void *
memory_block_pool::allocate ()
{
if (instance.m_blocks == NULL)
return XNEWVEC (char, block_size);
void *result = instance.m_blocks;
instance.m_blocks = instance.m_blocks->m_next;
return result;
}
/* Return UNCAST_BLOCK to the pool. */
inline void
memory_block_pool::release (void *uncast_block)
{
block_list *block = new (uncast_block) block_list;
block->m_next = instance.m_blocks;
instance.m_blocks = block;
}
extern void *mempool_obstack_chunk_alloc (size_t) ATTRIBUTE_MALLOC;
extern void mempool_obstack_chunk_free (void *);
#endif /* MEMORY_BLOCK_H */
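
The pool itself is a LIFO free list of fixed-size blocks: release constructs a block_list node in place at the start of the returned block, and allocate pops it again, falling back to XNEWVEC only when the list is empty. A sketch of the calling pattern (illustrative only; in the patch these entry points are used by the pool allocators and the obstack hooks above):

static void
block_pool_example ()
{
  /* Reuses a cached 64 KB block if one is on the free list, otherwise
     allocates a fresh one with XNEWVEC.  */
  void *block = memory_block_pool::allocate ();

  /* ... a pool allocator would place its list header at the start of the
     block and carve the remainder into elements ...  */

  /* Push the block back so the next allocate () call can hand it out.  */
  memory_block_pool::release (block);
}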

gcc/regcprop.c

@@ -75,7 +75,7 @@ struct value_data
};
static object_allocator<queued_debug_insn_change> queued_debug_insn_change_pool
("debug insn changes pool", 256);
("debug insn changes pool");
static bool skip_debug_insn_p;

gcc/sched-deps.c

@@ -4059,14 +4059,10 @@ sched_deps_init (bool global_p)
if (global_p)
{
dl_pool = new object_allocator<_deps_list> ("deps_list",
/* Allocate lists for one block at a time. */
insns_in_block);
dn_pool = new object_allocator<_dep_node> ("dep_node",
/* Allocate nodes for one block at a time.
We assume that average insn has
5 producers. */
5 * insns_in_block);
dl_pool = new object_allocator<_deps_list> ("deps_list");
/* Allocate lists for one block at a time. */
dn_pool = new object_allocator<_dep_node> ("dep_node");
/* Allocate nodes for one block at a time. */
}
}

gcc/sel-sched-ir.c

@@ -59,7 +59,7 @@ vec<sel_region_bb_info_def>
sel_region_bb_info = vNULL;
/* A pool for allocating all lists. */
object_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500);
object_allocator<_list_node> sched_lists_pool ("sel-sched-lists");
/* This contains information about successors for compute_av_set. */
struct succs_info current_succs;

gcc/stmt.c

@@ -1138,7 +1138,7 @@ expand_case (gswitch *stmt)
struct case_node *case_list = 0;
/* A pool for case nodes. */
object_allocator<case_node> case_node_pool ("struct case_node pool", 100);
object_allocator<case_node> case_node_pool ("struct case_node pool");
/* An ERROR_MARK occurs for various reasons including invalid data type.
??? Can this still happen, with GIMPLE and all? */
@@ -1314,8 +1314,7 @@ expand_sjlj_dispatch_table (rtx dispatch_index,
{
/* Similar to expand_case, but much simpler. */
struct case_node *case_list = 0;
object_allocator<case_node> case_node_pool ("struct sjlj_case pool",
ncases);
object_allocator<case_node> case_node_pool ("struct sjlj_case pool");
tree index_expr = make_tree (index_type, dispatch_index);
tree minval = build_int_cst (index_type, 0);
tree maxval = CASE_LOW (dispatch_table.last ());

gcc/tree-sra.c

@@ -277,7 +277,7 @@ typedef struct access *access_p;
/* Alloc pool for allocating access structures. */
static object_allocator<struct access> access_pool ("SRA accesses", 16);
static object_allocator<struct access> access_pool ("SRA accesses");
/* A structure linking lhs and rhs accesses from an aggregate assignment. They
are used to propagate subaccesses from rhs to lhs as long as they don't
@@ -289,7 +289,7 @@ struct assign_link
};
/* Alloc pool for allocating assign link structures. */
static object_allocator<assign_link> assign_link_pool ("SRA links", 16);
static object_allocator<assign_link> assign_link_pool ("SRA links");
/* Base (tree) -> Vector (vec<access_p> *) map. */
static hash_map<tree, auto_vec<access_p> > *base_access_vec;

gcc/tree-ssa-math-opts.c

@@ -547,8 +547,7 @@ pass_cse_reciprocals::execute (function *fun)
basic_block bb;
tree arg;
occ_pool = new object_allocator<occurrence>
("dominators for recip", n_basic_blocks_for_fn (fun) / 3 + 1);
occ_pool = new object_allocator<occurrence> ("dominators for recip");
memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
calculate_dominance_info (CDI_DOMINATORS);

gcc/tree-ssa-pre.c

@@ -349,7 +349,7 @@ clear_expression_ids (void)
expressions.release ();
}
static object_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes", 30);
static object_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes");
/* Given an SSA_NAME NAME, get or create a pre_expr to represent it. */
@@ -488,7 +488,7 @@ static unsigned int get_expr_value_id (pre_expr);
/* We can add and remove elements and entries to and from sets
and hash tables, so we use alloc pools for them. */
static object_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets", 30);
static object_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets");
static bitmap_obstack grand_bitmap_obstack;
/* Set of blocks with statements that have had their EH properties changed. */

gcc/tree-ssa-reassoc.c

@@ -209,8 +209,8 @@ typedef struct operand_entry
unsigned int count;
} *operand_entry_t;
static object_allocator<operand_entry> operand_entry_pool ("operand entry pool",
30);
static object_allocator<operand_entry> operand_entry_pool
("operand entry pool");
/* This is used to assign a unique ID to each struct operand_entry
so that qsort results are identical on different hosts. */

gcc/tree-ssa-sccvn.c

@@ -4146,9 +4146,9 @@ allocate_vn_table (vn_tables_t table)
table->references = new vn_reference_table_type (23);
gcc_obstack_init (&table->nary_obstack);
table->phis_pool = new object_allocator<vn_phi_s> ("VN phis", 30);
table->phis_pool = new object_allocator<vn_phi_s> ("VN phis");
table->references_pool = new object_allocator<vn_reference_s>
("VN references", 30);
("VN references");
}
/* Free a value number table. */

gcc/tree-ssa-strlen.c

@@ -113,8 +113,7 @@ typedef struct strinfo_struct
} *strinfo;
/* Pool for allocating strinfo_struct entries. */
static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool",
64);
static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool");
/* Vector mapping positive string indexes to strinfo, for the
current basic block. The first pointer in the vector is special,

gcc/tree-ssa-structalias.c

@@ -323,7 +323,7 @@ static inline bool type_can_have_subvars (const_tree);
/* Pool of variable info structures. */
static object_allocator<variable_info> variable_info_pool
("Variable info pool", 30);
("Variable info pool");
/* Map varinfo to final pt_solution. */
static hash_map<varinfo_t, pt_solution *> *final_solutions;
@@ -523,7 +523,7 @@ struct constraint
/* List of constraints that we use to build the constraint graph from. */
static vec<constraint_t> constraints;
static object_allocator<constraint> constraint_pool ("Constraint pool", 30);
static object_allocator<constraint> constraint_pool ("Constraint pool");
/* The constraint graph is represented as an array of bitmaps
containing successor nodes. */

gcc/var-tracking.c

@@ -576,28 +576,27 @@ typedef struct variable_tracking_info_def
} *variable_tracking_info;
/* Alloc pool for struct attrs_def. */
object_allocator<attrs_def> attrs_def_pool ("attrs_def pool", 1024);
object_allocator<attrs_def> attrs_def_pool ("attrs_def pool");
/* Alloc pool for struct variable_def with MAX_VAR_PARTS entries. */
static pool_allocator var_pool
("variable_def pool", 64, sizeof (variable_def) +
("variable_def pool", sizeof (variable_def) +
(MAX_VAR_PARTS - 1) * sizeof (((variable)NULL)->var_part[0]));
/* Alloc pool for struct variable_def with a single var_part entry. */
static pool_allocator valvar_pool
("small variable_def pool", 256, sizeof (variable_def));
("small variable_def pool", sizeof (variable_def));
/* Alloc pool for struct location_chain_def. */
/* Alloc pool for struct location_chain. */
static object_allocator<location_chain> location_chain_pool
("location_chain pool", 1024);
("location_chain pool");
/* Alloc pool for struct shared_hash_def. */
static object_allocator<shared_hash> shared_hash_pool
("shared_hash pool", 256);
/* Alloc pool for struct shared_hash. */
static object_allocator<shared_hash> shared_hash_pool ("shared_hash pool");
/* Alloc pool for struct loc_exp_dep_s for NOT_ONEPART variables. */
object_allocator<loc_exp_dep> loc_exp_dep_pool ("loc_exp_dep pool", 64);
object_allocator<loc_exp_dep> loc_exp_dep_pool ("loc_exp_dep pool");
/* Changed variables, notes will be emitted for them. */
static variable_table_type *changed_variables;