Change df-scan.c to use the new type-based pool allocator.

	* df-scan.c (struct df_scan_problem_data): Use new type-based pool allocator.
	(df_scan_free_internal): Likewise.
	(df_scan_alloc): Likewise.
	(df_grow_reg_info): Likewise.
	(df_free_ref): Likewise.
	(df_insn_create_insn_record): Likewise.
	(df_mw_hardreg_chain_delete): Likewise.
	(df_insn_info_delete): Likewise.
	(df_free_collection_rec): Likewise.
	(df_mw_hardreg_chain_delete_eq_uses): Likewise.
	(df_sort_and_compress_mws): Likewise.
	(df_ref_create_structure): Likewise.
	(df_ref_record): Likewise.

From-SVN: r223954
Author: Martin Liska  Date: 2015-06-01 14:41:18 +02:00  Committed by: Martin Liska
commit e956943eb1 (parent 295e704728)
2 changed files with 62 additions and 48 deletions

ChangeLog:

@@ -1,3 +1,19 @@
+2015-06-01  Martin Liska  <mliska@suse.cz>
+
+	* df-scan.c (struct df_scan_problem_data): Use new type-based pool allocator.
+	(df_scan_free_internal): Likewise.
+	(df_scan_alloc): Likewise.
+	(df_grow_reg_info): Likewise.
+	(df_free_ref): Likewise.
+	(df_insn_create_insn_record): Likewise.
+	(df_mw_hardreg_chain_delete): Likewise.
+	(df_insn_info_delete): Likewise.
+	(df_free_collection_rec): Likewise.
+	(df_mw_hardreg_chain_delete_eq_uses): Likewise.
+	(df_sort_and_compress_mws): Likewise.
+	(df_ref_create_structure): Likewise.
+	(df_ref_record): Likewise.
+
 2015-06-01  Martin Liska  <mliska@suse.cz>
 
 	* df-problems.c (df_chain_create): Use new type-based pool allocator.
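Every call site in df-scan.c follows the same conversion: an untyped alloc_pool created with create_alloc_pool and used through pool_alloc/pool_free/free_alloc_pool becomes a heap-allocated pool_allocator<T> used through allocate (), remove () and delete. The sketch below is only a minimal illustration of that calling shape, not GCC's alloc-pool.h: toy_pool_allocator, df_reg_info_like and main are invented names for this example, and the toy recycles individual objects instead of carving fixed-size blocks the way the real pool does; only the constructor-with-name-and-block-size, allocate (), remove () and delete shape is taken from the hunks.

// Illustrative toy only -- not GCC's pool_allocator.  It mimics the call
// shape used in the df-scan.c hunks that follow: construct with a name and
// a block size, allocate (), remove (), and delete the pool when done.
#include <cstddef>
#include <new>
#include <vector>

template <typename T>
class toy_pool_allocator
{
public:
  toy_pool_allocator (const char *name, size_t block_size)
    : m_name (name), m_block_size (block_size) {}

  ~toy_pool_allocator ()
  {
    // Release every piece of storage that was handed back via remove ().
    for (void *chunk : m_free)
      ::operator delete (chunk);
  }

  // Default-construct a T in recycled or freshly obtained storage.
  T *allocate ()
  {
    void *p;
    if (!m_free.empty ())
      {
        p = m_free.back ();
        m_free.pop_back ();
      }
    else
      p = ::operator new (sizeof (T));
    return new (p) T ();
  }

  // Destroy the object and keep its storage for reuse.
  void remove (T *object)
  {
    object->~T ();
    m_free.push_back (object);
  }

private:
  const char *m_name;       // Pool name, kept only for debugging parity.
  size_t m_block_size;      // Unused in this toy; the real pool grows in blocks.
  std::vector<void *> m_free;
};

// Hypothetical stand-in for one of the df-scan.c record types.
struct df_reg_info_like { int regno; };

int main ()
{
  // Before: reg_pool = create_alloc_pool ("df_scan reg",
  //                                       sizeof (struct df_reg_info), 512);
  //         reg_info = (struct df_reg_info *) pool_alloc (reg_pool);
  //         pool_free (reg_pool, reg_info);
  //         free_alloc_pool (reg_pool);
  // After, with the type-based interface:
  toy_pool_allocator<df_reg_info_like> *reg_pool
    = new toy_pool_allocator<df_reg_info_like> ("df_scan reg", 512);
  df_reg_info_like *reg_info = reg_pool->allocate ();
  reg_info->regno = 0;
  reg_pool->remove (reg_info);
  delete reg_pool;
  return 0;
}

Note also that the block size moves from the local block_size variable in df_scan_alloc to the new SCAN_PROBLEM_DATA_BLOCK_SIZE macro, which is why the df_scan_alloc hunks below both delete the local and reference the macro.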

df-scan.c:

@@ -159,15 +159,18 @@ static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses
    it gets run.  It also has no need for the iterative solver.
 ----------------------------------------------------------------------------*/
 
+#define SCAN_PROBLEM_DATA_BLOCK_SIZE 512
+
 /* Problem data for the scanning dataflow function.  */
 struct df_scan_problem_data
 {
-  alloc_pool ref_base_pool;
-  alloc_pool ref_artificial_pool;
-  alloc_pool ref_regular_pool;
-  alloc_pool insn_pool;
-  alloc_pool reg_pool;
-  alloc_pool mw_reg_pool;
+  pool_allocator<df_base_ref> *ref_base_pool;
+  pool_allocator<df_artificial_ref> *ref_artificial_pool;
+  pool_allocator<df_regular_ref> *ref_regular_pool;
+  pool_allocator<df_insn_info> *insn_pool;
+  pool_allocator<df_reg_info> *reg_pool;
+  pool_allocator<df_mw_hardreg> *mw_reg_pool;
+
   bitmap_obstack reg_bitmaps;
   bitmap_obstack insn_bitmaps;
 };
@@ -218,12 +221,12 @@ df_scan_free_internal (void)
   bitmap_clear (&df->insns_to_rescan);
   bitmap_clear (&df->insns_to_notes_rescan);
 
-  free_alloc_pool (problem_data->ref_base_pool);
-  free_alloc_pool (problem_data->ref_artificial_pool);
-  free_alloc_pool (problem_data->ref_regular_pool);
-  free_alloc_pool (problem_data->insn_pool);
-  free_alloc_pool (problem_data->reg_pool);
-  free_alloc_pool (problem_data->mw_reg_pool);
+  delete problem_data->ref_base_pool;
+  delete problem_data->ref_artificial_pool;
+  delete problem_data->ref_regular_pool;
+  delete problem_data->insn_pool;
+  delete problem_data->reg_pool;
+  delete problem_data->mw_reg_pool;
   bitmap_obstack_release (&problem_data->reg_bitmaps);
   bitmap_obstack_release (&problem_data->insn_bitmaps);
   free (df_scan->problem_data);
@@ -264,7 +267,6 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
 {
   struct df_scan_problem_data *problem_data;
   unsigned int insn_num = get_max_uid () + 1;
-  unsigned int block_size = 512;
   basic_block bb;
 
   /* Given the number of pools, this is really faster than tearing
@@ -276,24 +278,18 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
   df_scan->problem_data = problem_data;
   df_scan->computed = true;
 
-  problem_data->ref_base_pool
-    = create_alloc_pool ("df_scan ref base",
-			 sizeof (struct df_base_ref), block_size);
-  problem_data->ref_artificial_pool
-    = create_alloc_pool ("df_scan ref artificial",
-			 sizeof (struct df_artificial_ref), block_size);
-  problem_data->ref_regular_pool
-    = create_alloc_pool ("df_scan ref regular",
-			 sizeof (struct df_regular_ref), block_size);
-  problem_data->insn_pool
-    = create_alloc_pool ("df_scan insn",
-			 sizeof (struct df_insn_info), block_size);
-  problem_data->reg_pool
-    = create_alloc_pool ("df_scan reg",
-			 sizeof (struct df_reg_info), block_size);
-  problem_data->mw_reg_pool
-    = create_alloc_pool ("df_scan mw_reg",
-			 sizeof (struct df_mw_hardreg), block_size / 16);
+  problem_data->ref_base_pool = new pool_allocator<df_base_ref>
+    ("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->ref_artificial_pool = new pool_allocator<df_artificial_ref>
+    ("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->ref_regular_pool = new pool_allocator<df_regular_ref>
+    ("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->insn_pool = new pool_allocator<df_insn_info>
+    ("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->reg_pool = new pool_allocator<df_reg_info>
+    ("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->mw_reg_pool = new pool_allocator<df_mw_hardreg>
+    ("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
 
   bitmap_obstack_initialize (&problem_data->reg_bitmaps);
   bitmap_obstack_initialize (&problem_data->insn_bitmaps);
@@ -519,13 +515,14 @@ df_grow_reg_info (void)
     {
       struct df_reg_info *reg_info;
 
-      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+      // TODO
+      reg_info = problem_data->reg_pool->allocate ();
       memset (reg_info, 0, sizeof (struct df_reg_info));
       df->def_regs[i] = reg_info;
-      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+      reg_info = problem_data->reg_pool->allocate ();
       memset (reg_info, 0, sizeof (struct df_reg_info));
       df->use_regs[i] = reg_info;
-      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+      reg_info = problem_data->reg_pool->allocate ();
       memset (reg_info, 0, sizeof (struct df_reg_info));
       df->eq_use_regs[i] = reg_info;
       df->def_info.begin[i] = 0;
@@ -740,15 +737,17 @@ df_free_ref (df_ref ref)
   switch (DF_REF_CLASS (ref))
     {
     case DF_REF_BASE:
-      pool_free (problem_data->ref_base_pool, ref);
+      problem_data->ref_base_pool->remove ((df_base_ref *) (ref));
       break;
 
     case DF_REF_ARTIFICIAL:
-      pool_free (problem_data->ref_artificial_pool, ref);
+      problem_data->ref_artificial_pool->remove
+	((df_artificial_ref *) (ref));
       break;
 
     case DF_REF_REGULAR:
-      pool_free (problem_data->ref_regular_pool, ref);
+      problem_data->ref_regular_pool->remove
+	((df_regular_ref *) (ref));
       break;
     }
 }
@@ -851,7 +850,7 @@ df_insn_create_insn_record (rtx_insn *insn)
   insn_rec = DF_INSN_INFO_GET (insn);
   if (!insn_rec)
     {
-      insn_rec = (struct df_insn_info *) pool_alloc (problem_data->insn_pool);
+      insn_rec = problem_data->insn_pool->allocate ();
       DF_INSN_INFO_SET (insn, insn_rec);
     }
   memset (insn_rec, 0, sizeof (struct df_insn_info));
@@ -899,7 +898,7 @@ df_mw_hardreg_chain_delete (struct df_mw_hardreg *hardregs)
   for (; hardregs; hardregs = next)
     {
       next = DF_MWS_NEXT (hardregs);
-      pool_free (problem_data->mw_reg_pool, hardregs);
+      problem_data->mw_reg_pool->remove (hardregs);
     }
 }
@@ -940,7 +939,7 @@ df_insn_info_delete (unsigned int uid)
       df_ref_chain_delete (insn_info->uses);
       df_ref_chain_delete (insn_info->eq_uses);
 
-      pool_free (problem_data->insn_pool, insn_info);
+      problem_data->insn_pool->remove (insn_info);
       DF_INSN_UID_SET (uid, NULL);
     }
 }
@@ -1024,7 +1023,7 @@ df_free_collection_rec (struct df_collection_rec *collection_rec)
   FOR_EACH_VEC_ELT (collection_rec->eq_use_vec, ix, ref)
     df_free_ref (ref);
   FOR_EACH_VEC_ELT (collection_rec->mw_vec, ix, mw)
-    pool_free (problem_data->mw_reg_pool, mw);
+    problem_data->mw_reg_pool->remove (mw);
 
   collection_rec->def_vec.release ();
   collection_rec->use_vec.release ();
@@ -1949,7 +1948,7 @@ df_mw_hardreg_chain_delete_eq_uses (struct df_insn_info *insn_info)
       if (mw->flags & DF_REF_IN_NOTE)
 	{
 	  *mw_ptr = DF_MWS_NEXT (mw);
-	  pool_free (problem_data->mw_reg_pool, mw);
+	  problem_data->mw_reg_pool->remove (mw);
 	}
       else
 	mw_ptr = &DF_MWS_NEXT (mw);
@@ -2296,8 +2295,7 @@ df_sort_and_compress_mws (vec<df_mw_hardreg_ptr, va_heap> *mw_vec)
       while (i + dist + 1 < count
 	     && df_mw_equal_p ((*mw_vec)[i], (*mw_vec)[i + dist + 1]))
 	{
-	  pool_free (problem_data->mw_reg_pool,
-		     (*mw_vec)[i + dist + 1]);
+	  problem_data->mw_reg_pool->remove ((*mw_vec)[i + dist + 1]);
 	  dist++;
 	}
       /* Copy it down to the next position.  */
@@ -2525,18 +2523,18 @@ df_ref_create_structure (enum df_ref_class cl,
   switch (cl)
     {
     case DF_REF_BASE:
-      this_ref = (df_ref) pool_alloc (problem_data->ref_base_pool);
+      this_ref = (df_ref) (problem_data->ref_base_pool->allocate ());
       gcc_checking_assert (loc == NULL);
       break;
 
     case DF_REF_ARTIFICIAL:
-      this_ref = (df_ref) pool_alloc (problem_data->ref_artificial_pool);
+      this_ref = (df_ref) (problem_data->ref_artificial_pool->allocate ());
       this_ref->artificial_ref.bb = bb;
       gcc_checking_assert (loc == NULL);
       break;
 
     case DF_REF_REGULAR:
-      this_ref = (df_ref) pool_alloc (problem_data->ref_regular_pool);
+      this_ref = (df_ref) (problem_data->ref_regular_pool->allocate ());
       this_ref->regular_ref.loc = loc;
       gcc_checking_assert (loc);
       break;
@@ -2638,7 +2636,7 @@ df_ref_record (enum df_ref_class cl,
 	  ref_flags |= DF_REF_PARTIAL;
 	  ref_flags |= DF_REF_MW_HARDREG;
 
-	  hardreg = (struct df_mw_hardreg *) pool_alloc (problem_data->mw_reg_pool);
+	  hardreg = problem_data->mw_reg_pool->allocate ();
 	  hardreg->type = ref_type;
 	  hardreg->flags = ref_flags;
 	  hardreg->mw_reg = reg;