diff --git a/gcc/analyzer/engine.cc b/gcc/analyzer/engine.cc
index 48320bc062e..5b09e37ae65 100644
--- a/gcc/analyzer/engine.cc
+++ b/gcc/analyzer/engine.cc
@@ -64,6 +64,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "analyzer/bar-chart.h"
 #include <zlib.h>
 #include "plugin.h"
+#include "target.h"
 
 /* For an overview, see gcc/doc/analyzer.texi.  */
 
@@ -4821,6 +4822,13 @@ impl_run_checkers (logger *logger)
 {
   LOG_SCOPE (logger);
 
+  if (logger)
+    {
+      logger->log ("BITS_BIG_ENDIAN: %i", BITS_BIG_ENDIAN ? 1 : 0);
+      logger->log ("BYTES_BIG_ENDIAN: %i", BYTES_BIG_ENDIAN ? 1 : 0);
+      logger->log ("WORDS_BIG_ENDIAN: %i", WORDS_BIG_ENDIAN ? 1 : 0);
+    }
+
   /* If using LTO, ensure that the cgraph nodes have function bodies.  */
   cgraph_node *node;
   FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
diff --git a/gcc/analyzer/region-model-manager.cc b/gcc/analyzer/region-model-manager.cc
index 0ca0c8ad02e..621eff0e139 100644
--- a/gcc/analyzer/region-model-manager.cc
+++ b/gcc/analyzer/region-model-manager.cc
@@ -431,6 +431,60 @@ region_model_manager::get_or_create_cast (tree type, const svalue *arg)
   return get_or_create_unaryop (type, op, arg);
 }
 
+/* Subroutine of region_model_manager::maybe_fold_binop for handling
+   (TYPE)(COMPOUND_SVAL BIT_AND_EXPR CST) that may have been generated by
+   optimize_bit_field_compare, where CST is from ARG1.
+
+   Support masking out bits from a compound_svalue for comparing a bitfield
+   against a value, as generated by optimize_bit_field_compare for
+   BITFIELD == VALUE.
+
+   If COMPOUND_SVAL has a value for the appropriate bits, return it,
+   shifted accordingly.
+   Otherwise return NULL.  */
+
+const svalue *
+region_model_manager::
+maybe_undo_optimize_bit_field_compare (tree type,
+				       const compound_svalue *compound_sval,
+				       tree cst,
+				       const svalue *arg1)
+{
+  if (type != unsigned_char_type_node)
+    return NULL;
+
+  const binding_map &map = compound_sval->get_map ();
+  unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (cst);
+  /* If "mask" is a contiguous range of set bits, see if the
+     compound_sval has a value for those bits.  */
+  bit_range bits (0, 0);
+  if (!bit_range::from_mask (mask, &bits))
+    return NULL;
+
+  bit_range bound_bits (bits);
+  if (BYTES_BIG_ENDIAN)
+    bound_bits = bit_range (BITS_PER_UNIT - bits.get_next_bit_offset (),
+			    bits.m_size_in_bits);
+  const concrete_binding *conc
+    = get_store_manager ()->get_concrete_binding (bound_bits, BK_direct);
+  const svalue *sval = map.get (conc);
+  if (!sval)
+    return NULL;
+
+  /* We have a value;
+     shift it by the correct number of bits.  */
+  const svalue *lhs = get_or_create_cast (type, sval);
+  HOST_WIDE_INT bit_offset = bits.get_start_bit_offset ().to_shwi ();
+  tree shift_amt = build_int_cst (type, bit_offset);
+  const svalue *shift_sval = get_or_create_constant_svalue (shift_amt);
+  const svalue *shifted_sval = get_or_create_binop (type, LSHIFT_EXPR,
+						    lhs, shift_sval);
+  /* Reapply the mask (needed for negative
+     signed bitfields).  */
+  return get_or_create_binop (type, BIT_AND_EXPR,
+			      shifted_sval, arg1);
+}
+
 /* Subroutine of region_model_manager::get_or_create_binop.
    Attempt to fold the inputs and return a simpler svalue *.
    Otherwise, return NULL.  */
@@ -485,43 +539,13 @@ region_model_manager::maybe_fold_binop (tree type, enum tree_code op,
 	  /* "(ARG0 & 0)" -> "0".  */
 	  return get_or_create_constant_svalue (build_int_cst (type, 0));
 
-	/* Support masking out bits from a compound_svalue, as this
-	   is generated when accessing bitfields.  */
 	if (const compound_svalue *compound_sval
 	      = arg0->dyn_cast_compound_svalue ())
-	  {
-	    const binding_map &map = compound_sval->get_map ();
-	    unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (cst1);
-	    /* If "mask" is a contiguous range of set bits, see if the
-	       compound_sval has a value for those bits.  */
-	    bit_range bits (0, 0);
-	    if (bit_range::from_mask (mask, &bits))
-	      {
-		const concrete_binding *conc
-		  = get_store_manager ()->get_concrete_binding (bits,
-								BK_direct);
-		if (const svalue *sval = map.get (conc))
-		  {
-		    /* We have a value;
-		       shift it by the correct number of bits.  */
-		    const svalue *lhs = get_or_create_cast (type, sval);
-		    HOST_WIDE_INT bit_offset
-		      = bits.get_start_bit_offset ().to_shwi ();
-		    tree shift_amt = build_int_cst (type, bit_offset);
-		    const svalue *shift_sval
-		      = get_or_create_constant_svalue (shift_amt);
-		    const svalue *shifted_sval
-		      = get_or_create_binop (type,
-					     LSHIFT_EXPR,
-					     lhs, shift_sval);
-		    /* Reapply the mask (needed for negative
-		       signed bitfields).  */
-		    return get_or_create_binop (type,
-						BIT_AND_EXPR,
-						shifted_sval, arg1);
-		  }
-	      }
-	  }
+	  if (const svalue *sval
+		= maybe_undo_optimize_bit_field_compare (type,
+							 compound_sval,
+							 cst1, arg1))
+	    return sval;
 	}
       break;
     case TRUTH_ANDIF_EXPR:
diff --git a/gcc/analyzer/region-model.h b/gcc/analyzer/region-model.h
index 6c84a3795a3..053b4642b6f 100644
--- a/gcc/analyzer/region-model.h
+++ b/gcc/analyzer/region-model.h
@@ -273,6 +273,9 @@ private:
   const svalue *maybe_fold_sub_svalue (tree type,
				       const svalue *parent_svalue,
				       const region *subregion);
+  const svalue *maybe_undo_optimize_bit_field_compare (tree type,
+						       const compound_svalue *compound_sval,
+						       tree cst, const svalue *arg1);
 
   unsigned m_next_region_id;
   root_region m_root_region;
diff --git a/gcc/analyzer/store.cc b/gcc/analyzer/store.cc
index 699de94cdb0..d75fb2c4c7a 100644
--- a/gcc/analyzer/store.cc
+++ b/gcc/analyzer/store.cc
@@ -249,6 +249,18 @@ bit_range::dump_to_pp (pretty_printer *pp) const
   pp_wide_int (pp, get_next_bit_offset (), SIGNED);
 }
 
+/* Dump this object to stderr.  */
+
+DEBUG_FUNCTION void
+bit_range::dump () const
+{
+  pretty_printer pp;
+  pp.buffer->stream = stderr;
+  dump_to_pp (&pp);
+  pp_newline (&pp);
+  pp_flush (&pp);
+}
+
 int
 bit_range::cmp (const bit_range &br1, const bit_range &br2)
 {
diff --git a/gcc/analyzer/store.h b/gcc/analyzer/store.h
index 7bd2824dba9..ca9ff696bca 100644
--- a/gcc/analyzer/store.h
+++ b/gcc/analyzer/store.h
@@ -275,6 +275,7 @@ struct bit_range
   {}
 
   void dump_to_pp (pretty_printer *pp) const;
+  void dump () const;
 
   bit_offset_t get_start_bit_offset () const
   {
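
For reference, a minimal sketch of the kind of source that exercises the new
code path (not part of the patch; the struct, field width, and function name
below are illustrative assumptions):

    /* Illustrative only.  A bitfield comparison like this can be rewritten
       by fold-const.cc's optimize_bit_field_compare into (roughly)
	 ((unsigned char) CONTAINING_BYTE & MASK) == SHIFTED_CST
       i.e. a BIT_AND_EXPR applied to the byte containing the field, which
       the analyzer sees applied to a compound_svalue.  */
    struct s
    {
      unsigned int f : 3;
    };

    int
    test (struct s *p)
    {
      return p->f == 5;
    }

maybe_undo_optimize_bit_field_compare recovers the field's binding from the
compound_svalue via its bit_range, shifts it left by the field's start bit to
rebuild the masked byte, and re-applies the mask; on BYTES_BIG_ENDIAN targets
the bits bound within the byte are mirrored, hence the bound_bits adjustment.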