re PR middle-end/23623 (volatile keyword changes bitfield access size from 32bit to 8bit)

2013-12-11  Sandra Loosemore  <sandra@codesourcery.com>

        PR middle-end/23623
        PR middle-end/48784
        PR middle-end/56341
        PR middle-end/56997

        gcc/
        * expmed.c (strict_volatile_bitfield_p): New function.
        (store_bit_field_1): Don't special-case strict volatile
        bitfields here.
        (store_bit_field): Handle strict volatile bitfields here instead.
        (store_fixed_bit_field): Don't special-case strict volatile
        bitfields here.
        (extract_bit_field_1): Don't special-case strict volatile
        bitfields here.
        (extract_bit_field): Handle strict volatile bitfields here instead.
        (extract_fixed_bit_field): Don't special-case strict volatile
        bitfields here.  Simplify surrounding code to resemble that in
        store_fixed_bit_field.
        * doc/invoke.texi (Code Gen Options): Update
        -fstrict-volatile-bitfields description.

        gcc/testsuite/
        * gcc.dg/pr23623.c: New test.
        * gcc.dg/pr48784-1.c: New test.
        * gcc.dg/pr48784-2.c: New test.
        * gcc.dg/pr56341-1.c: New test.
        * gcc.dg/pr56341-2.c: New test.
        * gcc.dg/pr56997-1.c: New test.
        * gcc.dg/pr56997-2.c: New test.
        * gcc.dg/pr56997-3.c: New test.

From-SVN: r205896
Author:    Sandra Loosemore <sandra@codesourcery.com>  2013-12-11 11:50:05 -05:00
Committer: Bernd Edlinger
Commit:    f5d4f18c53 (parent 2ccd6f7276)
12 changed files with 432 additions and 78 deletions
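
For context, the headline PR (23623) is about access width: a volatile bit-field ended up being accessed with a narrower width (8 bits) than its declared type (32 bits). A minimal illustration in the spirit of that report, with invented names, assuming a typical 32-bit target:

/* Illustrative only: under -fstrict-volatile-bitfields, the access to
   devreg.ctrl below must use the width of the declared type, that is,
   a 32-bit (SImode) volatile read-modify-write, never a narrower 8-bit
   access.  Memory-mapped device registers frequently misbehave when
   accessed with the wrong width.  */
struct
{
  volatile unsigned int ctrl : 4;     /* declared type is unsigned int */
  volatile unsigned int status : 28;
} devreg;

void
set_ctrl (void)
{
  devreg.ctrl = 5;   /* expected: one 32-bit volatile read + one write */
}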

gcc/ChangeLog

@@ -1,3 +1,24 @@
+2013-12-11  Sandra Loosemore  <sandra@codesourcery.com>
+
+	PR middle-end/23623
+	PR middle-end/48784
+	PR middle-end/56341
+	PR middle-end/56997
+
+	* expmed.c (strict_volatile_bitfield_p): New function.
+	(store_bit_field_1): Don't special-case strict volatile
+	bitfields here.
+	(store_bit_field): Handle strict volatile bitfields here instead.
+	(store_fixed_bit_field): Don't special-case strict volatile
+	bitfields here.
+	(extract_bit_field_1): Don't special-case strict volatile
+	bitfields here.
+	(extract_bit_field): Handle strict volatile bitfields here instead.
+	(extract_fixed_bit_field): Don't special-case strict volatile
+	bitfields here.  Simplify surrounding code to resemble that in
+	store_fixed_bit_field.
+	* doc/invoke.texi (Code Gen Options): Update
+	-fstrict-volatile-bitfields description.
+
 2013-12-11  Kugan Vivekanandarajah  <kuganv@linaro.org>
 
 	* configure.ac: Add check for aarch64 assembler -mabi support.

gcc/doc/invoke.texi

@@ -22001,6 +22001,12 @@ instruction, even though that accesses bytes that do not contain
 any portion of the bit-field, or memory-mapped registers unrelated to
 the one being updated.
 
+In some cases, such as when the @code{packed} attribute is applied to a
+structure field, it may not be possible to access the field with a single
+read or write that is correctly aligned for the target machine.  In this
+case GCC falls back to generating multiple accesses rather than code that
+will fault or truncate the result at run time.
+
 The default value of this option is determined by the application binary
 interface for the target processor.
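
A sketch of the packed-field situation the added paragraph describes (illustrative types, not part of the patch):

/* Illustrative only: pfx forces f to start at byte offset 1, so no single
   correctly aligned 32-bit access can cover it.  In this case GCC falls
   back to multiple narrower accesses instead of emitting a single access
   that would fault or truncate at run time.  */
struct unaligned_reg
{
  unsigned char pfx;
  volatile unsigned int f : 28;
} __attribute__((packed));

unsigned int
read_f (volatile struct unaligned_reg *p)
{
  return p->f;   /* may expand to several byte or halfword loads */
}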

gcc/expmed.c

@@ -416,6 +416,42 @@ lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
   return bitnum % BITS_PER_WORD == 0;
 }
 
+/* Return true if -fstrict-volatile-bitfields applies to an access of OP0
+   containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.  */
+
+static bool
+strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
+                            unsigned HOST_WIDE_INT bitnum,
+                            enum machine_mode fieldmode)
+{
+  unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);
+
+  /* -fstrict-volatile-bitfields must be enabled and we must have a
+     volatile MEM.  */
+  if (!MEM_P (op0)
+      || !MEM_VOLATILE_P (op0)
+      || flag_strict_volatile_bitfields <= 0)
+    return false;
+
+  /* Non-integral modes likely only happen with packed structures.
+     Punt.  */
+  if (!SCALAR_INT_MODE_P (fieldmode))
+    return false;
+
+  /* The bit size must not be larger than the field mode, and
+     the field mode must not be larger than a word.  */
+  if (bitsize > modesize || modesize > BITS_PER_WORD)
+    return false;
+
+  /* Check for cases of unaligned fields that must be split.  */
+  if (bitnum % BITS_PER_UNIT + bitsize > modesize
+      || (STRICT_ALIGNMENT
+          && bitnum % GET_MODE_ALIGNMENT (fieldmode) + bitsize > modesize))
+    return false;
+
+  return true;
+}
+
 /* Return true if OP is a memory and if a bitfield of size BITSIZE at
    bit number BITNUM can be treated as a simple value of mode MODE.  */
@@ -829,12 +865,8 @@ store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
      cheap register alternative is available.  */
   if (MEM_P (op0))
     {
-      /* Do not use unaligned memory insvs for volatile bitfields when
-         -fstrict-volatile-bitfields is in effect.  */
-      if (!(MEM_VOLATILE_P (op0)
-            && flag_strict_volatile_bitfields > 0)
-          && get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
-                                           fieldmode)
+      if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
+                                        fieldmode)
           && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
         return true;
@@ -887,6 +919,27 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                  enum machine_mode fieldmode,
                  rtx value)
 {
+  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
+  if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, fieldmode))
+    {
+      /* Storing any naturally aligned field can be done with a simple
+         store.  For targets that support fast unaligned memory, any
+         naturally sized, unit aligned field can be done directly.  */
+      if (simple_mem_bitfield_p (str_rtx, bitsize, bitnum, fieldmode))
+        {
+          str_rtx = adjust_bitfield_address (str_rtx, fieldmode,
+                                             bitnum / BITS_PER_UNIT);
+          emit_move_insn (str_rtx, value);
+        }
+      else
+        /* Explicitly override the C/C++ memory model; ignore the
+           bit range so that we can do the access in the mode mandated
+           by -fstrict-volatile-bitfields instead.  */
+        store_fixed_bit_field (str_rtx, bitsize, bitnum, 0, 0, value);
+
+      return;
+    }
+
   /* Under the C++0x memory model, we must not touch bits outside the
      bit region.  Adjust the address to start at the beginning of the
      bit region.  */
@@ -939,29 +992,12 @@ store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
   if (MEM_P (op0))
     {
-      unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;
-
-      if (bitregion_end)
-        maxbits = bitregion_end - bitregion_start + 1;
-
       /* Get the proper mode to use for this field.  We want a mode that
          includes the entire field.  If such a mode would be larger than
          a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */
 
       mode = GET_MODE (op0);
       if (GET_MODE_BITSIZE (mode) == 0
           || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
         mode = word_mode;
-
-      if (MEM_VOLATILE_P (op0)
-          && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
-          && GET_MODE_BITSIZE (GET_MODE (op0)) <= maxbits
-          && flag_strict_volatile_bitfields > 0)
-        mode = GET_MODE (op0);
-      else
-        mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
-                              MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
+      mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
+                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
 
       if (mode == VOIDmode)
         {
@@ -1445,19 +1481,8 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
      If that's wrong, the solution is to test for it and set TARGET to 0
      if needed.  */
 
-  /* If the bitfield is volatile, we need to make sure the access
-     remains on a type-aligned boundary.  */
-  if (GET_CODE (op0) == MEM
-      && MEM_VOLATILE_P (op0)
-      && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
-      && flag_strict_volatile_bitfields > 0)
-    goto no_subreg_mode_swap;
-
-  /* Only scalar integer modes can be converted via subregs.  There is an
-     additional problem for FP modes here in that they can have a precision
-     which is different from the size.  mode_for_size uses precision, but
-     we want a mode based on the size, so we must avoid calling it for FP
-     modes.  */
+  /* Get the mode of the field to use for atomic access or subreg
+     conversion.  */
   mode1 = mode;
   if (SCALAR_INT_MODE_P (tmode))
     {
@@ -1490,8 +1515,6 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
       return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
     }
 
- no_subreg_mode_swap:
-
   /* Handle fields bigger than a word.  */
 
   if (bitsize > BITS_PER_WORD)
@@ -1611,11 +1634,8 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
      cheap register alternative is available.  */
   if (MEM_P (op0))
     {
-      /* Do not use extv/extzv for volatile bitfields when
-         -fstrict-volatile-bitfields is in effect.  */
-      if (!(MEM_VOLATILE_P (op0) && flag_strict_volatile_bitfields > 0)
-          && get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
-                                           tmode))
+      if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
+                                        tmode))
         {
           rtx result = extract_bit_field_using_extv (&extv, op0, bitsize,
                                                      bitnum, unsignedp,
@@ -1681,6 +1701,31 @@ extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                    unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                    enum machine_mode mode, enum machine_mode tmode)
 {
+  enum machine_mode mode1;
+
+  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
+  if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
+    mode1 = GET_MODE (str_rtx);
+  else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
+    mode1 = GET_MODE (target);
+  else
+    mode1 = tmode;
+
+  if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, mode1))
+    {
+      rtx result;
+
+      /* Extraction of a full MODE1 value can be done with a load as long as
+         the field is on a byte boundary and is sufficiently aligned.  */
+      if (simple_mem_bitfield_p (str_rtx, bitsize, bitnum, mode1))
+        result = adjust_bitfield_address (str_rtx, mode1,
+                                          bitnum / BITS_PER_UNIT);
+      else
+        result = extract_fixed_bit_field (mode, str_rtx, bitsize, bitnum,
+                                          target, unsignedp);
+
+      return convert_extracted_bit_field (result, mode, tmode, unsignedp);
+    }
+
   return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
                               target, mode, tmode, true);
 }
@@ -1707,45 +1752,19 @@ extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
          includes the entire field.  If such a mode would be larger than
          a word, we won't be doing the extraction the normal way.  */
 
-      if (MEM_VOLATILE_P (op0)
-          && flag_strict_volatile_bitfields > 0)
-        {
-          if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
-            mode = GET_MODE (op0);
-          else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
-            mode = GET_MODE (target);
-          else
-            mode = tmode;
-        }
-      else
-        mode = get_best_mode (bitsize, bitnum, 0, 0,
-                              MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
+      mode = GET_MODE (op0);
+      if (GET_MODE_BITSIZE (mode) == 0
+          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
+        mode = word_mode;
+      mode = get_best_mode (bitsize, bitnum, 0, 0,
+                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
 
       if (mode == VOIDmode)
         /* The only way this should occur is if the field spans word
            boundaries.  */
         return extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
 
-      unsigned int total_bits = GET_MODE_BITSIZE (mode);
-      HOST_WIDE_INT bit_offset = bitnum - bitnum % total_bits;
-
-      /* If we're accessing a volatile MEM, we can't apply BIT_OFFSET
-         if it results in a multi-word access where we otherwise wouldn't
-         have one.  So, check for that case here.  */
-      if (MEM_P (op0)
-          && MEM_VOLATILE_P (op0)
-          && flag_strict_volatile_bitfields > 0
-          && bitnum % BITS_PER_UNIT + bitsize <= total_bits
-          && bitnum % GET_MODE_BITSIZE (mode) + bitsize > total_bits)
-        {
-          /* If the target doesn't support unaligned access, give up and
-             split the access into two.  */
-          if (STRICT_ALIGNMENT)
-            return extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
-          bit_offset = bitnum - bitnum % BITS_PER_UNIT;
-        }
-      op0 = adjust_bitfield_address (op0, mode, bit_offset / BITS_PER_UNIT);
-      bitnum -= bit_offset;
+      op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
     }
 
   mode = GET_MODE (op0);
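
In summary, strict_volatile_bitfield_p now gates all the -fstrict-volatile-bitfields handling at the store_bit_field/extract_bit_field entry points, and simple_mem_bitfield_p then picks between a single full-mode access and a read-modify-write through the fixed-bit-field helpers. A C-level sketch of the resulting behavior (invented struct, typical 32-bit target assumed):

/* Illustrative only: both fields have declared type unsigned int.  */
struct
{
  volatile unsigned int whole : 32;  /* exactly fills its field mode:
                                        simple_mem_bitfield_p holds, so one
                                        SImode load or store suffices  */
  volatile unsigned int flag : 1;    /* partial word: handled by
                                        store_fixed_bit_field /
                                        extract_fixed_bit_field as an
                                        SImode read-modify-write  */
} s;

unsigned int get_whole (void) { return s.whole; }
void set_flag (void) { s.flag = 1; }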

gcc/testsuite/ChangeLog

@@ -1,3 +1,18 @@
+2013-12-11  Sandra Loosemore  <sandra@codesourcery.com>
+
+	PR middle-end/23623
+	PR middle-end/48784
+	PR middle-end/56341
+	PR middle-end/56997
+
+	* gcc.dg/pr23623.c: New test.
+	* gcc.dg/pr48784-1.c: New test.
+	* gcc.dg/pr48784-2.c: New test.
+	* gcc.dg/pr56341-1.c: New test.
+	* gcc.dg/pr56341-2.c: New test.
+	* gcc.dg/pr56997-1.c: New test.
+	* gcc.dg/pr56997-2.c: New test.
+	* gcc.dg/pr56997-3.c: New test.
+
 2013-12-11  Janus Weil  <janus@gcc.gnu.org>
 
 	PR fortran/58916

gcc/testsuite/gcc.dg/pr23623.c

@@ -0,0 +1,45 @@
/* { dg-do compile } */
/* { dg-options "-fstrict-volatile-bitfields -fdump-rtl-final" } */

/* With -fstrict-volatile-bitfields, the volatile accesses to bf2.b
   and bf3.b must do unsigned int reads/writes.  The non-volatile
   accesses to bf1.b are not so constrained.  */

extern struct
{
  unsigned int b : 1;
} bf1;

extern volatile struct
{
  unsigned int b : 1;
} bf2;

extern struct
{
  volatile unsigned int b : 1;
} bf3;

void writeb(void)
{
  bf1.b = 1;
  bf2.b = 1;  /* volatile read + volatile write */
  bf3.b = 1;  /* volatile read + volatile write */
}

extern unsigned int x1, x2, x3;

void readb(void)
{
  x1 = bf1.b;
  x2 = bf2.b;  /* volatile read */
  x3 = bf3.b;  /* volatile read */
}

/* There should be 6 volatile MEMs total, but scan-rtl-dump-times counts
   the number of match variables and not the number of matches.  Since
   the parenthesized subexpression in the regexp introduces an extra match
   variable, we need to give a count of 12 instead of 6 here.  */
/* { dg-final { scan-rtl-dump-times "mem/v(/.)*:SI" 12 "final" } } */

/* { dg-final { cleanup-rtl-dump "final" } } */
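
For readers unfamiliar with the dump pattern, the volatile accesses the test requires correspond roughly to this C-level expansion (a sketch only; actual RTL varies by target):

/* Illustrative only: what bf2.b = 1 must amount to when the whole
   containing word is accessed in SImode.  */
void
writeb_bf2_expected (volatile unsigned int *word)
{
  unsigned int tmp = *word;   /* volatile 32-bit read  */
  tmp |= 1u;                  /* set the one-bit field */
  *word = tmp;                /* volatile 32-bit write */
}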

gcc/testsuite/gcc.dg/pr48784-1.c

@@ -0,0 +1,18 @@
/* { dg-do run } */
/* { dg-options "-fstrict-volatile-bitfields" } */

extern void abort (void);

#pragma pack(1)
volatile struct S0 {
  signed a : 7;
  unsigned b : 28;  /* b can't be fetched with an aligned 32-bit access, */
                    /* but it certainly can be fetched with an unaligned access */
} g = {0,0xfffffff};

int main() {
  unsigned b = g.b;
  if (b != 0xfffffff)
    abort ();
  return 0;
}

gcc/testsuite/gcc.dg/pr48784-2.c

@@ -0,0 +1,18 @@
/* { dg-do run } */
/* { dg-options "-fno-strict-volatile-bitfields" } */

extern void abort (void);

#pragma pack(1)
volatile struct S0 {
  signed a : 7;
  unsigned b : 28;  /* b can't be fetched with an aligned 32-bit access, */
                    /* but it certainly can be fetched with an unaligned access */
} g = {0,0xfffffff};

int main() {
  unsigned b = g.b;
  if (b != 0xfffffff)
    abort ();
  return 0;
}

gcc/testsuite/gcc.dg/pr56341-1.c

@@ -0,0 +1,40 @@
/* { dg-do run } */
/* { dg-options "-fstrict-volatile-bitfields" } */

extern void abort (void);

struct test0
{
  unsigned char b1[2];
} __attribute__((packed, aligned(2)));

struct test1
{
  volatile unsigned long a1;
  unsigned char b1[4];
} __attribute__((packed, aligned(2)));

struct test2
{
  struct test0 t0;
  struct test1 t1;
  struct test0 t2;
} __attribute__((packed, aligned(2)));

struct test2 xx;
struct test2 *x1 = &xx;

#define MAGIC 0x12345678

void test0 (struct test2* x1)
{
  x1->t1.a1 = MAGIC;
}

int main()
{
  test0 (x1);
  if (xx.t1.a1 != MAGIC)
    abort ();
  return 0;
}

gcc/testsuite/gcc.dg/pr56341-2.c

@@ -0,0 +1,40 @@
/* { dg-do run } */
/* { dg-options "-fno-strict-volatile-bitfields" } */

extern void abort (void);

struct test0
{
  unsigned char b1[2];
} __attribute__((packed, aligned(2)));

struct test1
{
  volatile unsigned long a1;
  unsigned char b1[4];
} __attribute__((packed, aligned(2)));

struct test2
{
  struct test0 t0;
  struct test1 t1;
  struct test0 t2;
} __attribute__((packed, aligned(2)));

struct test2 xx;
struct test2 *x1 = &xx;

#define MAGIC 0x12345678

void test0 (struct test2* x1)
{
  x1->t1.a1 = MAGIC;
}

int main()
{
  test0 (x1);
  if (xx.t1.a1 != MAGIC)
    abort ();
  return 0;
}

gcc/testsuite/gcc.dg/pr56997-1.c

@@ -0,0 +1,44 @@
/* Test volatile access to unaligned field.  */
/* { dg-do run } */
/* { dg-options "-fstrict-volatile-bitfields" } */

extern void abort (void);

#define test_type unsigned short
#define MAGIC (unsigned short)0x102u

typedef struct s{
  unsigned char Prefix;
  test_type Type;
}__attribute((__packed__)) ss;

volatile ss v;
ss g;

void __attribute__((noinline))
foo (test_type u)
{
  v.Type = u;
}

test_type __attribute__((noinline))
bar (void)
{
  return v.Type;
}

int main()
{
  test_type temp;
  foo(MAGIC);
  __builtin_memcpy(&g, (void *)&v, sizeof(g));
  if (g.Type != MAGIC)
    abort ();
  g.Type = MAGIC;
  __builtin_memcpy((void *)&v, &g, sizeof(v));
  temp = bar();
  if (temp != MAGIC)
    abort ();
  return 0;
}

gcc/testsuite/gcc.dg/pr56997-2.c

@@ -0,0 +1,44 @@
/* Test volatile access to unaligned field.  */
/* { dg-do run } */
/* { dg-options "-fstrict-volatile-bitfields" } */

extern void abort (void);

#define test_type unsigned int
#define MAGIC 0x1020304u

typedef struct s{
  unsigned char Prefix;
  test_type Type;
}__attribute((__packed__)) ss;

volatile ss v;
ss g;

void __attribute__((noinline))
foo (test_type u)
{
  v.Type = u;
}

test_type __attribute__((noinline))
bar (void)
{
  return v.Type;
}

int main()
{
  test_type temp;
  foo(MAGIC);
  __builtin_memcpy(&g, (void *)&v, sizeof(g));
  if (g.Type != MAGIC)
    abort ();
  g.Type = MAGIC;
  __builtin_memcpy((void *)&v, &g, sizeof(v));
  temp = bar();
  if (temp != MAGIC)
    abort ();
  return 0;
}

gcc/testsuite/gcc.dg/pr56997-3.c

@@ -0,0 +1,44 @@
/* Test volatile access to unaligned field.  */
/* { dg-do run } */
/* { dg-options "-fstrict-volatile-bitfields" } */

extern void abort (void);

#define test_type unsigned long long
#define MAGIC 0x102030405060708ull

typedef struct s{
  unsigned char Prefix;
  test_type Type;
}__attribute((__packed__)) ss;

volatile ss v;
ss g;

void __attribute__((noinline))
foo (test_type u)
{
  v.Type = u;
}

test_type __attribute__((noinline))
bar (void)
{
  return v.Type;
}

int main()
{
  test_type temp;
  foo(MAGIC);
  __builtin_memcpy(&g, (void *)&v, sizeof(g));
  if (g.Type != MAGIC)
    abort ();
  g.Type = MAGIC;
  __builtin_memcpy((void *)&v, &g, sizeof(v));
  temp = bar();
  if (temp != MAGIC)
    abort ();
  return 0;
}