Add MSBIT* and LSBIT* macros to sim-bits.h

Add more macros for extracting sub-word quantities to sim-endian.h
Andrew Cagney 1997-08-27 07:56:27 +00:00
parent 750b794296
commit d6fea803dc
7 changed files with 1214 additions and 1 deletion


@@ -1,3 +1,22 @@
Wed Aug 27 15:13:04 1997 Andrew Cagney <cagney@b1.cygnus.com>
* sim-bits.h (LSBIT8, LSBIT16, LSBIT32, LSBIT64, LSBIT, MSBIT8,
MSBIT16, MSBIT32, MSBIT64, MSBIT): New macros - single bit as
offset from MSB/LSB.
* sim-endian.h (A1_8, A2_8, A4_8, A1_4, A2_4, A1_2): New macros,
access address of sub-word quantity of a host's 16, 32, 64 bit word
type.
(V1_2, V1_4, V2_4, V1_8, V2_8, V4_8): Ditto for values.
(U8_1, U8_2, U8_4, U4_1, U4_2, U2_1): Ditto for set of values.
(V2_H1, V2_L1, V4_H2, V4_L2, V8_L4, V8_H4): Given N byte argument,
return N*2 byte value with argument in Hi/Lo word. Renamed from
V1_H2, V1_L2, V2_H4, V2_L4, V4_H8, V4_L8.
* sim-alu.h (ALU32_HAD_OVERFLOW): Use 64 bit mask not 32bit.
(ALU16_HAD_CARRY, ALU32_HAD_CARRY, ALU16_HAD_OVERFLOW): Use MSBIT
so that bit offset is explicit.
Wed Aug 27 11:55:35 1997 Andrew Cagney <cagney@b1.cygnus.com>
* sim-utils.c (sim_analyze_program): Add prog_name argument.
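
A rough sketch of how the new macros described above might be used (hi and lo
are illustrative placeholders for two 32 bit host values; the results assume
the definitions in the files below):

    unsigned32 ms = MSBIT32 (0);    /* 0x80000000 - offset 0 from the MS bit */
    unsigned32 ls = LSBIT32 (0);    /* 0x00000001 - offset 0 from the LS bit */
    unsigned_8 dw = U8_4 (hi, lo);  /* 64 bit value, hi in the upper 32 bits */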

sim/common/sim-alu.h (new file, 348 lines)

@@ -0,0 +1,348 @@
/* This file is part of the program psim.
Copyright (C) 1994-1996, Andrew Cagney <cagney@highland.com.au>
Copyright (C) 1997, Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _SIM_ALU_H_
#define _SIM_ALU_H_
#include "sim-xcat.h"
/* 32bit target expressions:
Each calculation is performed three times using each of the
signed64, unsigned64 and long integer types. The macro ALU_END
(in _ALU_RESULT_VAL) then selects which of the three alternative
results will be used in the final assignment of the target
register. As this selection is determined at compile time by
fields in the instruction (OE, EA, Rc) the compiler has sufficient
information to firstly simplify the selection code into a single
case and then back annotate the equations and hence eliminate any
resulting dead code. That dead code being the calculations that, as
it turned out, were not needed.
64bit arithmetic is used firstly because it allows the use of gcc's
efficient long long operators (typically output inline) and secondly
because the low 32bits of the result contain the answer while the
high 32bits contain either carry or status information. */
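/* For instance (illustrative only): performing a 32 bit addition in 64
   bits leaves the 32 bit sum in the low half of the result and the
   carry out in bit 32:

     unsigned64 sum = (unsigned64)(unsigned32)a + (unsigned64)(unsigned32)b;
     int carry_out = (sum >> 32) & 1;

   The signed64 counterpart similarly carries the overflow information
   in its upper half. */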
/* 16bit target expressions:
These are a simplified version of the 32bit target expressions */
/* 64bit target expressions:
Unfortunately 128bit arithmetic isn't that common. Consequently
the 32/64 bit trick can not be used. Instead all calculations are
required to retain carry/overflow information in separate
variables. Even with this restriction it is still possible to use
the trick of letting the compiler discard the calculation of unneeded
values. */
/* Macros to type cast 32bit constants to 64bits */
#define SIGNED64(val) ((signed64)(signed32)(val))
#define UNSIGNED64(val) ((unsigned64)(unsigned32)(val))
/* Start a section of ALU code */
#define ALU16_BEGIN(VAL) \
{ \
signed_word alu_carry_val; \
unsigned_word alu_overflow_val; \
ALU16_SET(VAL)
#define ALU32_BEGIN(VAL) \
{ \
natural_word alu_val; \
unsigned64 alu_carry_val; \
signed64 alu_overflow_val; \
ALU32_SET(VAL)
#define ALU_BEGIN(VAL) XCONCAT3(ALU,WITH_TARGET_WORD_BITSIZE,_BEGIN)(VAL)
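/* XCONCAT3 (from sim-xcat.h) pastes its three arguments into one
   token, so with WITH_TARGET_WORD_BITSIZE defined as 32, for example,
   ALU_BEGIN(VAL) expands to ALU32_BEGIN(VAL).  The same scheme selects
   the matching _SET, _ADD, ... variants below. */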
/* More basic alu operations */
#define ALU16_SET(VAL) \
do { \
alu_carry_val = (unsigned16)(VAL); \
alu_overflow_val = (signed16)(VAL); \
} while (0)
#define ALU32_SET(VAL) \
do { \
alu_val = (unsigned32)(VAL); \
alu_carry_val = (unsigned32)(alu_val); \
alu_overflow_val = (signed32)(alu_val); \
} while (0)
#define ALU64_SET(VAL) \
do { \
alu_val = (VAL); \
alu_carry_val = ((unsigned64)alu_val) >> 32; \
alu_overflow_val = ((signed64)alu_val) >> 32; \
} while (0)
#define ALU_SET(VAL) XCONCAT3(ALU,WITH_TARGET_WORD_BITSIZE,_SET)(VAL)
#define ALU16_ADD(VAL) \
do { \
alu_carry_val += (unsigned16)(VAL); \
alu_overflow_val += (signed16)(VAL); \
} while (0)
#define ALU32_ADD(VAL) \
do { \
alu_val += (VAL); \
alu_carry_val += (unsigned32)(VAL); \
alu_overflow_val += (signed32)(VAL); \
} while (0)
#define ALU64_ADD(VAL) \
do { \
unsigned64 alu_lo = (UNSIGNED64(alu_val) \
+ UNSIGNED64(VAL)); \
signed alu_carry = ((alu_lo & BIT(31)) != 0); \
alu_carry_val = (alu_carry_val \
+ UNSIGNED64(EXTRACTED((VAL), 0, 31)) \
+ alu_carry); \
alu_overflow_val = (alu_overflow_val \
+ SIGNED64(EXTRACTED((VAL), 0, 31)) \
+ alu_carry); \
alu_val = alu_val + (VAL); \
} while (0)
#define ALU_ADD(VAL) XCONCAT3(ALU,WITH_TARGET_WORD_BITSIZE,_ADD)(VAL)
#define ALU16_ADD_CA \
do { \
signed carry = ALU_CARRY; \
ALU16_ADD(carry); \
} while (0)
#define ALU32_ADD_CA \
do { \
signed carry = ALU_CARRY; \
ALU32_ADD(carry); \
} while (0)
#define ALU64_ADD_CA \
do { \
signed carry = ALU_CARRY; \
ALU64_ADD(carry); \
} while (0)
#define ALU_ADD_CA XCONCAT3(ALU,WITH_TARGET_WORD_BITSIZE,_ADD_CA)
#define ALU16_SUB(VAL) \
do { \
alu_carry_val -= (unsigned16)(VAL); \
alu_overflow_val -= (signed16)(VAL); \
} while (0)
#define ALU32_SUB(VAL) \
do { \
alu_val -= (VAL); \
alu_carry_val -= (unsigned32)(VAL); \
alu_overflow_val -= (signed32)(VAL); \
} while (0)
#define ALU64_SUB(VAL) \
do { \
error("ALU_SUB64"); \
} while (0)
#define ALU_SUB(VAL) XCONCAT3(ALU,WITH_TARGET_WORD_BITSIZE,_SUB)(VAL)
#define ALU16_SUB_CA \
do { \
signed carry = ALU_CARRY; \
ALU16_SUB(carry); \
} while (0)
#define ALU32_SUB_CA \
do { \
signed carry = ALU_CARRY; \
ALU32_SUB(carry); \
} while (0)
#define ALU64_SUB_CA \
do { \
signed carry = ALU_CARRY; \
ALU64_SUB(carry); \
} while (0)
#define ALU_SUB_CA XCONCAT3(ALU,WITH_TARGET_WORD_BITSIZE,_SUB_CA)
#define ALU16_OR(VAL) \
do { \
error("ALU16_OR"); \
} while (0)
#define ALU32_OR(VAL) \
do { \
alu_val |= (VAL); \
alu_carry_val = (unsigned32)(alu_val); \
alu_overflow_val = (signed32)(alu_val); \
} while (0)
#define ALU64_OR(VAL) \
do { \
error("ALU_OR64"); \
} while (0)
#define ALU_OR(VAL) XCONCAT3(ALU,WITH_TARGET_WORD_BITSIZE,_OR)(VAL)
#define ALU16_XOR(VAL) \
do { \
error("ALU16_XOR"); \
} while (0)
#define ALU32_XOR(VAL) \
do { \
alu_val ^= (VAL); \
alu_carry_val = (unsigned32)(alu_val); \
alu_overflow_val = (signed32)(alu_val); \
} while (0)
#define ALU64_XOR(VAL) \
do { \
error("ALU_XOR64"); \
} while (0)
#define ALU_XOR(VAL) XCONCAT3(ALU,WITH_TARGET_WORD_BITSIZE,_XOR)(VAL)
#define ALU16_NEGATE \
do { \
error("ALU_NEGATE16"); \
} while (0)
#define ALU32_NEGATE \
do { \
alu_val = -alu_val; \
alu_carry_val = -alu_carry_val; \
alu_overflow_val = -alu_overflow_val; \
} while(0)
#define ALU64_NEGATE \
do { \
error("ALU_NEGATE64"); \
} while (0)
#define ALU_NEGATE XCONCAT3(ALU,WITH_TARGET_WORD_BITSIZE,_NEGATE)
#define ALU16_AND(VAL) \
do { \
error("ALU_AND16"); \
} while (0)
#define ALU32_AND(VAL) \
do { \
alu_val &= (VAL); \
alu_carry_val = (unsigned32)(alu_val); \
alu_overflow_val = (signed32)(alu_val); \
} while (0)
#define ALU64_AND(VAL) \
do { \
error("ALU_AND64"); \
} while (0)
#define ALU_AND(VAL) XCONCAT3(ALU,WITH_TARGET_WORD_BITSIZE,_AND)(VAL)
#define ALU16_NOT(VAL) \
do { \
error("ALU_NOT16"); \
} while (0)
#define ALU32_NOT \
do { \
signed64 new_alu_val = ~alu_val; \
ALU_SET(new_alu_val); \
} while (0)
#define ALU64_NOT \
do { \
error("ALU_NOT64"); \
} while (0)
#define ALU_NOT XCONCAT3(ALU,WITH_TARGET_WORD_BITSIZE,_NOT)
/* Make available various results */
/* overflow occurs if the sign bit differs from the carry bit */
#define ALU16_HAD_OVERFLOW \
(!(alu_overflow_val & MSBIT32 (0)) != !(alu_overflow_val & MSBIT32 (16)))
#define ALU32_HAD_OVERFLOW \
((((unsigned64)(alu_overflow_val & BIT64(0))) >> 32) \
!= (alu_overflow_val & MSBIT64(32)))
#define ALU_HAD_OVERFLOW XCONCAT3(ALU,WITH_TARGET_WORD_BITSIZE,_HAD_OVERFLOW)
/* carry found in bit before sign */
#define ALU16_HAD_CARRY \
(alu_carry_val & MSBIT32(16))
#define ALU32_HAD_CARRY \
(alu_carry_val & MSBIT64(31))
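/* Illustration of the 32 bit carry test: adding 0xffffffff and 0x1 in
   64 bits gives 0x100000000.  MSBIT64 (31) is bit 31 counted from the
   most significant end of a 64 bit word, i.e. ((unsigned64)1 << 32),
   so it picks out exactly the carry out of the low 32 bits:

     unsigned64 sum = (unsigned64)0xffffffff + 1;
     int carried = (sum & MSBIT64 (31)) != 0;

   carried is non zero here. */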
#endif

sim/common/sim-bits.h (new file, 485 lines)

@@ -0,0 +1,485 @@
/* This file is part of the program psim.
Copyright (C) 1994-1996, Andrew Cagney <cagney@highland.com.au>
Copyright (C) 1997, Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _SIM_BITS_H_
#define _SIM_BITS_H_
/* bit manipulation routines:
Bit numbering: The bits are numbered according to the target ISA's
convention, controlled by WITH_TARGET_WORD_MSB. For
the PowerPC (WITH_TARGET_WORD_MSB == 0) the numbering is 0..31
while for the MIPS (WITH_TARGET_WORD_MSB == 31) it is 31..0.
Size convention: Each macro comes in three forms - <MACRO>32 which
operates on 32bit quantities (bits are numbered 0..31); <MACRO>64
which operates on 64bit quantities (bits are numbered 0..63);
and <MACRO> which operates using the bit size of the target
architecture (bits are still numbered 0..63), with 32bit
architectures ignoring the first 32bits leaving bit 32 as the most
significant.
BIT*(POS): Constant with just 1 bit set.
LSBIT*(OFFSET): Constant with just 1 bit set - OFFSET is counted from
the LS bit (the LS bit is offset zero).
MSBIT*(OFFSET): Constant with just 1 bit set - OFFSET is counted from
the MS bit (the MS bit is offset zero).
MASK*(FIRST, LAST): Constant with bits [FIRST .. LAST] set. The
<MACRO> (no size) version permits FIRST >= LAST and generates a
wrapped bit mask, viz. ([0..LAST] | [FIRST..LSB]).
LSMASK*(NR_BITS): Like MASK only NR least significant bits are set.
MSMASK*(NR_BITS): Like MASK only NR most significant bits are set.
MASKED*(VALUE, FIRST, LAST): Masks out all but bits [FIRST
.. LAST].
LSMASKED*(VALUE, NR_BITS): Mask out all but the least significant
NR_BITS of the value.
MSMASKED*(VALUE, NR_BITS): Mask out all but the most significant
NR_BITS of the value.
EXTRACTED*(VALUE, FIRST, LAST): Masks out bits [FIRST .. LAST] but
also right shifts the masked value so that bit LAST becomes the
least significant (right most).
SHUFFLED**(VALUE, OLD, NEW): Mask then move a single bit from OLD
to NEW.
MOVED**(VALUE, OLD_FIRST, OLD_LAST, NEW_FIRST, NEW_LAST): Moves
things around so that bits OLD_FIRST..OLD_LAST are masked then
moved to NEW_FIRST..NEW_LAST.
INSERTED*(VALUE, FIRST, LAST): Takes VALUE and `inserts' the (LAST
- FIRST + 1) least significant bits into bit positions [ FIRST
.. LAST ]. This is almost the complement to EXTRACTED.
IEA_MASKED(SHOULD_MASK, ADDR): Convert the address to the target's
natural size. If in 32bit mode, discard the high 32bits.
EXTENDED(VALUE): Convert VALUE (32bits of it) to the target's
natural size. If in 64bit mode, sign extend the value.
ALIGN_*(VALUE): Round upwards the value so that it is aligned.
FLOOR_*(VALUE): Truncate the value so that it is aligned.
ROTL*(VALUE, NR_BITS): Return the value rotated by NR_BITS left.
ROTR*(VALUE, NR_BITS): Return the value rotated by NR_BITS right.
SEXT*(VAL, SIGN_BIT): Treat SIGN_BIT as the sign bit of VAL and sign
extend it.
Note: Only the BIT* and MASK* macros return a constant that can be
used in variable declarations.
*/
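/* A few concrete expansions, assuming the PowerPC style numbering
   (WITH_TARGET_WORD_MSB == 0) purely for illustration:

     BIT32 (0)              -> 0x80000000   (the most significant bit)
     MASK32 (0, 7)          -> 0xff000000   (bits 0 through 7)
     EXTRACTED32 (x, 0, 7)  -> (x >> 24) & 0xff
     LSMASK32 (4)           -> 0x0000000f
     MSMASK32 (4)           -> 0xf0000000

   With WITH_TARGET_WORD_MSB == 31 the same expressions renumber from
   the other end of the word. */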
/* compute the number of bits between START and STOP */
#if (WITH_TARGET_WORD_MSB == 0)
#define _MAKE_WIDTH(START, STOP) (STOP - START + 1)
#else
#define _MAKE_WIDTH(START, STOP) (START - STOP + 1)
#endif
/* compute the number of shifts required to move a bit between the LSB
(MSB) and POS */
#if (WITH_TARGET_WORD_MSB == 0)
#define _LSB_SHIFT(WIDTH, POS) (WIDTH - 1 - POS)
#else
#define _LSB_SHIFT(WIDTH, POS) (POS)
#endif
#if (WITH_TARGET_WORD_MSB == 0)
#define _MSB_SHIFT(WIDTH, POS) (POS)
#else
#define _MSB_SHIFT(WIDTH, POS) (WIDTH - 1 - POS)
#endif
/* compute the absolute bit position given the OFFSET from the MSB(LSB)
NB: _MAKE_xxx_POS (WIDTH, _MAKE_xxx_SHIFT (WIDTH, POS)) == POS */
#if (WITH_TARGET_WORD_MSB == 0)
#define _MSB_POS(WIDTH, SHIFT) (SHIFT)
#else
#define _MSB_POS(WIDTH, SHIFT) (WIDTH - 1 - SHIFT)
#endif
#if (WITH_TARGET_WORD_MSB == 0)
#define _LSB_POS(WIDTH, SHIFT) (WIDTH - 1 - SHIFT)
#else
#define _LSB_POS(WIDTH, SHIFT) (SHIFT)
#endif
/* convert a 64 bit position into a corresponding 32bit position. MSB
pos handles the possibility that the bit lies beyond the 32bit
boundary */
#if (WITH_TARGET_WORD_MSB == 0)
#define _MSB_32(START, STOP) (START <= STOP \
? (START < 32 ? 0 : START - 32) \
: (STOP < 32 ? 0 : STOP - 32))
#else
#define _MSB_32(START, STOP) (START >= STOP \
? (START >= 32 ? 31 : START) \
: (STOP >= 32 ? 31 : STOP))
#endif
#if (WITH_TARGET_WORD_MSB == 0)
#define _LSB_32(START, STOP) (START <= STOP \
? (STOP < 32 ? 0 : STOP - 32) \
: (START < 32 ? 0 : START - 32))
#else
#define _LSB_32(START, STOP) (START >= STOP \
? (STOP >= 32 ? 31 : STOP) \
: (START >= 32 ? 31 : START))
#endif
#if (WITH_TARGET_WORD_MSB == 0)
#define _MSB(START, STOP) (START <= STOP ? START : STOP)
#else
#define _MSB(START, STOP) (START >= STOP ? START : STOP)
#endif
#if (WITH_TARGET_WORD_MSB == 0)
#define _LSB(START, STOP) (START <= STOP ? STOP : START)
#else
#define _LSB(START, STOP) (START >= STOP ? STOP : START)
#endif
/* Bit operations */
#define _BITn(WIDTH, POS) ((natural##WIDTH)1 \
<< _LSB_SHIFT (WIDTH, POS))
#define BIT4(POS) (1 << _LSB_SHIFT (4, (POS)))
#define BIT5(POS) (1 << _LSB_SHIFT (5, (POS)))
#define BIT8(POS) (1 << _LSB_SHIFT (8, (POS)))
#define BIT10(POS) (1 << _LSB_SHIFT (10, (POS)))
#define BIT16(POS) _BITn (16, (POS))
#define BIT32(POS) _BITn (32, (POS))
#define BIT64(POS) _BITn (64, (POS))
#if (WITH_TARGET_WORD_BITSIZE == 64)
#define BIT(POS) BIT64(POS)
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
#if (WITH_TARGET_WORD_MSB == 0)
#define BIT(POS) ((POS) < 32 \
? 0 \
: (1 << ((POS) < 32 ? 0 : _LSB_SHIFT(64, (POS)))))
#else
#define BIT(POS) ((POS) >= 32 \
? 0 \
: (1 << ((POS) >= 32 ? 0 : (POS))))
#endif
#endif
#if !defined (BIT)
#error "BIT never defined"
#endif
/* LS/MS Bit operations */
#define LSBIT8(POS) ((unsigned8)1 << (POS))
#define LSBIT16(POS) ((unsigned16)1 << (POS))
#define LSBIT32(POS) ((unsigned32)1 << (POS))
#define LSBIT64(POS) ((unsigned64)1 << (POS))
#define LSBIT(POS) ((unsigned_word)1 << (POS))
#define MSBIT8(POS) ((unsigned8)1 << (8 - 1 - (POS)))
#define MSBIT16(POS) ((unsigned16)1 << (16 - 1 - (POS)))
#define MSBIT32(POS) ((unsigned32)1 << (32 - 1 - (POS)))
#define MSBIT64(POS) ((unsigned64)1 << (64 - 1 - (POS)))
#define MSBIT(POS) ((unsigned_word)1 << (WITH_TARGET_WORD_BITSIZE - 1 - (POS)))
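/* Unlike BIT*, the LSBIT and MSBIT families take an offset from a
   fixed end of the word and so are independent of the target's bit
   numbering:

     LSBIT32 (0)  -> 0x00000001      MSBIT32 (0)  -> 0x80000000
     LSBIT32 (31) -> 0x80000000      MSBIT32 (31) -> 0x00000001

   */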
/* multi bit mask */
/* 111111 -> mmll11 -> mm11ll */
#define _MASKn(WIDTH, START, STOP) (((unsigned##WIDTH)(-1) \
>> (_MSB_SHIFT (WIDTH, START) \
+ _LSB_SHIFT (WIDTH, STOP))) \
<< _LSB_SHIFT (WIDTH, STOP))
#define MASK16(START, STOP) _MASKn(16, (START), (STOP))
#define MASK32(START, STOP) _MASKn(32, (START), (STOP))
#define MASK64(START, STOP) _MASKn(64, (START), (STOP))
#if (WITH_TARGET_WORD_MSB == 0)
#define _POS_LE(START, STOP) (START <= STOP)
#else
#define _POS_LE(START, STOP) (STOP <= START)
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
#define MASK(START, STOP) \
(_POS_LE ((START), (STOP)) \
? _MASKn(64, \
_MSB ((START), (STOP)), \
_LSB ((START), (STOP)) ) \
: (_MASKn(64, _MSB_POS (64, 0), (STOP)) \
| _MASKn(64, (START), _LSB_POS (64, 0))))
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
#define MASK(START, STOP) \
(_POS_LE ((START), (STOP)) \
? (_POS_LE ((STOP), _MSB_POS (64, 31)) \
? 0 \
: _MASKn (32, \
_MSB_32 ((START), (STOP)), \
_LSB_32 ((START), (STOP)))) \
: (_MASKn (32, \
_LSB_32 ((START), (STOP)), \
_LSB_POS (32, 0)) \
| (_POS_LE ((STOP), _MSB_POS (64, 31)) \
? 0 \
: _MASKn (32, \
_MSB_POS (32, 0), \
_MSB_32 ((START), (STOP))))))
#endif
#if !defined (MASK)
#error "MASK never undefined"
#endif
/* Multi-bit mask on least significant bits */
#if (WITH_TARGET_WORD_MSB == 0)
#define _LSMASKn(WIDTH, NR_BITS) _MASKn(WIDTH, (WIDTH - NR_BITS), (WIDTH - 1))
#else
#define _LSMASKn(WIDTH, NR_BITS) _MASKn(WIDTH, (NR_BITS - 1), 0)
#endif
#define LSMASK16(NR_BITS) _LSMASKn (16, (NR_BITS))
#define LSMASK32(NR_BITS) _LSMASKn (32, (NR_BITS))
#define LSMASK64(NR_BITS) _LSMASKn (64, (NR_BITS))
#if (WITH_TARGET_WORD_BITSIZE == 64)
#define LSMASK(NR_BITS) ((NR_BITS) < 1 \
? 0 \
: _MASKn (64, \
_LSB_POS (64, \
((NR_BITS) < 1 ? 0 \
: (NR_BITS) - 1)), \
_LSB_POS (64, 0)))
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
#define LSMASK(NR_BITS) ((NR_BITS) < 1 \
? 0 \
: _MASKn (32, \
_LSB_POS (32, \
((NR_BITS) > 32 ? 31 \
: (NR_BITS) < 1 ? 0 \
: ((NR_BITS) - 1))), \
_LSB_POS (32, 0)))
#endif
#if !defined (LSMASK)
#error "LSMASK never defined"
#endif
/* Multi-bit mask on most significant bits */
#if (WITH_TARGET_WORD_MSB == 0)
#define _MSMASKn(WIDTH, NR_BITS) _MASKn (WIDTH, 0, (NR_BITS - 1))
#else
#define _MSMASKn(WIDTH, NR_BITS) _MASKn (WIDTH, (WIDTH - 1), (WIDTH - NR_BITS))
#endif
#define MSMASK16(NR_BITS) _MSMASKn (16, (NR_BITS))
#define MSMASK32(NR_BITS) _MSMASKn (32, (NR_BITS))
#define MSMASK64(NR_BITS) _MSMASKn (64, (NR_BITS))
#if (WITH_TARGET_WORD_BITSIZE == 64)
#define MSMASK(NR_BITS) (NR_BITS < 1 \
? 0 \
: _MASKn (64, \
_MSB_POS (64, 0), \
_MSB_POS (64, \
((NR_BITS) < 1 ? 0 \
: (NR_BITS) - 1))))
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
#define MSMASK(NR_BITS) (NR_BITS <= 32 \
? 0 \
: _MASKn (32, \
_MSB_POS (32, 0), \
_MSB_POS (32, \
((NR_BITS) <= 32 ? 0 \
: (NR_BITS) - 33))))
#endif
#if !defined (MSMASK)
#error "MSMASK never defined"
#endif
/* mask the required bits, leaving them in place */
INLINE_SIM_BITS(unsigned16) MASKED16 (unsigned16 word, unsigned start, unsigned stop);
INLINE_SIM_BITS(unsigned32) MASKED32 (unsigned32 word, unsigned start, unsigned stop);
INLINE_SIM_BITS(unsigned64) MASKED64 (unsigned64 word, unsigned start, unsigned stop);
INLINE_SIM_BITS(unsigned_word) MASKED (unsigned_word word, unsigned start, unsigned stop);
/* Ditto but nr of ls-bits specified */
INLINE_SIM_BITS(unsigned16) LSMASKED16 (unsigned16 word, unsigned nr_bits);
INLINE_SIM_BITS(unsigned32) LSMASKED32 (unsigned32 word, unsigned nr_bits);
INLINE_SIM_BITS(unsigned64) LSMASKED64 (unsigned64 word, unsigned nr_bits);
INLINE_SIM_BITS(unsigned_word) LSMASKED (unsigned_word word, unsigned nr_bits);
/* Ditto but nr of ms-bits specified */
INLINE_SIM_BITS(unsigned16) MSMASKED16 (unsigned16 word, unsigned nr_bits);
INLINE_SIM_BITS(unsigned32) MSMASKED32 (unsigned32 word, unsigned nr_bits);
INLINE_SIM_BITS(unsigned64) MSMASKED64 (unsigned64 word, unsigned nr_bits);
INLINE_SIM_BITS(unsigned_word) MSMASKED (unsigned_word word, unsigned nr_bits);
/* extract the required bits aligning them with the lsb */
INLINE_SIM_BITS(unsigned16) EXTRACTED16 (unsigned16 val, unsigned start, unsigned stop);
INLINE_SIM_BITS(unsigned32) EXTRACTED32 (unsigned32 val, unsigned start, unsigned stop);
INLINE_SIM_BITS(unsigned64) EXTRACTED64 (unsigned64 val, unsigned start, unsigned stop);
INLINE_SIM_BITS(unsigned_word) EXTRACTED (unsigned_word val, unsigned start, unsigned stop);
/* move a single bit around */
/* NB: the weirdness (N>O?N-O:0) is to stop a warning from GCC */
#define _SHUFFLEDn(N, WORD, OLD, NEW) \
((OLD) < (NEW) \
? (((unsigned##N)(WORD) \
>> (((NEW) > (OLD)) ? ((NEW) - (OLD)) : 0)) \
& MASK32((NEW), (NEW))) \
: (((unsigned##N)(WORD) \
<< (((OLD) > (NEW)) ? ((OLD) - (NEW)) : 0)) \
& MASK32((NEW), (NEW))))
#define SHUFFLED32(WORD, OLD, NEW) _SHUFFLEDn (32, WORD, OLD, NEW)
#define SHUFFLED64(WORD, OLD, NEW) _SHUFFLEDn (64, WORD, OLD, NEW)
#define SHUFFLED(WORD, OLD, NEW) _SHUFFLEDn (_word, WORD, OLD, NEW)
/* move a group of bits around */
INLINE_SIM_BITS(unsigned16) INSERTED16 (unsigned16 val, unsigned start, unsigned stop);
INLINE_SIM_BITS(unsigned32) INSERTED32 (unsigned32 val, unsigned start, unsigned stop);
INLINE_SIM_BITS(unsigned64) INSERTED64 (unsigned64 val, unsigned start, unsigned stop);
INLINE_SIM_BITS(unsigned_word) INSERTED (unsigned_word val, unsigned start, unsigned stop);
/* depending on MODE return a 64bit or 32bit (sign extended) value */
#if (WITH_TARGET_WORD_BITSIZE == 64)
#define EXTENDED(X) ((signed64)(signed32)(X))
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
#define EXTENDED(X) (X)
#endif
/* memory alignment macros */
#define _ALIGNa(A,X) (((X) + ((A) - 1)) & ~((A) - 1))
#define _FLOORa(A,X) ((X) & ~((A) - 1))
#define ALIGN_8(X) _ALIGNa (8, X)
#define ALIGN_16(X) _ALIGNa (16, X)
#define ALIGN_PAGE(X) _ALIGNa (0x1000, X)
#define FLOOR_PAGE(X) ((X) & ~(0x1000 - 1))
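/* For example: ALIGN_8 (13) rounds up to 16, ALIGN_PAGE (0x1001)
   rounds up to 0x2000 and FLOOR_PAGE (0x1234) truncates to 0x1000.
   All of these assume a power of two alignment. */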
/* bit blitting macros */
#define BLIT32(V, POS, BIT) \
do { \
if (BIT) \
V |= BIT32 (POS); \
else \
V &= ~BIT32 (POS); \
} while (0)
#define MBLIT32(V, LO, HI, VAL) \
do { \
(V) = (((V) & ~MASK32 ((LO), (HI))) \
| INSERTED32 (VAL, LO, HI)); \
} while (0)
/* some rotate functions. The generic macros ROT, ROTL, ROTR are
intentionally omitted. */
INLINE_SIM_BITS(unsigned16) ROT16 (unsigned16 val, int shift);
INLINE_SIM_BITS(unsigned32) ROT32 (unsigned32 val, int shift);
INLINE_SIM_BITS(unsigned64) ROT64 (unsigned64 val, int shift);
INLINE_SIM_BITS(unsigned16) ROTL16 (unsigned16 val, unsigned shift);
INLINE_SIM_BITS(unsigned32) ROTL32 (unsigned32 val, unsigned shift);
INLINE_SIM_BITS(unsigned64) ROTL64 (unsigned64 val, unsigned shift);
INLINE_SIM_BITS(unsigned16) ROTR16 (unsigned16 val, unsigned shift);
INLINE_SIM_BITS(unsigned32) ROTR32 (unsigned32 val, unsigned shift);
INLINE_SIM_BITS(unsigned64) ROTR64 (unsigned64 val, unsigned shift);
/* Sign extension operations */
INLINE_SIM_BITS(unsigned16) SEXT16 (signed16 val, unsigned sign_bit);
INLINE_SIM_BITS(unsigned32) SEXT32 (signed32 val, unsigned sign_bit);
INLINE_SIM_BITS(unsigned64) SEXT64 (signed64 val, unsigned sign_bit);
INLINE_SIM_BITS(unsigned_word) SEXT (signed_word val, unsigned sign_bit);
#if ((SIM_BITS_INLINE & INCLUDE_MODULE) && (SIM_BITS_INLINE & INCLUDED_BY_MODULE))
#include "sim-bits.c"
#endif
#endif /* _SIM_BITS_H_ */

sim/common/sim-endian.h (new file, 356 lines)

@@ -0,0 +1,356 @@
/* This file is part of the program psim.
Copyright (C) 1994-1995, Andrew Cagney <cagney@highland.com.au>
Copyright (C) 1997, Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _SIM_ENDIAN_H_
#define _SIM_ENDIAN_H_
/* C byte conversion functions */
INLINE_SIM_ENDIAN(unsigned_1) endian_h2t_1(unsigned_1 x);
INLINE_SIM_ENDIAN(unsigned_2) endian_h2t_2(unsigned_2 x);
INLINE_SIM_ENDIAN(unsigned_4) endian_h2t_4(unsigned_4 x);
INLINE_SIM_ENDIAN(unsigned_8) endian_h2t_8(unsigned_8 x);
INLINE_SIM_ENDIAN(unsigned_1) endian_t2h_1(unsigned_1 x);
INLINE_SIM_ENDIAN(unsigned_2) endian_t2h_2(unsigned_2 x);
INLINE_SIM_ENDIAN(unsigned_4) endian_t2h_4(unsigned_4 x);
INLINE_SIM_ENDIAN(unsigned_8) endian_t2h_8(unsigned_8 x);
INLINE_SIM_ENDIAN(unsigned_1) swap_1(unsigned_1 x);
INLINE_SIM_ENDIAN(unsigned_2) swap_2(unsigned_2 x);
INLINE_SIM_ENDIAN(unsigned_4) swap_4(unsigned_4 x);
INLINE_SIM_ENDIAN(unsigned_8) swap_8(unsigned_8 x);
INLINE_SIM_ENDIAN(unsigned_1) endian_h2be_1(unsigned_1 x);
INLINE_SIM_ENDIAN(unsigned_2) endian_h2be_2(unsigned_2 x);
INLINE_SIM_ENDIAN(unsigned_4) endian_h2be_4(unsigned_4 x);
INLINE_SIM_ENDIAN(unsigned_8) endian_h2be_8(unsigned_8 x);
INLINE_SIM_ENDIAN(unsigned_1) endian_be2h_1(unsigned_1 x);
INLINE_SIM_ENDIAN(unsigned_2) endian_be2h_2(unsigned_2 x);
INLINE_SIM_ENDIAN(unsigned_4) endian_be2h_4(unsigned_4 x);
INLINE_SIM_ENDIAN(unsigned_8) endian_be2h_8(unsigned_8 x);
INLINE_SIM_ENDIAN(unsigned_1) endian_h2le_1(unsigned_1 x);
INLINE_SIM_ENDIAN(unsigned_2) endian_h2le_2(unsigned_2 x);
INLINE_SIM_ENDIAN(unsigned_4) endian_h2le_4(unsigned_4 x);
INLINE_SIM_ENDIAN(unsigned_8) endian_h2le_8(unsigned_8 x);
INLINE_SIM_ENDIAN(unsigned_1) endian_le2h_1(unsigned_1 x);
INLINE_SIM_ENDIAN(unsigned_2) endian_le2h_2(unsigned_2 x);
INLINE_SIM_ENDIAN(unsigned_4) endian_le2h_4(unsigned_4 x);
INLINE_SIM_ENDIAN(unsigned_8) endian_le2h_8(unsigned_8 x);
INLINE_SIM_ENDIAN(void*) offset_1(unsigned_1 *x, int ws, int w);
INLINE_SIM_ENDIAN(void*) offset_2(unsigned_2 *x, int ws, int w);
INLINE_SIM_ENDIAN(void*) offset_4(unsigned_4 *x, int ws, int w);
INLINE_SIM_ENDIAN(void*) offset_8(unsigned_8 *x, int ws, int w);
/* SWAP */
#define SWAP_1(X) swap_1(X)
#define SWAP_2(X) swap_2(X)
#define SWAP_4(X) swap_4(X)
#define SWAP_8(X) swap_8(X)
/* HOST to BE */
#define H2BE_1(X) endian_h2be_1(X)
#define H2BE_2(X) endian_h2be_2(X)
#define H2BE_4(X) endian_h2be_4(X)
#define H2BE_8(X) endian_h2be_8(X)
#define BE2H_1(X) endian_be2h_1(X)
#define BE2H_2(X) endian_be2h_2(X)
#define BE2H_4(X) endian_be2h_4(X)
#define BE2H_8(X) endian_be2h_8(X)
/* HOST to LE */
#define H2LE_1(X) endian_h2le_1(X)
#define H2LE_2(X) endian_h2le_2(X)
#define H2LE_4(X) endian_h2le_4(X)
#define H2LE_8(X) endian_h2le_8(X)
#define LE2H_1(X) endian_le2h_1(X)
#define LE2H_2(X) endian_le2h_2(X)
#define LE2H_4(X) endian_le2h_4(X)
#define LE2H_8(X) endian_le2h_8(X)
/* HOST to TARGET */
#define H2T_1(X) endian_h2t_1(X)
#define H2T_2(X) endian_h2t_2(X)
#define H2T_4(X) endian_h2t_4(X)
#define H2T_8(X) endian_h2t_8(X)
#define T2H_1(X) endian_t2h_1(X)
#define T2H_2(X) endian_t2h_2(X)
#define T2H_4(X) endian_t2h_4(X)
#define T2H_8(X) endian_t2h_8(X)
/* CONVERT IN PLACE
These macros, given an argument of unknown size, swap its value in
place if a host/target conversion is required. */
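/* For instance (sketch only), given a 4 byte value read from target
   memory:

     unsigned_4 insn = 0x12345678;
     T2H (insn);

   sizeof selects T2H_4 at compile time and the byte swap happens only
   when host and target byte order differ. */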
#define H2T(VARIABLE) \
do { \
switch (sizeof(VARIABLE)) { \
case 1: VARIABLE = H2T_1(VARIABLE); break; \
case 2: VARIABLE = H2T_2(VARIABLE); break; \
case 4: VARIABLE = H2T_4(VARIABLE); break; \
case 8: VARIABLE = H2T_8(VARIABLE); break; \
} \
} while (0)
#define T2H(VARIABLE) \
do { \
switch (sizeof(VARIABLE)) { \
case 1: VARIABLE = T2H_1(VARIABLE); break; \
case 2: VARIABLE = T2H_2(VARIABLE); break; \
case 4: VARIABLE = T2H_4(VARIABLE); break; \
case 8: VARIABLE = T2H_8(VARIABLE); break; \
} \
} while (0)
#define SWAP(VARIABLE) \
do { \
switch (sizeof(VARIABLE)) { \
case 1: VARIABLE = SWAP_1(VARIABLE); break; \
case 2: VARIABLE = SWAP_2(VARIABLE); break; \
case 4: VARIABLE = SWAP_4(VARIABLE); break; \
case 8: VARIABLE = SWAP_8(VARIABLE); break; \
} \
} while (0)
#define H2BE(VARIABLE) \
do { \
switch (sizeof(VARIABLE)) { \
case 1: VARIABLE = H2BE_1(VARIABLE); break; \
case 2: VARIABLE = H2BE_2(VARIABLE); break; \
case 4: VARIABLE = H2BE_4(VARIABLE); break; \
case 8: VARIABLE = H2BE_8(VARIABLE); break; \
} \
} while (0)
#define BE2H(VARIABLE) \
do { \
switch (sizeof(VARIABLE)) { \
case 1: VARIABLE = BE2H_1(VARIABLE); break; \
case 2: VARIABLE = BE2H_2(VARIABLE); break; \
case 4: VARIABLE = BE2H_4(VARIABLE); break; \
case 8: VARIABLE = BE2H_8(VARIABLE); break; \
} \
} while (0)
#define H2LE(VARIABLE) \
do { \
switch (sizeof(VARIABLE)) { \
case 1: VARIABLE = H2LE_1(VARIABLE); break; \
case 2: VARIABLE = H2LE_2(VARIABLE); break; \
case 4: VARIABLE = H2LE_4(VARIABLE); break; \
case 8: VARIABLE = H2LE_8(VARIABLE); break; \
} \
} while (0)
#define LE2H(VARIABLE) \
do { \
switch (sizeof(VARIABLE)) { \
case 1: VARIABLE = LE2H_1(VARIABLE); break; \
case 2: VARIABLE = LE2H_2(VARIABLE); break; \
case 4: VARIABLE = LE2H_4(VARIABLE); break; \
case 8: VARIABLE = LE2H_8(VARIABLE); break; \
} \
} while (0)
/* TARGET WORD:
Byte swap a quantity the size of the target's word */
#if (WITH_TARGET_WORD_BITSIZE == 64)
#define H2T_word(X) H2T_8(X)
#define T2H_word(X) T2H_8(X)
#define H2BE_word(X) H2BE_8(X)
#define BE2H_word(X) BE2H_8(X)
#define H2LE_word(X) H2LE_8(X)
#define LE2H_word(X) LE2H_8(X)
#define SWAP_word(X) SWAP_8(X)
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
#define H2T_word(X) H2T_4(X)
#define T2H_word(X) T2H_4(X)
#define H2BE_word(X) H2BE_4(X)
#define BE2H_word(X) BE2H_4(X)
#define H2LE_word(X) H2LE_4(X)
#define LE2H_word(X) LE2H_4(X)
#define SWAP_word(X) SWAP_4(X)
#endif
/* TARGET CELL:
Byte swap a quantity the size of the target's IEEE 1275 memory cell */
#define H2T_cell(X) H2T_4(X)
#define T2H_cell(X) T2H_4(X)
#define H2BE_cell(X) H2BE_4(X)
#define BE2H_cell(X) BE2H_4(X)
#define H2LE_cell(X) H2LE_4(X)
#define LE2H_cell(X) LE2H_4(X)
#define SWAP_cell(X) SWAP_4(X)
/* HOST Offsets:
Address of high/low sub-word within a host word quantity.
Address of sub-word N within a host word quantity. NOTE: Numbering
is BIG endian always. */
#define AH1_2(X) (unsigned_1*)offset_2((X), 1, 0)
#define AL1_2(X) (unsigned_1*)offset_2((X), 1, 1)
#define AH2_4(X) (unsigned_2*)offset_4((X), 2, 0)
#define AL2_4(X) (unsigned_2*)offset_4((X), 2, 1)
#define AH4_8(X) (unsigned_4*)offset_8((X), 4, 0)
#define AL4_8(X) (unsigned_4*)offset_8((X), 4, 1)
#if (WITH_TARGET_WORD_BITSIZE == 64)
#define AH_word(X) AH4_8(X)
#define AL_word(X) AL4_8(X)
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
#define AH_word(X) AH2_4(X)
#define AL_word(X) AL2_4(X)
#endif
#define A1_2(X,N) (unsigned_1*)offset_2((X), 1, (N))
#define A1_4(X,N) (unsigned_1*)offset_4((X), 1, (N))
#define A2_4(X,N) (unsigned_2*)offset_4((X), 2, (N))
#define A1_8(X,N) (unsigned_1*)offset_8((X), 1, (N))
#define A2_8(X,N) (unsigned_2*)offset_8((X), 2, (N))
#define A4_8(X,N) (unsigned_4*)offset_8((X), 4, (N))
/* HOST Components:
Value of sub-word within a host word quantity */
#define VH1_2(X) ((unsigned_1)((unsigned_2)(X) >> 8))
#define VL1_2(X) (unsigned_1)(X)
#define VH2_4(X) ((unsigned_2)((unsigned_4)(X) >> 16))
#define VL2_4(X) ((unsigned_2)(X))
#define VH4_8(X) ((unsigned_4)((unsigned_8)(X) >> 32))
#define VL4_8(X) ((unsigned_4)(X))
#if (WITH_TARGET_WORD_BITSIZE == 64)
#define VH_word(X) VH4_8(X)
#define VL_word(X) VL4_8(X)
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
#define VH_word(X) VH2_4(X)
#define VL_word(X) VL2_4(X)
#endif
#define V1_2(X,N) ((unsigned_1)((unsigned_2)(X) >> ( 8 * (1 - (N)))))
#define V1_4(X,N) ((unsigned_1)((unsigned_4)(X) >> ( 8 * (3 - (N)))))
#define V1_8(X,N) ((unsigned_1)((unsigned_8)(X) >> ( 8 * (7 - (N)))))
#define V2_4(X,N) ((unsigned_2)((unsigned_4)(X) >> (16 * (1 - (N)))))
#define V2_8(X,N) ((unsigned_2)((unsigned_8)(X) >> (16 * (3 - (N)))))
#define V4_8(X,N) ((unsigned_4)((unsigned_8)(X) >> (32 * (1 - (N)))))
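/* Sub-word numbering is big endian regardless of host byte order:
   sub-word 0 is the most significant.  For example, with X equal to
   0x11223344:

     V1_4 (X, 0) -> 0x11      V2_4 (X, 0) -> 0x1122
     V1_4 (X, 3) -> 0x44      V2_4 (X, 1) -> 0x3344

   */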
/* Reverse - insert sub-word into word quantity */
#define V2_H1(X) ((unsigned_2)(unsigned_1)(X) << 8)
#define V2_L1(X) ((unsigned_2)(unsigned_1)(X))
#define V4_H2(X) ((unsigned_4)(unsigned_2)(X) << 16)
#define V4_L2(X) ((unsigned_4)(unsigned_2)(X))
#define V8_H4(X) ((unsigned_8)(unsigned_4)(X) << 32)
#define V8_L4(X) ((unsigned_8)(unsigned_4)(X))
#define V2_1(X,N) ((unsigned_2)(unsigned_1)(X) << ( 8 * (1 - (N))))
#define V4_1(X,N) ((unsigned_4)(unsigned_1)(X) << ( 8 * (3 - (N))))
#define V8_1(X,N) ((unsigned_8)(unsigned_1)(X) << ( 8 * (7 - (N))))
#define V4_2(X,N) ((unsigned_4)(unsigned_2)(X) << (16 * (1 - (N))))
#define V8_2(X,N) ((unsigned_8)(unsigned_2)(X) << (16 * (3 - (N))))
#define V8_4(X,N) ((unsigned_8)(unsigned_4)(X) << (32 * (1 - (N))))
/* Reverse - insert N sub-words into single word quantity */
#define U2_1(I0,I1) (V2_1(I0,0) | V2_1(I1,1))
#define U4_1(I0,I1,I2,I3) (V4_1(I0,0) | V4_1(I1,1) | V4_1(I2,2) | V4_1(I3,3))
#define U8_1(I0,I1,I2,I3,I4,I5,I6,I7) \
(V8_1(I0,0) | V8_1(I1,1) | V8_1(I2,2) | V8_1(I3,3) \
| V8_1(I4,4) | V8_1(I5,5) | V8_1(I6,6) | V8_1(I7,7))
#define U4_2(I0,I1) (V4_2(I0,0) | V4_2(I1,1))
#define U8_2(I0,I1,I2,I3) (V8_2(I0,0) | V8_2(I1,1) | V8_2(I2,2) | V8_2(I3,3))
#define U8_4(I0,I1) (V8_4(I0,0) | V8_4(I1,1))
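/* These compose a wider value from big endian ordered sub-words, for
   example:

     U4_2 (0x1122, 0x3344)  -> 0x11223344
     U8_4 (hi, lo)          -> 64 bit value with hi in the upper 32 bits

   (U8_4 is what the do_st change below now uses in place of the old
   V4_H8 and V4_L8 pair.) */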
#if (WITH_TARGET_WORD_BITSIZE == 64)
#define Vword_H(X) V8_H4(X)
#define Vword_L(X) V8_L4(X)
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
#define Vword_H(X) V4_H2(X)
#define Vword_L(X) V4_L2(X)
#endif
#if (SIM_ENDIAN_INLINE & INCLUDE_MODULE)
# include "sim-endian.c"
#endif
#endif /* _SIM_ENDIAN_H_ */


@@ -1,5 +1,8 @@
Wed Aug 27 10:24:15 1997 Andrew Cagney <cagney@b1.cygnus.com>
* sim_calls.c (sim_create_inferior): Check the simulator was
initialized before creating inferior.
* idecode_expression.h (ALU_END): From Charles Lefurgy - Extract
sign bit using 64 bit and not a 32 bit mask.


@@ -1,5 +1,7 @@
Wed Aug 27 13:41:24 1997 Andrew Cagney <cagney@b1.cygnus.com>
* insns (do_st): Use U8_4 instead of V4_L8.
* sim-calls.c (sim_open): Add call to sim_analyze_program, update
call to sim_config.


@@ -1059,7 +1059,7 @@ void::function::do_st:int Source, unsigned32 base, unsigned32 *rBase, int m , in
"0x%lx: st.d with odd source register %d",
cia.ip, Source);
addr = base + (S ? (offset << 3) : offset);
val = (V4_H8 (GPR(Source + 1)) | V4_L8 (GPR(Source)));
val = U8_4 (GPR(Source + 1), GPR(Source));
STORE (addr, 8, val);
}
break;