Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-6.2-20211109' into staging

ppc patch queue for 2021-11-09

Here's the latest set of ppc related patches for qemu-6.2, which I
hope will squeeze in just barely before the hard freeze.

This set includes a change to MAINTAINERS moving maintainership of ppc
from myself and Greg Kurz to Cédric Le Goater and Daniel Henrique
Barboza.  So, I expect this to be my last pull request as ppc
maintainer.  It's been great, but it's time I moved on to other things.

Apart from that, this patchset is mostly a lot of updates to TCG
implementations of ISA 3.1 (POWER10) instructions from the El Dorado
team.  There are also a handful of other fixes.

# gpg: Signature made Tue 09 Nov 2021 05:14:33 AM CET
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]

* remotes/dgibson/tags/ppc-for-6.2-20211109: (54 commits)
  spapr_numa.c: FORM2 table handle nodes with no distance info
  target/ppc, hw/ppc: Change maintainers
  target/ppc: cntlzdm/cnttzdm implementation without brcond
  target/ppc: Implement lxvkq instruction
  target/ppc: Implement xxblendvb/xxblendvh/xxblendvw/xxblendvd instructions
  target/ppc: implemented XXSPLTIDP instruction
  target/ppc: Implemented XXSPLTIW using decodetree
  target/ppc: implemented XXSPLTI32DX
  target/ppc: moved XXSPLTIB to using decodetree
  target/ppc: moved XXSPLTW to using decodetree
  target/ppc: added the instructions PLXVP and PSTXVP
  target/ppc: added the instructions PLXV and PSTXV
  target/ppc: added the instructions LXVPX and STXVPX
  target/ppc: added the instructions LXVP and STXVP
  target/ppc: moved stxvx and lxvx from legacy to decodetree
  target/ppc: moved stxv and lxv from legacy to decodetree
  target/ppc: receive high/low as argument in get/set_cpu_vsr
  target/ppc: Introduce REQUIRE_VSX macro
  target/ppc: Implement Vector Extract Double to VSR using GPR index insns
  target/ppc: Move vinsertb/vinserth/vinsertw/vinsertd to decodetree
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2021-11-09 07:18:33 +01:00
commit f10e7b9f6f
25 changed files with 2171 additions and 1297 deletions


@ -262,8 +262,10 @@ F: hw/openrisc/
F: tests/tcg/openrisc/
PowerPC TCG CPUs
M: David Gibson <david@gibson.dropbear.id.au>
M: Greg Kurz <groug@kaod.org>
M: Cédric Le Goater <clg@kaod.org>
M: Daniel Henrique Barboza <danielhb413@gmail.com>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Maintained
F: target/ppc/
@ -382,8 +384,10 @@ F: target/mips/kvm*
F: target/mips/sysemu/
PPC KVM CPUs
M: David Gibson <david@gibson.dropbear.id.au>
M: Greg Kurz <groug@kaod.org>
M: Cédric Le Goater <clg@kaod.org>
M: Daniel Henrique Barboza <danielhb413@gmail.com>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
S: Maintained
F: target/ppc/kvm.c
@ -1321,8 +1325,10 @@ F: include/hw/rtc/m48t59.h
F: tests/avocado/ppc_prep_40p.py
sPAPR
M: David Gibson <david@gibson.dropbear.id.au>
M: Greg Kurz <groug@kaod.org>
M: Cédric Le Goater <clg@kaod.org>
M: Daniel Henrique Barboza <danielhb413@gmail.com>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org
S: Maintained
F: hw/*/spapr*
@ -1382,6 +1388,8 @@ F: include/hw/pci-host/mv64361.h
Virtual Open Firmware (VOF)
M: Alexey Kardashevskiy <aik@ozlabs.ru>
R: Cédric Le Goater <clg@kaod.org>
R: Daniel Henrique Barboza <danielhb413@gmail.com>
R: David Gibson <david@gibson.dropbear.id.au>
R: Greg Kurz <groug@kaod.org>
L: qemu-ppc@nongnu.org


@ -23,6 +23,7 @@
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "sysemu/qtest.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/fw-path-provider.h"
@ -199,7 +200,7 @@ static void pegasos2_init(MachineState *machine)
if (!pm->vof) {
warn_report("Option -kernel may be ineffective with -bios.");
}
} else if (pm->vof) {
} else if (pm->vof && !qtest_enabled()) {
warn_report("Using Virtual OpenFirmware but no -kernel option.");
}


@ -36,7 +36,7 @@ static void pnv_pnor_update(PnvPnor *s, int offset, int size)
int offset_end;
int ret;
if (s->blk) {
if (!s->blk || !blk_is_writable(s->blk)) {
return;
}


@ -546,12 +546,24 @@ static void spapr_numa_FORM2_write_rtas_tables(SpaprMachineState *spapr,
* NUMA nodes, but QEMU adds the default NUMA node without
* adding the numa_info to retrieve distance info from.
*/
if (src == dst) {
distance_table[i++] = NUMA_DISTANCE_MIN;
continue;
distance_table[i] = numa_info[src].distance[dst];
if (distance_table[i] == 0) {
/*
* In case QEMU adds a default NUMA single node when the user
* did not add any, or where the user did not supply distances,
* the value will be 0 here. Populate the table with a fallback
* simple local / remote distance.
*/
if (src == dst) {
distance_table[i] = NUMA_DISTANCE_MIN;
} else {
distance_table[i] = numa_info[src].distance[dst];
if (distance_table[i] < NUMA_DISTANCE_MIN) {
distance_table[i] = NUMA_DISTANCE_DEFAULT;
}
}
}
distance_table[i++] = numa_info[src].distance[dst];
i++;
}
}
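The new loop body is easier to follow with a brief gloss; the note below is an editor's addition describing the behaviour of the hunk above, not code from the patch.

/*
 * Editor's gloss: the table entry is first taken from
 * numa_info[src].distance[dst]; only when that value is 0 (no distance was
 * supplied, e.g. for the default single NUMA node) is a fallback written:
 * NUMA_DISTANCE_MIN for src == dst, NUMA_DISTANCE_DEFAULT otherwise.
 * The index i now advances exactly once per (src, dst) pair.
 */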


@ -116,12 +116,16 @@
decNumber * decNumberFromUInt32(decNumber *, uint32_t);
decNumber *decNumberFromInt64(decNumber *, int64_t);
decNumber *decNumberFromUInt64(decNumber *, uint64_t);
decNumber *decNumberFromInt128(decNumber *, uint64_t, int64_t);
decNumber *decNumberFromUInt128(decNumber *, uint64_t, uint64_t);
decNumber * decNumberFromString(decNumber *, const char *, decContext *);
char * decNumberToString(const decNumber *, char *);
char * decNumberToEngString(const decNumber *, char *);
uint32_t decNumberToUInt32(const decNumber *, decContext *);
int32_t decNumberToInt32(const decNumber *, decContext *);
int64_t decNumberIntegralToInt64(const decNumber *dn, decContext *set);
void decNumberIntegralToInt128(const decNumber *dn, decContext *set,
uint64_t *plow, uint64_t *phigh);
uint8_t * decNumberGetBCD(const decNumber *, uint8_t *);
decNumber * decNumberSetBCD(decNumber *, const uint8_t *, uint32_t);


@ -98,7 +98,7 @@
/* Shared lookup tables */
extern const uByte DECSTICKYTAB[10]; /* re-round digits if sticky */
extern const uLong DECPOWERS[19]; /* powers of ten table */
extern const uLong DECPOWERS[20]; /* powers of ten table */
/* The following are included from decDPD.h */
extern const uShort DPD2BIN[1024]; /* DPD -> 0-999 */
extern const uShort BIN2DPD[1000]; /* 0-999 -> DPD */


@ -590,6 +590,42 @@ static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
#endif
}
/*
* Unsigned 128x64 multiplication.
* Returns true if the result got truncated to 128 bits.
* Otherwise, returns false and the multiplication result via plow and phigh.
*/
static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor)
{
#if defined(CONFIG_INT128) && \
(__has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5)
bool res;
__uint128_t r;
__uint128_t f = ((__uint128_t)*phigh << 64) | *plow;
res = __builtin_mul_overflow(f, factor, &r);
*plow = r;
*phigh = r >> 64;
return res;
#else
uint64_t dhi = *phigh;
uint64_t dlo = *plow;
uint64_t ahi;
uint64_t blo, bhi;
if (dhi == 0) {
mulu64(plow, phigh, dlo, factor);
return false;
}
mulu64(plow, &ahi, dlo, factor);
mulu64(&blo, &bhi, dhi, factor);
return uadd64_overflow(ahi, blo, phigh) || bhi != 0;
#endif
}
/**
* uadd64_carry - addition with carry-in and carry-out
* @x, @y: addends
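A minimal usage sketch of the new mulu128() may be useful; this is an editor's illustration (the function and variable names below are invented for the example), not code from the patch.

#include "qemu/host-utils.h"

/* Editor's sketch: scale a 128-bit value held in lo/hi by 10, in place. */
static void mulu128_example(void)
{
    uint64_t lo = 0xffffffffffffffffULL;  /* low 64 bits of the operand  */
    uint64_t hi = 0;                      /* high 64 bits of the operand */

    /* (2^64 - 1) * 10 still fits in 128 bits, so no truncation occurs... */
    if (!mulu128(&lo, &hi, 10)) {
        /* ...and the product is returned in place: hi == 9, lo == 0xfffffffffffffff6 */
    }
}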


@ -53,12 +53,13 @@ static const Flag *mfctop=(Flag *)&mfcone; /* -> top byte */
const uByte DECSTICKYTAB[10]={1,1,2,3,4,6,6,7,8,9}; /* used if sticky */
/* ------------------------------------------------------------------ */
/* Powers of ten (powers[n]==10**n, 0<=n<=9) */
/* Powers of ten (powers[n]==10**n, 0<=n<=19) */
/* ------------------------------------------------------------------ */
const uLong DECPOWERS[19] = {1, 10, 100, 1000, 10000, 100000, 1000000,
const uLong DECPOWERS[20] = {1, 10, 100, 1000, 10000, 100000, 1000000,
10000000, 100000000, 1000000000, 10000000000ULL, 100000000000ULL,
1000000000000ULL, 10000000000000ULL, 100000000000000ULL, 1000000000000000ULL,
10000000000000000ULL, 100000000000000000ULL, 1000000000000000000ULL, };
10000000000000000ULL, 100000000000000000ULL, 1000000000000000000ULL,
10000000000000000000ULL,};
/* ------------------------------------------------------------------ */
/* decContextClearStatus -- clear bits in current status */


@ -167,6 +167,7 @@
/* ------------------------------------------------------------------ */
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "libdecnumber/dconfig.h"
#include "libdecnumber/decNumber.h"
#include "libdecnumber/decNumberLocal.h"
@ -263,6 +264,7 @@ static decNumber * decTrim(decNumber *, decContext *, Flag, Int *);
static Int decUnitAddSub(const Unit *, Int, const Unit *, Int, Int,
Unit *, Int);
static Int decUnitCompare(const Unit *, Int, const Unit *, Int, Int);
static bool mulUInt128ByPowOf10(uLong *, uLong *, uInt);
#if !DECSUBSET
/* decFinish == decFinalize when no subset arithmetic needed */
@ -462,6 +464,41 @@ decNumber *decNumberFromUInt64(decNumber *dn, uint64_t uin)
return dn;
} /* decNumberFromUInt64 */
decNumber *decNumberFromInt128(decNumber *dn, uint64_t lo, int64_t hi)
{
uint64_t unsig_hi = hi;
if (hi < 0) {
if (lo == 0) {
unsig_hi = -unsig_hi;
} else {
unsig_hi = ~unsig_hi;
lo = -lo;
}
}
decNumberFromUInt128(dn, lo, unsig_hi);
if (hi < 0) {
dn->bits = DECNEG; /* sign needed */
}
return dn;
} /* decNumberFromInt128 */
decNumber *decNumberFromUInt128(decNumber *dn, uint64_t lo, uint64_t hi)
{
uint64_t rem;
Unit *up; /* work pointer */
decNumberZero(dn); /* clean */
if (lo == 0 && hi == 0) {
return dn; /* [or decGetDigits bad call] */
}
for (up = dn->lsu; hi > 0 || lo > 0; up++) {
rem = divu128(&lo, &hi, DECDPUNMAX + 1);
*up = (Unit)rem;
}
dn->digits = decGetDigits(dn->lsu, up - dn->lsu);
return dn;
} /* decNumberFromUInt128 */
/* ------------------------------------------------------------------ */
/* to-int64 -- conversion to int64 */
/* */
@ -506,6 +543,68 @@ Invalid:
return 0;
} /* decNumberIntegralToInt64 */
/* ------------------------------------------------------------------ */
/* decNumberIntegralToInt128 -- conversion to int128 */
/* */
/* dn is the decNumber to convert. dn is assumed to have been */
/* rounded to a floating point integer value. */
/* set is the context for reporting errors */
/* returns the converted decNumber via plow and phigh */
/* */
/* Invalid is set if the decNumber is a NaN, Infinite or is out of */
/* range for a signed 128 bit integer. */
/* ------------------------------------------------------------------ */
void decNumberIntegralToInt128(const decNumber *dn, decContext *set,
uint64_t *plow, uint64_t *phigh)
{
int d; /* work */
const Unit *up; /* .. */
uint64_t lo = 0, hi = 0;
if (decNumberIsSpecial(dn) || (dn->exponent < 0) ||
(dn->digits + dn->exponent > 39)) {
goto Invalid;
}
up = dn->lsu; /* -> lsu */
for (d = (dn->digits - 1) / DECDPUN; d >= 0; d--) {
if (mulu128(&lo, &hi, DECDPUNMAX + 1)) {
/* overflow */
goto Invalid;
}
if (uadd64_overflow(lo, up[d], &lo)) {
if (uadd64_overflow(hi, 1, &hi)) {
/* overflow */
goto Invalid;
}
}
}
if (mulUInt128ByPowOf10(&lo, &hi, dn->exponent)) {
/* overflow */
goto Invalid;
}
if (decNumberIsNegative(dn)) {
if (lo == 0) {
*phigh = -hi;
*plow = 0;
} else {
*phigh = ~hi;
*plow = -lo;
}
} else {
*plow = lo;
*phigh = hi;
}
return;
Invalid:
decContextSetStatus(set, DEC_Invalid_operation);
} /* decNumberIntegralToInt128 */
/* ------------------------------------------------------------------ */
/* to-scientific-string -- conversion to numeric string */
@ -7849,6 +7948,38 @@ static Int decGetDigits(Unit *uar, Int len) {
return digits;
} /* decGetDigits */
/* ------------------------------------------------------------------ */
/* mulUInt128ByPowOf10 -- multiply a 128-bit unsigned integer by a */
/* power of 10. */
/* */
/* The 128-bit factor composed of plow and phigh is multiplied */
/* by 10^exp. */
/* */
/* plow pointer to the low 64 bits of the first factor */
/* phigh pointer to the high 64 bits of the first factor */
/* exp the exponent of the power of 10 of the second factor */
/* */
/* If the result fits in 128 bits, returns false and the */
/* multiplication result through plow and phigh. */
/* Otherwise, returns true. */
/* ------------------------------------------------------------------ */
static bool mulUInt128ByPowOf10(uLong *plow, uLong *phigh, uInt pow10)
{
while (pow10 >= ARRAY_SIZE(powers)) {
if (mulu128(plow, phigh, powers[ARRAY_SIZE(powers) - 1])) {
/* Overflow */
return true;
}
pow10 -= ARRAY_SIZE(powers) - 1;
}
if (pow10 > 0) {
return mulu128(plow, phigh, powers[pow10]);
} else {
return false;
}
}
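A short worked example of the chunking above (an editor's illustration, assuming ARRAY_SIZE(powers) == 20 as declared in this series):

/*
 * Editor's example: pow10 = 25.
 *   loop pass:  25 >= 20, multiply by powers[19] = 10^19, pow10 becomes 6
 *   final step: 6 > 0,    multiply by powers[6]  = 10^6
 * Overall factor 10^(19+6) = 10^25, with every step overflow-checked by
 * mulu128() so a result that no longer fits in 128 bits is reported at once.
 */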
#if DECTRACE | DECCHECK
/* ------------------------------------------------------------------ */
/* decNumberShow -- display a number [debug aid] */


@ -51,6 +51,11 @@ static void set_dfp128(ppc_fprp_t *dfp, ppc_vsr_t *src)
dfp[1].VsrD(0) = src->VsrD(1);
}
static void set_dfp128_to_avr(ppc_avr_t *dst, ppc_vsr_t *src)
{
*dst = *src;
}
struct PPC_DFP {
CPUPPCState *env;
ppc_vsr_t vt, va, vb;
@ -440,8 +445,8 @@ static void ADD_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXISI_add(dfp);
}
DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64)
DFP_HELPER_TAB(daddq, decNumberAdd, ADD_PPs, 128)
DFP_HELPER_TAB(DADD, decNumberAdd, ADD_PPs, 64)
DFP_HELPER_TAB(DADDQ, decNumberAdd, ADD_PPs, 128)
static void SUB_PPs(struct PPC_DFP *dfp)
{
@ -453,8 +458,8 @@ static void SUB_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXISI_subtract(dfp);
}
DFP_HELPER_TAB(dsub, decNumberSubtract, SUB_PPs, 64)
DFP_HELPER_TAB(dsubq, decNumberSubtract, SUB_PPs, 128)
DFP_HELPER_TAB(DSUB, decNumberSubtract, SUB_PPs, 64)
DFP_HELPER_TAB(DSUBQ, decNumberSubtract, SUB_PPs, 128)
static void MUL_PPs(struct PPC_DFP *dfp)
{
@ -466,8 +471,8 @@ static void MUL_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXIMZ(dfp);
}
DFP_HELPER_TAB(dmul, decNumberMultiply, MUL_PPs, 64)
DFP_HELPER_TAB(dmulq, decNumberMultiply, MUL_PPs, 128)
DFP_HELPER_TAB(DMUL, decNumberMultiply, MUL_PPs, 64)
DFP_HELPER_TAB(DMULQ, decNumberMultiply, MUL_PPs, 128)
static void DIV_PPs(struct PPC_DFP *dfp)
{
@ -481,8 +486,8 @@ static void DIV_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXIDI(dfp);
}
DFP_HELPER_TAB(ddiv, decNumberDivide, DIV_PPs, 64)
DFP_HELPER_TAB(ddivq, decNumberDivide, DIV_PPs, 128)
DFP_HELPER_TAB(DDIV, decNumberDivide, DIV_PPs, 64)
DFP_HELPER_TAB(DDIVQ, decNumberDivide, DIV_PPs, 128)
#define DFP_HELPER_BF_AB(op, dnop, postprocs, size) \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
@ -502,8 +507,8 @@ static void CMPU_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXSNAN(dfp);
}
DFP_HELPER_BF_AB(dcmpu, decNumberCompare, CMPU_PPs, 64)
DFP_HELPER_BF_AB(dcmpuq, decNumberCompare, CMPU_PPs, 128)
DFP_HELPER_BF_AB(DCMPU, decNumberCompare, CMPU_PPs, 64)
DFP_HELPER_BF_AB(DCMPUQ, decNumberCompare, CMPU_PPs, 128)
static void CMPO_PPs(struct PPC_DFP *dfp)
{
@ -513,8 +518,8 @@ static void CMPO_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXVC(dfp);
}
DFP_HELPER_BF_AB(dcmpo, decNumberCompare, CMPO_PPs, 64)
DFP_HELPER_BF_AB(dcmpoq, decNumberCompare, CMPO_PPs, 128)
DFP_HELPER_BF_AB(DCMPO, decNumberCompare, CMPO_PPs, 64)
DFP_HELPER_BF_AB(DCMPOQ, decNumberCompare, CMPO_PPs, 128)
#define DFP_HELPER_TSTDC(op, size) \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \
@ -541,8 +546,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \
return dfp.crbf; \
}
DFP_HELPER_TSTDC(dtstdc, 64)
DFP_HELPER_TSTDC(dtstdcq, 128)
DFP_HELPER_TSTDC(DTSTDC, 64)
DFP_HELPER_TSTDC(DTSTDCQ, 128)
#define DFP_HELPER_TSTDG(op, size) \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \
@ -596,8 +601,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \
return dfp.crbf; \
}
DFP_HELPER_TSTDG(dtstdg, 64)
DFP_HELPER_TSTDG(dtstdgq, 128)
DFP_HELPER_TSTDG(DTSTDG, 64)
DFP_HELPER_TSTDG(DTSTDGQ, 128)
#define DFP_HELPER_TSTEX(op, size) \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
@ -628,8 +633,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
return dfp.crbf; \
}
DFP_HELPER_TSTEX(dtstex, 64)
DFP_HELPER_TSTEX(dtstexq, 128)
DFP_HELPER_TSTEX(DTSTEX, 64)
DFP_HELPER_TSTEX(DTSTEXQ, 128)
#define DFP_HELPER_TSTSF(op, size) \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
@ -665,8 +670,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
return dfp.crbf; \
}
DFP_HELPER_TSTSF(dtstsf, 64)
DFP_HELPER_TSTSF(dtstsfq, 128)
DFP_HELPER_TSTSF(DTSTSF, 64)
DFP_HELPER_TSTSF(DTSTSFQ, 128)
#define DFP_HELPER_TSTSFI(op, size) \
uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \
@ -700,8 +705,8 @@ uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \
return dfp.crbf; \
}
DFP_HELPER_TSTSFI(dtstsfi, 64)
DFP_HELPER_TSTSFI(dtstsfiq, 128)
DFP_HELPER_TSTSFI(DTSTSFI, 64)
DFP_HELPER_TSTSFI(DTSTSFIQ, 128)
static void QUA_PPs(struct PPC_DFP *dfp)
{
@ -746,8 +751,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
set_dfp##size(t, &dfp.vt); \
}
DFP_HELPER_QUAI(dquai, 64)
DFP_HELPER_QUAI(dquaiq, 128)
DFP_HELPER_QUAI(DQUAI, 64)
DFP_HELPER_QUAI(DQUAIQ, 128)
#define DFP_HELPER_QUA(op, size) \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
@ -764,8 +769,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
set_dfp##size(t, &dfp.vt); \
}
DFP_HELPER_QUA(dqua, 64)
DFP_HELPER_QUA(dquaq, 128)
DFP_HELPER_QUA(DQUA, 64)
DFP_HELPER_QUA(DQUAQ, 128)
static void _dfp_reround(uint8_t rmc, int32_t ref_sig, int32_t xmax,
struct PPC_DFP *dfp)
@ -842,8 +847,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
set_dfp##size(t, &dfp.vt); \
}
DFP_HELPER_RRND(drrnd, 64)
DFP_HELPER_RRND(drrndq, 128)
DFP_HELPER_RRND(DRRND, 64)
DFP_HELPER_RRND(DRRNDQ, 128)
#define DFP_HELPER_RINT(op, postprocs, size) \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
@ -868,8 +873,8 @@ static void RINTX_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXSNAN(dfp);
}
DFP_HELPER_RINT(drintx, RINTX_PPs, 64)
DFP_HELPER_RINT(drintxq, RINTX_PPs, 128)
DFP_HELPER_RINT(DRINTX, RINTX_PPs, 64)
DFP_HELPER_RINT(DRINTXQ, RINTX_PPs, 128)
static void RINTN_PPs(struct PPC_DFP *dfp)
{
@ -877,10 +882,10 @@ static void RINTN_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXSNAN(dfp);
}
DFP_HELPER_RINT(drintn, RINTN_PPs, 64)
DFP_HELPER_RINT(drintnq, RINTN_PPs, 128)
DFP_HELPER_RINT(DRINTN, RINTN_PPs, 64)
DFP_HELPER_RINT(DRINTNQ, RINTN_PPs, 128)
void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
void helper_DCTDP(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
{
struct PPC_DFP dfp;
ppc_vsr_t vb;
@ -896,7 +901,7 @@ void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
dfp_set_FPRF_from_FRT(&dfp);
}
void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
void helper_DCTQPQ(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
{
struct PPC_DFP dfp;
ppc_vsr_t vb;
@ -911,7 +916,7 @@ void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
set_dfp128(t, &dfp.vt);
}
void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
void helper_DRSP(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
{
struct PPC_DFP dfp;
uint32_t t_short = 0;
@ -929,7 +934,7 @@ void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
set_dfp64(t, &vt);
}
void helper_drdpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
void helper_DRDPQ(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
{
struct PPC_DFP dfp;
dfp_prepare_decimal128(&dfp, 0, b, env);
@ -967,8 +972,20 @@ static void CFFIX_PPs(struct PPC_DFP *dfp)
dfp_check_for_XX(dfp);
}
DFP_HELPER_CFFIX(dcffix, 64)
DFP_HELPER_CFFIX(dcffixq, 128)
DFP_HELPER_CFFIX(DCFFIX, 64)
DFP_HELPER_CFFIX(DCFFIXQ, 128)
void helper_DCFFIXQQ(CPUPPCState *env, ppc_fprp_t *t, ppc_avr_t *b)
{
struct PPC_DFP dfp;
dfp_prepare_decimal128(&dfp, NULL, NULL, env);
decNumberFromInt128(&dfp.t, (uint64_t)b->VsrD(1), (int64_t)b->VsrD(0));
dfp_finalize_decimal128(&dfp);
CFFIX_PPs(&dfp);
set_dfp128(t, &dfp.vt);
}
#define DFP_HELPER_CTFIX(op, size) \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
@ -1005,8 +1022,55 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
set_dfp64(t, &dfp.vt); \
}
DFP_HELPER_CTFIX(dctfix, 64)
DFP_HELPER_CTFIX(dctfixq, 128)
DFP_HELPER_CTFIX(DCTFIX, 64)
DFP_HELPER_CTFIX(DCTFIXQ, 128)
void helper_DCTFIXQQ(CPUPPCState *env, ppc_avr_t *t, ppc_fprp_t *b)
{
struct PPC_DFP dfp;
dfp_prepare_decimal128(&dfp, 0, b, env);
if (unlikely(decNumberIsSpecial(&dfp.b))) {
uint64_t invalid_flags = FP_VX | FP_VXCVI;
if (decNumberIsInfinite(&dfp.b)) {
if (decNumberIsNegative(&dfp.b)) {
dfp.vt.VsrD(0) = INT64_MIN;
dfp.vt.VsrD(1) = 0;
} else {
dfp.vt.VsrD(0) = INT64_MAX;
dfp.vt.VsrD(1) = UINT64_MAX;
}
} else { /* NaN */
dfp.vt.VsrD(0) = INT64_MIN;
dfp.vt.VsrD(1) = 0;
if (decNumberIsSNaN(&dfp.b)) {
invalid_flags |= FP_VXSNAN;
}
}
dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE);
} else if (unlikely(decNumberIsZero(&dfp.b))) {
dfp.vt.VsrD(0) = 0;
dfp.vt.VsrD(1) = 0;
} else {
decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context);
decNumberIntegralToInt128(&dfp.b, &dfp.context,
&dfp.vt.VsrD(1), &dfp.vt.VsrD(0));
if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) {
if (decNumberIsNegative(&dfp.b)) {
dfp.vt.VsrD(0) = INT64_MIN;
dfp.vt.VsrD(1) = 0;
} else {
dfp.vt.VsrD(0) = INT64_MAX;
dfp.vt.VsrD(1) = UINT64_MAX;
}
dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE);
} else {
dfp_check_for_XX(&dfp);
}
}
set_dfp128_to_avr(t, &dfp.vt);
}
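One editor-added observation on the saturation constants used by helper_DCTFIXQQ above (a gloss on the code, not new behaviour):

/*
 * Editor's note: the 128-bit limits are split across two doublewords with
 * the high half in VsrD(0) and the low half in VsrD(1):
 *   INT128_MAX  ->  VsrD(0) = INT64_MAX, VsrD(1) = UINT64_MAX
 *   INT128_MIN  ->  VsrD(0) = INT64_MIN, VsrD(1) = 0
 * and these are what the helper stores for out-of-range values, infinities
 * and NaNs.
 */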
static inline void dfp_set_bcd_digit_64(ppc_vsr_t *t, uint8_t digit,
unsigned n)
@ -1067,8 +1131,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
set_dfp##size(t, &dfp.vt); \
}
DFP_HELPER_DEDPD(ddedpd, 64)
DFP_HELPER_DEDPD(ddedpdq, 128)
DFP_HELPER_DEDPD(DDEDPD, 64)
DFP_HELPER_DEDPD(DDEDPDQ, 128)
static inline uint8_t dfp_get_bcd_digit_64(ppc_vsr_t *t, unsigned n)
{
@ -1135,8 +1199,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
set_dfp##size(t, &dfp.vt); \
}
DFP_HELPER_ENBCD(denbcd, 64)
DFP_HELPER_ENBCD(denbcdq, 128)
DFP_HELPER_ENBCD(DENBCD, 64)
DFP_HELPER_ENBCD(DENBCDQ, 128)
#define DFP_HELPER_XEX(op, size) \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
@ -1169,8 +1233,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
} \
}
DFP_HELPER_XEX(dxex, 64)
DFP_HELPER_XEX(dxexq, 128)
DFP_HELPER_XEX(DXEX, 64)
DFP_HELPER_XEX(DXEXQ, 128)
static void dfp_set_raw_exp_64(ppc_vsr_t *t, uint64_t raw)
{
@ -1235,8 +1299,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
set_dfp##size(t, &dfp.vt); \
}
DFP_HELPER_IEX(diex, 64)
DFP_HELPER_IEX(diexq, 128)
DFP_HELPER_IEX(DIEX, 64)
DFP_HELPER_IEX(DIEXQ, 128)
static void dfp_clear_lmd_from_g5msb(uint64_t *t)
{
@ -1323,7 +1387,7 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
set_dfp##size(t, &dfp.vt); \
}
DFP_HELPER_SHIFT(dscli, 64, 1)
DFP_HELPER_SHIFT(dscliq, 128, 1)
DFP_HELPER_SHIFT(dscri, 64, 0)
DFP_HELPER_SHIFT(dscriq, 128, 0)
DFP_HELPER_SHIFT(DSCLI, 64, 1)
DFP_HELPER_SHIFT(DSCLIQ, 128, 1)
DFP_HELPER_SHIFT(DSCRI, 64, 0)
DFP_HELPER_SHIFT(DSCRIQ, 128, 0)


@ -46,7 +46,9 @@ DEF_HELPER_4(divwe, tl, env, tl, tl, i32)
DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(cmpb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
DEF_HELPER_3(sraw, tl, env, tl, tl)
DEF_HELPER_FLAGS_2(cfuged, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_FLAGS_2(CFUGED, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_FLAGS_2(PDEPD, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_FLAGS_2(PEXTD, TCG_CALL_NO_RWG_SE, i64, i64, i64)
#if defined(TARGET_PPC64)
DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl)
DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl)
@ -222,10 +224,10 @@ DEF_HELPER_3(vextractub, void, avr, avr, i32)
DEF_HELPER_3(vextractuh, void, avr, avr, i32)
DEF_HELPER_3(vextractuw, void, avr, avr, i32)
DEF_HELPER_3(vextractd, void, avr, avr, i32)
DEF_HELPER_3(vinsertb, void, avr, avr, i32)
DEF_HELPER_3(vinserth, void, avr, avr, i32)
DEF_HELPER_3(vinsertw, void, avr, avr, i32)
DEF_HELPER_3(vinsertd, void, avr, avr, i32)
DEF_HELPER_4(VINSBLX, void, env, avr, i64, tl)
DEF_HELPER_4(VINSHLX, void, env, avr, i64, tl)
DEF_HELPER_4(VINSWLX, void, env, avr, i64, tl)
DEF_HELPER_4(VINSDLX, void, env, avr, i64, tl)
DEF_HELPER_2(vextsb2w, void, avr, avr)
DEF_HELPER_2(vextsh2w, void, avr, avr)
DEF_HELPER_2(vextsb2d, void, avr, avr)
@ -332,6 +334,10 @@ DEF_HELPER_2(vextuwlx, tl, tl, avr)
DEF_HELPER_2(vextubrx, tl, tl, avr)
DEF_HELPER_2(vextuhrx, tl, tl, avr)
DEF_HELPER_2(vextuwrx, tl, tl, avr)
DEF_HELPER_5(VEXTDUBVLX, void, env, avr, avr, avr, tl)
DEF_HELPER_5(VEXTDUHVLX, void, env, avr, avr, avr, tl)
DEF_HELPER_5(VEXTDUWVLX, void, env, avr, avr, avr, tl)
DEF_HELPER_5(VEXTDDVLX, void, env, avr, avr, avr, tl)
DEF_HELPER_2(vsbox, void, avr, avr)
DEF_HELPER_3(vcipher, void, avr, avr, avr)
@ -514,6 +520,10 @@ DEF_HELPER_4(xxpermr, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xxextractuw, void, env, vsr, vsr, i32)
DEF_HELPER_4(xxinsertw, void, env, vsr, vsr, i32)
DEF_HELPER_3(xvxsigsp, void, env, vsr, vsr)
DEF_HELPER_5(XXBLENDVB, void, vsr, vsr, vsr, vsr, i32)
DEF_HELPER_5(XXBLENDVH, void, vsr, vsr, vsr, vsr, i32)
DEF_HELPER_5(XXBLENDVW, void, vsr, vsr, vsr, vsr, i32)
DEF_HELPER_5(XXBLENDVD, void, vsr, vsr, vsr, vsr, i32)
DEF_HELPER_2(efscfsi, i32, env, i32)
DEF_HELPER_2(efscfui, i32, env, i32)
@ -696,58 +706,60 @@ DEF_HELPER_3(store_601_batu, void, env, i32, tl)
#define dh_alias_fprp ptr
#define dh_ctype_fprp ppc_fprp_t *
DEF_HELPER_4(dadd, void, env, fprp, fprp, fprp)
DEF_HELPER_4(daddq, void, env, fprp, fprp, fprp)
DEF_HELPER_4(dsub, void, env, fprp, fprp, fprp)
DEF_HELPER_4(dsubq, void, env, fprp, fprp, fprp)
DEF_HELPER_4(dmul, void, env, fprp, fprp, fprp)
DEF_HELPER_4(dmulq, void, env, fprp, fprp, fprp)
DEF_HELPER_4(ddiv, void, env, fprp, fprp, fprp)
DEF_HELPER_4(ddivq, void, env, fprp, fprp, fprp)
DEF_HELPER_3(dcmpo, i32, env, fprp, fprp)
DEF_HELPER_3(dcmpoq, i32, env, fprp, fprp)
DEF_HELPER_3(dcmpu, i32, env, fprp, fprp)
DEF_HELPER_3(dcmpuq, i32, env, fprp, fprp)
DEF_HELPER_3(dtstdc, i32, env, fprp, i32)
DEF_HELPER_3(dtstdcq, i32, env, fprp, i32)
DEF_HELPER_3(dtstdg, i32, env, fprp, i32)
DEF_HELPER_3(dtstdgq, i32, env, fprp, i32)
DEF_HELPER_3(dtstex, i32, env, fprp, fprp)
DEF_HELPER_3(dtstexq, i32, env, fprp, fprp)
DEF_HELPER_3(dtstsf, i32, env, fprp, fprp)
DEF_HELPER_3(dtstsfq, i32, env, fprp, fprp)
DEF_HELPER_3(dtstsfi, i32, env, i32, fprp)
DEF_HELPER_3(dtstsfiq, i32, env, i32, fprp)
DEF_HELPER_5(dquai, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(dquaiq, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(dqua, void, env, fprp, fprp, fprp, i32)
DEF_HELPER_5(dquaq, void, env, fprp, fprp, fprp, i32)
DEF_HELPER_5(drrnd, void, env, fprp, fprp, fprp, i32)
DEF_HELPER_5(drrndq, void, env, fprp, fprp, fprp, i32)
DEF_HELPER_5(drintx, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(drintxq, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(drintn, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(drintnq, void, env, fprp, fprp, i32, i32)
DEF_HELPER_3(dctdp, void, env, fprp, fprp)
DEF_HELPER_3(dctqpq, void, env, fprp, fprp)
DEF_HELPER_3(drsp, void, env, fprp, fprp)
DEF_HELPER_3(drdpq, void, env, fprp, fprp)
DEF_HELPER_3(dcffix, void, env, fprp, fprp)
DEF_HELPER_3(dcffixq, void, env, fprp, fprp)
DEF_HELPER_3(dctfix, void, env, fprp, fprp)
DEF_HELPER_3(dctfixq, void, env, fprp, fprp)
DEF_HELPER_4(ddedpd, void, env, fprp, fprp, i32)
DEF_HELPER_4(ddedpdq, void, env, fprp, fprp, i32)
DEF_HELPER_4(denbcd, void, env, fprp, fprp, i32)
DEF_HELPER_4(denbcdq, void, env, fprp, fprp, i32)
DEF_HELPER_3(dxex, void, env, fprp, fprp)
DEF_HELPER_3(dxexq, void, env, fprp, fprp)
DEF_HELPER_4(diex, void, env, fprp, fprp, fprp)
DEF_HELPER_4(diexq, void, env, fprp, fprp, fprp)
DEF_HELPER_4(dscri, void, env, fprp, fprp, i32)
DEF_HELPER_4(dscriq, void, env, fprp, fprp, i32)
DEF_HELPER_4(dscli, void, env, fprp, fprp, i32)
DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32)
DEF_HELPER_4(DADD, void, env, fprp, fprp, fprp)
DEF_HELPER_4(DADDQ, void, env, fprp, fprp, fprp)
DEF_HELPER_4(DSUB, void, env, fprp, fprp, fprp)
DEF_HELPER_4(DSUBQ, void, env, fprp, fprp, fprp)
DEF_HELPER_4(DMUL, void, env, fprp, fprp, fprp)
DEF_HELPER_4(DMULQ, void, env, fprp, fprp, fprp)
DEF_HELPER_4(DDIV, void, env, fprp, fprp, fprp)
DEF_HELPER_4(DDIVQ, void, env, fprp, fprp, fprp)
DEF_HELPER_3(DCMPO, i32, env, fprp, fprp)
DEF_HELPER_3(DCMPOQ, i32, env, fprp, fprp)
DEF_HELPER_3(DCMPU, i32, env, fprp, fprp)
DEF_HELPER_3(DCMPUQ, i32, env, fprp, fprp)
DEF_HELPER_3(DTSTDC, i32, env, fprp, i32)
DEF_HELPER_3(DTSTDCQ, i32, env, fprp, i32)
DEF_HELPER_3(DTSTDG, i32, env, fprp, i32)
DEF_HELPER_3(DTSTDGQ, i32, env, fprp, i32)
DEF_HELPER_3(DTSTEX, i32, env, fprp, fprp)
DEF_HELPER_3(DTSTEXQ, i32, env, fprp, fprp)
DEF_HELPER_3(DTSTSF, i32, env, fprp, fprp)
DEF_HELPER_3(DTSTSFQ, i32, env, fprp, fprp)
DEF_HELPER_3(DTSTSFI, i32, env, i32, fprp)
DEF_HELPER_3(DTSTSFIQ, i32, env, i32, fprp)
DEF_HELPER_5(DQUAI, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(DQUAIQ, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(DQUA, void, env, fprp, fprp, fprp, i32)
DEF_HELPER_5(DQUAQ, void, env, fprp, fprp, fprp, i32)
DEF_HELPER_5(DRRND, void, env, fprp, fprp, fprp, i32)
DEF_HELPER_5(DRRNDQ, void, env, fprp, fprp, fprp, i32)
DEF_HELPER_5(DRINTX, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(DRINTXQ, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(DRINTN, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(DRINTNQ, void, env, fprp, fprp, i32, i32)
DEF_HELPER_3(DCTDP, void, env, fprp, fprp)
DEF_HELPER_3(DCTQPQ, void, env, fprp, fprp)
DEF_HELPER_3(DRSP, void, env, fprp, fprp)
DEF_HELPER_3(DRDPQ, void, env, fprp, fprp)
DEF_HELPER_3(DCFFIX, void, env, fprp, fprp)
DEF_HELPER_3(DCFFIXQ, void, env, fprp, fprp)
DEF_HELPER_3(DCFFIXQQ, void, env, fprp, avr)
DEF_HELPER_3(DCTFIX, void, env, fprp, fprp)
DEF_HELPER_3(DCTFIXQ, void, env, fprp, fprp)
DEF_HELPER_3(DCTFIXQQ, void, env, avr, fprp)
DEF_HELPER_4(DDEDPD, void, env, fprp, fprp, i32)
DEF_HELPER_4(DDEDPDQ, void, env, fprp, fprp, i32)
DEF_HELPER_4(DENBCD, void, env, fprp, fprp, i32)
DEF_HELPER_4(DENBCDQ, void, env, fprp, fprp, i32)
DEF_HELPER_3(DXEX, void, env, fprp, fprp)
DEF_HELPER_3(DXEXQ, void, env, fprp, fprp)
DEF_HELPER_4(DIEX, void, env, fprp, fprp, fprp)
DEF_HELPER_4(DIEXQ, void, env, fprp, fprp, fprp)
DEF_HELPER_4(DSCRI, void, env, fprp, fprp, i32)
DEF_HELPER_4(DSCRIQ, void, env, fprp, fprp, i32)
DEF_HELPER_4(DSCLI, void, env, fprp, fprp, i32)
DEF_HELPER_4(DSCLIQ, void, env, fprp, fprp, i32)
DEF_HELPER_1(tbegin, void, env)
DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env)


@ -24,25 +24,142 @@
@D_bfs ...... bf:3 - l:1 ra:5 imm:s16 &D_bf
@D_bfu ...... bf:3 - l:1 ra:5 imm:16 &D_bf
%dq_si 4:s12 !function=times_16
%dq_rtp 22:4 !function=times_2
@DQ_rtp ...... ....0 ra:5 ............ .... &D rt=%dq_rtp si=%dq_si
%dq_rt_tsx 3:1 21:5
@DQ_TSX ...... ..... ra:5 ............ .... &D si=%dq_si rt=%dq_rt_tsx
%rt_tsxp 21:1 22:4 !function=times_2
@DQ_TSXP ...... ..... ra:5 ............ .... &D si=%dq_si rt=%rt_tsxp
%ds_si 2:s14 !function=times_4
@DS ...... rt:5 ra:5 .............. .. &D si=%ds_si
%ds_rtp 22:4 !function=times_2
@DS_rtp ...... ....0 ra:5 .............. .. &D rt=%ds_rtp si=%ds_si
&DX rt d
%dx_d 6:s10 16:5 0:1
@DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d
&VA vrt vra vrb rc
@VA ...... vrt:5 vra:5 vrb:5 rc:5 ...... &VA
&VN vrt vra vrb sh
@VN ...... vrt:5 vra:5 vrb:5 .. sh:3 ...... &VN
&VX vrt vra vrb
@VX ...... vrt:5 vra:5 vrb:5 .......... . &VX
&VX_uim4 vrt uim vrb
@VX_uim4 ...... vrt:5 . uim:4 vrb:5 ........... &VX_uim4
&X rt ra rb
@X ...... rt:5 ra:5 rb:5 .......... . &X
&X_rc rt ra rb rc:bool
@X_rc ...... rt:5 ra:5 rb:5 .......... rc:1 &X_rc
%x_frtp 22:4 !function=times_2
%x_frap 17:4 !function=times_2
%x_frbp 12:4 !function=times_2
@X_tp_ap_bp_rc ...... ....0 ....0 ....0 .......... rc:1 &X_rc rt=%x_frtp ra=%x_frap rb=%x_frbp
@X_tp_a_bp_rc ...... ....0 ra:5 ....0 .......... rc:1 &X_rc rt=%x_frtp rb=%x_frbp
&X_tb_rc rt rb rc:bool
@X_tb_rc ...... rt:5 ..... rb:5 .......... rc:1 &X_tb_rc
@X_tbp_rc ...... ....0 ..... ....0 .......... rc:1 &X_tb_rc rt=%x_frtp rb=%x_frbp
@X_tp_b_rc ...... ....0 ..... rb:5 .......... rc:1 &X_tb_rc rt=%x_frtp
@X_t_bp_rc ...... rt:5 ..... ....0 .......... rc:1 &X_tb_rc rb=%x_frbp
&X_bi rt bi
@X_bi ...... rt:5 bi:5 ----- .......... - &X_bi
&X_bf bf ra rb
@X_bf ...... bf:3 .. ra:5 rb:5 .......... . &X_bf
@X_bf_ap_bp ...... bf:3 .. ....0 ....0 .......... . &X_bf ra=%x_frap rb=%x_frbp
@X_bf_a_bp ...... bf:3 .. ra:5 ....0 .......... . &X_bf rb=%x_frbp
&X_bf_uim bf uim rb
@X_bf_uim ...... bf:3 . uim:6 rb:5 .......... . &X_bf_uim
@X_bf_uim_bp ...... bf:3 . uim:6 ....0 .......... . &X_bf_uim rb=%x_frbp
&X_bfl bf l:bool ra rb
@X_bfl ...... bf:3 - l:1 ra:5 rb:5 ..........- &X_bfl
%x_xt 0:1 21:5
&X_imm8 xt imm:uint8_t
@X_imm8 ...... ..... .. imm:8 .......... . &X_imm8 xt=%x_xt
&X_uim5 xt uim:uint8_t
@X_uim5 ...... ..... ..... uim:5 .......... . &X_uim5 xt=%x_xt
&X_tb_sp_rc rt rb sp rc:bool
@X_tb_sp_rc ...... rt:5 sp:2 ... rb:5 .......... rc:1 &X_tb_sp_rc
@X_tbp_sp_rc ...... ....0 sp:2 ... ....0 .......... rc:1 &X_tb_sp_rc rt=%x_frtp rb=%x_frbp
&X_tb_s_rc rt rb s:bool rc:bool
@X_tb_s_rc ...... rt:5 s:1 .... rb:5 .......... rc:1 &X_tb_s_rc
@X_tbp_s_rc ...... ....0 s:1 .... ....0 .......... rc:1 &X_tb_s_rc rt=%x_frtp rb=%x_frbp
%x_rt_tsx 0:1 21:5
@X_TSX ...... ..... ra:5 rb:5 .......... . &X rt=%x_rt_tsx
@X_TSXP ...... ..... ra:5 rb:5 .......... . &X rt=%rt_tsxp
&X_frtp_vrb frtp vrb
@X_frtp_vrb ...... ....0 ..... vrb:5 .......... . &X_frtp_vrb frtp=%x_frtp
&X_vrt_frbp vrt frbp
@X_vrt_frbp ...... vrt:5 ..... ....0 .......... . &X_vrt_frbp frbp=%x_frbp
&XX2 xt xb uim:uint8_t
%xx2_xt 0:1 21:5
%xx2_xb 1:1 11:5
@XX2 ...... ..... ... uim:2 ..... ......... .. &XX2 xt=%xx2_xt xb=%xx2_xb
&Z22_bf_fra bf fra dm
@Z22_bf_fra ...... bf:3 .. fra:5 dm:6 ......... . &Z22_bf_fra
%z22_frap 17:4 !function=times_2
@Z22_bf_frap ...... bf:3 .. ....0 dm:6 ......... . &Z22_bf_fra fra=%z22_frap
&Z22_ta_sh_rc rt ra sh rc:bool
@Z22_ta_sh_rc ...... rt:5 ra:5 sh:6 ......... rc:1 &Z22_ta_sh_rc
%z22_frtp 22:4 !function=times_2
@Z22_tap_sh_rc ...... ....0 ....0 sh:6 ......... rc:1 &Z22_ta_sh_rc rt=%z22_frtp ra=%z22_frap
&Z23_tab frt fra frb rmc rc:bool
@Z23_tab ...... frt:5 fra:5 frb:5 rmc:2 ........ rc:1 &Z23_tab
%z23_frtp 22:4 !function=times_2
%z23_frap 17:4 !function=times_2
%z23_frbp 12:4 !function=times_2
@Z23_tabp ...... ....0 ....0 ....0 rmc:2 ........ rc:1 &Z23_tab frt=%z23_frtp fra=%z23_frap frb=%z23_frbp
@Z23_tp_a_bp ...... ....0 fra:5 ....0 rmc:2 ........ rc:1 &Z23_tab frt=%z23_frtp frb=%z23_frbp
&Z23_tb frt frb r:bool rmc rc:bool
@Z23_tb ...... frt:5 .... r:1 frb:5 rmc:2 ........ rc:1 &Z23_tb
@Z23_tbp ...... ....0 .... r:1 ....0 rmc:2 ........ rc:1 &Z23_tb frt=%z23_frtp frb=%z23_frbp
&Z23_te_tb te frt frb rmc rc:bool
@Z23_te_tb ...... frt:5 te:5 frb:5 rmc:2 ........ rc:1 &Z23_te_tb
@Z23_te_tbp ...... ....0 te:5 ....0 rmc:2 ........ rc:1 &Z23_te_tb frt=%z23_frtp frb=%z23_frbp
### Fixed-Point Load Instructions
LBZ 100010 ..... ..... ................ @D
@ -74,6 +191,8 @@ LDU 111010 ..... ..... ..............01 @DS
LDX 011111 ..... ..... ..... 0000010101 - @X
LDUX 011111 ..... ..... ..... 0000110101 - @X
LQ 111000 ..... ..... ............ ---- @DQ_rtp
### Fixed-Point Store Instructions
STB 100110 ..... ..... ................ @D
@ -96,6 +215,8 @@ STDU 111110 ..... ..... ..............01 @DS
STDX 011111 ..... ..... ..... 0010010101 - @X
STDUX 011111 ..... ..... ..... 0010110101 - @X
STQ 111110 ..... ..... ..............10 @DS_rtp
### Fixed-Point Compare Instructions
CMP 011111 ... - . ..... ..... 0000000000 - @X_bfl
@ -113,6 +234,34 @@ ADDPCIS 010011 ..... ..... .......... 00010 . @DX
## Fixed-Point Logical Instructions
CFUGED 011111 ..... ..... ..... 0011011100 - @X
CNTLZDM 011111 ..... ..... ..... 0000111011 - @X
CNTTZDM 011111 ..... ..... ..... 1000111011 - @X
PDEPD 011111 ..... ..... ..... 0010011100 - @X
PEXTD 011111 ..... ..... ..... 0010111100 - @X
### Floating-Point Load Instructions
LFS 110000 ..... ..... ................ @D
LFSU 110001 ..... ..... ................ @D
LFSX 011111 ..... ..... ..... 1000010111 - @X
LFSUX 011111 ..... ..... ..... 1000110111 - @X
LFD 110010 ..... ..... ................ @D
LFDU 110011 ..... ..... ................ @D
LFDX 011111 ..... ..... ..... 1001010111 - @X
LFDUX 011111 ..... ..... ..... 1001110111 - @X
### Floating-Point Store Instructions
STFS 110100 ..... ..... ................ @D
STFSU 110101 ..... ..... ................ @D
STFSX 011111 ..... ..... ..... 1010010111 - @X
STFSUX 011111 ..... ..... ..... 1010110111 - @X
STFD 110110 ..... ..... ................ @D
STFDU 110111 ..... ..... ................ @D
STFDX 011111 ..... ..... ..... 1011010111 - @X
STFDUX 011111 ..... ..... ..... 1011110111 - @X
### Move To/From System Register Instructions
@ -121,6 +270,160 @@ SETBCR 011111 ..... ..... ----- 0110100000 - @X_bi
SETNBC 011111 ..... ..... ----- 0111000000 - @X_bi
SETNBCR 011111 ..... ..... ----- 0111100000 - @X_bi
### Decimal Floating-Point Arithmetic Instructions
DADD 111011 ..... ..... ..... 0000000010 . @X_rc
DADDQ 111111 ..... ..... ..... 0000000010 . @X_tp_ap_bp_rc
DSUB 111011 ..... ..... ..... 1000000010 . @X_rc
DSUBQ 111111 ..... ..... ..... 1000000010 . @X_tp_ap_bp_rc
DMUL 111011 ..... ..... ..... 0000100010 . @X_rc
DMULQ 111111 ..... ..... ..... 0000100010 . @X_tp_ap_bp_rc
DDIV 111011 ..... ..... ..... 1000100010 . @X_rc
DDIVQ 111111 ..... ..... ..... 1000100010 . @X_tp_ap_bp_rc
### Decimal Floating-Point Compare Instructions
DCMPU 111011 ... -- ..... ..... 1010000010 - @X_bf
DCMPUQ 111111 ... -- ..... ..... 1010000010 - @X_bf_ap_bp
DCMPO 111011 ... -- ..... ..... 0010000010 - @X_bf
DCMPOQ 111111 ... -- ..... ..... 0010000010 - @X_bf_ap_bp
### Decimal Floating-Point Test Instructions
DTSTDC 111011 ... -- ..... ...... 011000010 - @Z22_bf_fra
DTSTDCQ 111111 ... -- ..... ...... 011000010 - @Z22_bf_frap
DTSTDG 111011 ... -- ..... ...... 011100010 - @Z22_bf_fra
DTSTDGQ 111111 ... -- ..... ...... 011100010 - @Z22_bf_frap
DTSTEX 111011 ... -- ..... ..... 0010100010 - @X_bf
DTSTEXQ 111111 ... -- ..... ..... 0010100010 - @X_bf_ap_bp
DTSTSF 111011 ... -- ..... ..... 1010100010 - @X_bf
DTSTSFQ 111111 ... -- ..... ..... 1010100010 - @X_bf_a_bp
DTSTSFI 111011 ... - ...... ..... 1010100011 - @X_bf_uim
DTSTSFIQ 111111 ... - ...... ..... 1010100011 - @X_bf_uim_bp
### Decimal Floating-Point Quantum Adjustment Instructions
DQUAI 111011 ..... ..... ..... .. 01000011 . @Z23_te_tb
DQUAIQ 111111 ..... ..... ..... .. 01000011 . @Z23_te_tbp
DQUA 111011 ..... ..... ..... .. 00000011 . @Z23_tab
DQUAQ 111111 ..... ..... ..... .. 00000011 . @Z23_tabp
DRRND 111011 ..... ..... ..... .. 00100011 . @Z23_tab
DRRNDQ 111111 ..... ..... ..... .. 00100011 . @Z23_tp_a_bp
DRINTX 111011 ..... ---- . ..... .. 01100011 . @Z23_tb
DRINTXQ 111111 ..... ---- . ..... .. 01100011 . @Z23_tbp
DRINTN 111011 ..... ---- . ..... .. 11100011 . @Z23_tb
DRINTNQ 111111 ..... ---- . ..... .. 11100011 . @Z23_tbp
### Decimal Floating-Point Conversion Instructions
DCTDP 111011 ..... ----- ..... 0100000010 . @X_tb_rc
DCTQPQ 111111 ..... ----- ..... 0100000010 . @X_tp_b_rc
DRSP 111011 ..... ----- ..... 1100000010 . @X_tb_rc
DRDPQ 111111 ..... ----- ..... 1100000010 . @X_tbp_rc
DCFFIX 111011 ..... ----- ..... 1100100010 . @X_tb_rc
DCFFIXQ 111111 ..... ----- ..... 1100100010 . @X_tp_b_rc
DCFFIXQQ 111111 ..... 00000 ..... 1111100010 - @X_frtp_vrb
DCTFIX 111011 ..... ----- ..... 0100100010 . @X_tb_rc
DCTFIXQ 111111 ..... ----- ..... 0100100010 . @X_t_bp_rc
DCTFIXQQ 111111 ..... 00001 ..... 1111100010 - @X_vrt_frbp
### Decimal Floating-Point Format Instructions
DDEDPD 111011 ..... .. --- ..... 0101000010 . @X_tb_sp_rc
DDEDPDQ 111111 ..... .. --- ..... 0101000010 . @X_tbp_sp_rc
DENBCD 111011 ..... . ---- ..... 1101000010 . @X_tb_s_rc
DENBCDQ 111111 ..... . ---- ..... 1101000010 . @X_tbp_s_rc
DXEX 111011 ..... ----- ..... 0101100010 . @X_tb_rc
DXEXQ 111111 ..... ----- ..... 0101100010 . @X_t_bp_rc
DIEX 111011 ..... ..... ..... 1101100010 . @X_rc
DIEXQ 111111 ..... ..... ..... 1101100010 . @X_tp_a_bp_rc
DSCLI 111011 ..... ..... ...... 001000010 . @Z22_ta_sh_rc
DSCLIQ 111111 ..... ..... ...... 001000010 . @Z22_tap_sh_rc
DSCRI 111011 ..... ..... ...... 001100010 . @Z22_ta_sh_rc
DSCRIQ 111111 ..... ..... ...... 001100010 . @Z22_tap_sh_rc
## Vector Bit Manipulation Instruction
VCFUGED 000100 ..... ..... ..... 10101001101 @VX
VCLZDM 000100 ..... ..... ..... 11110000100 @VX
VCTZDM 000100 ..... ..... ..... 11111000100 @VX
VPDEPD 000100 ..... ..... ..... 10111001101 @VX
VPEXTD 000100 ..... ..... ..... 10110001101 @VX
## Vector Permute and Formatting Instruction
VEXTDUBVLX 000100 ..... ..... ..... ..... 011000 @VA
VEXTDUBVRX 000100 ..... ..... ..... ..... 011001 @VA
VEXTDUHVLX 000100 ..... ..... ..... ..... 011010 @VA
VEXTDUHVRX 000100 ..... ..... ..... ..... 011011 @VA
VEXTDUWVLX 000100 ..... ..... ..... ..... 011100 @VA
VEXTDUWVRX 000100 ..... ..... ..... ..... 011101 @VA
VEXTDDVLX 000100 ..... ..... ..... ..... 011110 @VA
VEXTDDVRX 000100 ..... ..... ..... ..... 011111 @VA
VINSERTB 000100 ..... - .... ..... 01100001101 @VX_uim4
VINSERTH 000100 ..... - .... ..... 01101001101 @VX_uim4
VINSERTW 000100 ..... - .... ..... 01110001101 @VX_uim4
VINSERTD 000100 ..... - .... ..... 01111001101 @VX_uim4
VINSBLX 000100 ..... ..... ..... 01000001111 @VX
VINSBRX 000100 ..... ..... ..... 01100001111 @VX
VINSHLX 000100 ..... ..... ..... 01001001111 @VX
VINSHRX 000100 ..... ..... ..... 01101001111 @VX
VINSWLX 000100 ..... ..... ..... 01010001111 @VX
VINSWRX 000100 ..... ..... ..... 01110001111 @VX
VINSDLX 000100 ..... ..... ..... 01011001111 @VX
VINSDRX 000100 ..... ..... ..... 01111001111 @VX
VINSW 000100 ..... - .... ..... 00011001111 @VX_uim4
VINSD 000100 ..... - .... ..... 00111001111 @VX_uim4
VINSBVLX 000100 ..... ..... ..... 00000001111 @VX
VINSBVRX 000100 ..... ..... ..... 00100001111 @VX
VINSHVLX 000100 ..... ..... ..... 00001001111 @VX
VINSHVRX 000100 ..... ..... ..... 00101001111 @VX
VINSWVLX 000100 ..... ..... ..... 00010001111 @VX
VINSWVRX 000100 ..... ..... ..... 00110001111 @VX
VSLDBI 000100 ..... ..... ..... 00 ... 010110 @VN
VSRDBI 000100 ..... ..... ..... 01 ... 010110 @VN
# VSX Load/Store Instructions
LXV 111101 ..... ..... ............ . 001 @DQ_TSX
STXV 111101 ..... ..... ............ . 101 @DQ_TSX
LXVP 000110 ..... ..... ............ 0000 @DQ_TSXP
STXVP 000110 ..... ..... ............ 0001 @DQ_TSXP
LXVX 011111 ..... ..... ..... 0100 - 01100 . @X_TSX
STXVX 011111 ..... ..... ..... 0110001100 . @X_TSX
LXVPX 011111 ..... ..... ..... 0101001101 - @X_TSXP
STXVPX 011111 ..... ..... ..... 0111001101 - @X_TSXP
## VSX splat instruction
XXSPLTIB 111100 ..... 00 ........ 0101101000 . @X_imm8
XXSPLTW 111100 ..... ---.. ..... 010100100 . . @XX2
## VSX Vector Load Special Value Instruction
LXVKQ 111100 ..... 11111 ..... 0101101000 . @X_uim5
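For readers less familiar with decodetree, a brief editor-added gloss on the register-pair fields introduced above (not part of the patch):

# Editor's gloss: field lines such as
#   %x_frtp 22:4 !function=times_2
# extract the 4-bit FRTp field and double it via times_2() in translate.c,
# so the DFP quad-precision forms can only name even floating-point
# register pairs.  %rt_tsxp works the same way for LXVP/STXVP: it rebuilds
# the 5-bit TX:Tp value and doubles it, giving the even first register of
# the VSX register pair.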

View File

@ -23,6 +23,36 @@
@PLS_D ...... .. ... r:1 .. .................. \
...... rt:5 ra:5 ................ \
&PLS_D si=%pls_si
@8LS_D_TSX ...... .. . .. r:1 .. .................. \
..... rt:6 ra:5 ................ \
&PLS_D si=%pls_si
%rt_tsxp 21:1 22:4 !function=times_2
@8LS_D_TSXP ...... .. . .. r:1 .. .................. \
...... ..... ra:5 ................ \
&PLS_D si=%pls_si rt=%rt_tsxp
# Format 8RR:D
%8rr_si 32:s16 0:16
%8rr_xt 16:1 21:5
&8RR_D_IX xt ix si
@8RR_D_IX ...... .. .... .. .. ................ \
...... ..... ... ix:1 . ................ \
&8RR_D_IX si=%8rr_si xt=%8rr_xt
&8RR_D xt si:int32_t
@8RR_D ...... .. .... .. .. ................ \
...... ..... .... . ................ \
&8RR_D si=%8rr_si xt=%8rr_xt
# Format XX4
&XX4 xt xa xb xc
%xx4_xt 0:1 21:5
%xx4_xa 2:1 16:5
%xx4_xb 1:1 11:5
%xx4_xc 3:1 6:5
@XX4 ........ ........ ........ ........ \
...... ..... ..... ..... ..... .. .... \
&XX4 xt=%xx4_xt xa=%xx4_xa xb=%xx4_xb xc=%xx4_xc
### Fixed-Point Load Instructions
@ -38,6 +68,8 @@ PLWA 000001 00 0--.-- .................. \
101001 ..... ..... ................ @PLS_D
PLD 000001 00 0--.-- .................. \
111001 ..... ..... ................ @PLS_D
PLQ 000001 00 0--.-- .................. \
111000 ..... ..... ................ @PLS_D
### Fixed-Point Store Instructions
@ -50,12 +82,25 @@ PSTH 000001 10 0--.-- .................. \
PSTD 000001 00 0--.-- .................. \
111101 ..... ..... ................ @PLS_D
PSTQ 000001 00 0--.-- .................. \
111100 ..... ..... ................ @PLS_D
### Fixed-Point Arithmetic Instructions
PADDI 000001 10 0--.-- .................. \
001110 ..... ..... ................ @PLS_D
### Floating-Point Load and Store Instructions
PLFS 000001 10 0--.-- .................. \
110000 ..... ..... ................ @PLS_D
PLFD 000001 10 0--.-- .................. \
110010 ..... ..... ................ @PLS_D
PSTFS 000001 10 0--.-- .................. \
110100 ..... ..... ................ @PLS_D
PSTFD 000001 10 0--.-- .................. \
110110 ..... ..... ................ @PLS_D
### Prefixed No-operation Instruction
@PNOP 000001 11 0000-- 000000000000000000 \
@ -122,3 +167,30 @@ PADDI 000001 10 0--.-- .................. \
PNOP ................................ \
-------------------------------- @PNOP
}
### VSX instructions
PLXV 000001 00 0--.-- .................. \
11001 ...... ..... ................ @8LS_D_TSX
PSTXV 000001 00 0--.-- .................. \
11011 ...... ..... ................ @8LS_D_TSX
PLXVP 000001 00 0--.-- .................. \
111010 ..... ..... ................ @8LS_D_TSXP
PSTXVP 000001 00 0--.-- .................. \
111110 ..... ..... ................ @8LS_D_TSXP
XXSPLTIDP 000001 01 0000 -- -- ................ \
100000 ..... 0010 . ................ @8RR_D
XXSPLTIW 000001 01 0000 -- -- ................ \
100000 ..... 0011 . ................ @8RR_D
XXSPLTI32DX 000001 01 0000 -- -- ................ \
100000 ..... 000 .. ................ @8RR_D_IX
XXBLENDVD 000001 01 0000 -- ------------------ \
100001 ..... ..... ..... ..... 11 .... @XX4
XXBLENDVW 000001 01 0000 -- ------------------ \
100001 ..... ..... ..... ..... 10 .... @XX4
XXBLENDVH 000001 01 0000 -- ------------------ \
100001 ..... ..... ..... ..... 01 .... @XX4
XXBLENDVB 000001 01 0000 -- ------------------ \
100001 ..... ..... ..... ..... 00 .... @XX4
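A similar editor-added gloss for the 64-bit (prefixed) formats above; again an illustration, not part of the patch:

# Editor's gloss: the field specifiers index the concatenated 64-bit
# prefix:suffix image, so
#   %8rr_si 32:s16 0:16
# assembles the 32-bit immediate of XXSPLTIW/XXSPLTIDP from the low 16 bits
# of the prefix word followed by the low 16 bits of the suffix word, while
# the %xx4_* fields glue each 5-bit XX4 register number to its extension
# bit so XXBLENDV* can reach all 64 VSRs.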


@ -324,7 +324,7 @@ target_ulong helper_popcntb(target_ulong val)
}
#endif
uint64_t helper_cfuged(uint64_t src, uint64_t mask)
uint64_t helper_CFUGED(uint64_t src, uint64_t mask)
{
/*
* Instead of processing the mask bit-by-bit from the most significant to
@ -386,6 +386,42 @@ uint64_t helper_cfuged(uint64_t src, uint64_t mask)
return left | (right >> n);
}
uint64_t helper_PDEPD(uint64_t src, uint64_t mask)
{
int i, o;
uint64_t result = 0;
if (mask == -1) {
return src;
}
for (i = 0; mask != 0; i++) {
o = ctz64(mask);
mask &= mask - 1;
result |= ((src >> i) & 1) << o;
}
return result;
}
uint64_t helper_PEXTD(uint64_t src, uint64_t mask)
{
int i, o;
uint64_t result = 0;
if (mask == -1) {
return src;
}
for (o = 0; mask != 0; o++) {
i = ctz64(mask);
mask &= mask - 1;
result |= ((src >> i) & 1) << o;
}
return result;
}
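A small worked example of the two new bit-manipulation helpers (editor's illustration; the values are arbitrary):

/*
 * Editor's example: mask = 0x0f0f selects bit positions 0-3 and 8-11.
 *   helper_PDEPD(0xab,   0x0f0f) == 0x0a0b   (low bits of src scattered
 *                                             into the set mask positions)
 *   helper_PEXTD(0x0a0b, 0x0f0f) == 0xab     (those positions gathered back)
 * The ctz64() / "mask &= mask - 1" idiom walks the set bits of mask from
 * the least significant upwards, one source/result bit per set mask bit.
 */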
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */
target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2)
@ -1577,25 +1613,73 @@ void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
}
#if defined(HOST_WORDS_BIGENDIAN)
#define VINSERT(suffix, element) \
void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
{ \
memmove(&r->u8[index], &b->u8[8 - sizeof(r->element[0])], \
sizeof(r->element[0])); \
}
#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[IDX])
#else
#define VINSERT(suffix, element) \
void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
{ \
uint32_t d = (16 - index) - sizeof(r->element[0]); \
memmove(&r->u8[d], &b->u8[8], sizeof(r->element[0])); \
}
#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[15 - (IDX)] - (SIZE) + 1)
#endif
VINSERT(b, u8)
VINSERT(h, u16)
VINSERT(w, u32)
VINSERT(d, u64)
#undef VINSERT
#define VINSX(SUFFIX, TYPE) \
void glue(glue(helper_VINS, SUFFIX), LX)(CPUPPCState *env, ppc_avr_t *t, \
uint64_t val, target_ulong index) \
{ \
const int maxidx = ARRAY_SIZE(t->u8) - sizeof(TYPE); \
target_long idx = index; \
\
if (idx < 0 || idx > maxidx) { \
idx = idx < 0 ? sizeof(TYPE) - idx : idx; \
qemu_log_mask(LOG_GUEST_ERROR, \
"Invalid index for Vector Insert Element after 0x" TARGET_FMT_lx \
", RA = " TARGET_FMT_ld " > %d\n", env->nip, idx, maxidx); \
} else { \
TYPE src = val; \
memcpy(ELEM_ADDR(t, idx, sizeof(TYPE)), &src, sizeof(TYPE)); \
} \
}
VINSX(B, uint8_t)
VINSX(H, uint16_t)
VINSX(W, uint32_t)
VINSX(D, uint64_t)
#undef ELEM_ADDR
#undef VINSX
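An editor-added note on ELEM_ADDR, whose little-endian arm is easy to misread (a gloss on the code above, not new behaviour):

/*
 * Editor's note: the index is a big-endian byte offset into the vector
 * register.  Inserting a 4-byte element at index 4 covers BE bytes 4..7:
 *   big-endian host:    ELEM_ADDR(t, 4, 4) == &t->u8[4]
 *   little-endian host: ELEM_ADDR(t, 4, 4) == &t->u8[15 - 4] - 4 + 1
 *                                          == &t->u8[8]   (host bytes 8..11)
 * so memcpy() of the host-order value places it in the same register lane
 * regardless of host byte order.
 */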
#if defined(HOST_WORDS_BIGENDIAN)
#define VEXTDVLX(NAME, SIZE) \
void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \
target_ulong index) \
{ \
const target_long idx = index; \
ppc_avr_t tmp[2] = { *a, *b }; \
memset(t, 0, sizeof(*t)); \
if (idx >= 0 && idx + SIZE <= sizeof(tmp)) { \
memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2 - SIZE], (void *)tmp + idx, SIZE); \
} else { \
qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x" \
TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n", \
env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE); \
} \
}
#else
#define VEXTDVLX(NAME, SIZE) \
void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \
target_ulong index) \
{ \
const target_long idx = index; \
ppc_avr_t tmp[2] = { *b, *a }; \
memset(t, 0, sizeof(*t)); \
if (idx >= 0 && idx + SIZE <= sizeof(tmp)) { \
memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2], \
(void *)tmp + sizeof(tmp) - SIZE - idx, SIZE); \
} else { \
qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x" \
TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n", \
env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE); \
} \
}
#endif
VEXTDVLX(VEXTDUBVLX, 1)
VEXTDVLX(VEXTDUHVLX, 2)
VEXTDVLX(VEXTDUWVLX, 4)
VEXTDVLX(VEXTDDVLX, 8)
#undef VEXTDVLX
#if defined(HOST_WORDS_BIGENDIAN)
#define VEXTRACT(suffix, element) \
void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
@ -1653,6 +1737,21 @@ void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt,
*xt = t;
}
#define XXBLEND(name, sz) \
void glue(helper_XXBLENDV, name)(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \
ppc_avr_t *c, uint32_t desc) \
{ \
for (int i = 0; i < ARRAY_SIZE(t->glue(u, sz)); i++) { \
t->glue(u, sz)[i] = (c->glue(s, sz)[i] >> (sz - 1)) ? \
b->glue(u, sz)[i] : a->glue(u, sz)[i]; \
} \
}
XXBLEND(B, 8)
XXBLEND(H, 16)
XXBLEND(W, 32)
XXBLEND(D, 64)
#undef XXBLEND
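A brief editor's gloss on the blend selection above:

/*
 * Editor's note: (c->s8/s16/s32/s64[i] >> (sz - 1)) isolates the sign bit
 * of each element of VSR[C]; a set bit selects the element from VSR[B],
 * a clear bit the element from VSR[A], as the XXBLENDV* instructions
 * require.
 */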
#define VEXT_SIGNED(name, element, cast) \
void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
{ \

View File

@ -3197,6 +3197,20 @@ static inline void gen_align_no_le(DisasContext *ctx)
(ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
{
TCGv ea = tcg_temp_new();
if (ra) {
tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
} else {
tcg_gen_mov_tl(ea, displ);
}
if (NARROW_MODE(ctx)) {
tcg_gen_ext32u_tl(ea, ea);
}
return ea;
}
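The new do_ea_calc() centralizes effective-address computation for the decodetree-based loads and stores; the sketch below is an editor's illustration of a typical caller (the surrounding names are assumptions), not code from the patch.

/*
 * Editor's sketch of a typical caller:
 *
 *   TCGv ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->si));
 *   ...emit the load or store through ea...
 *   tcg_temp_free(ea);
 *
 * With ra == 0 the displacement is used as-is (no base register), and in
 * narrow (32-bit) mode the computed address is zero-extended to 32 bits.
 */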
/*** Integer load ***/
#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
@ -3313,69 +3327,6 @@ GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
/* lq */
static void gen_lq(DisasContext *ctx)
{
int ra, rd;
TCGv EA, hi, lo;
/* lq is a legal user mode instruction starting in ISA 2.07 */
bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
if (!legal_in_user_mode && ctx->pr) {
gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
if (!le_is_supported && ctx->le_mode) {
gen_align_no_le(ctx);
return;
}
ra = rA(ctx->opcode);
rd = rD(ctx->opcode);
if (unlikely((rd & 1) || rd == ra)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x0F);
/* Note that the low part is always in RD+1, even in LE mode. */
lo = cpu_gpr[rd + 1];
hi = cpu_gpr[rd];
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
if (HAVE_ATOMIC128) {
TCGv_i32 oi = tcg_temp_new_i32();
if (ctx->le_mode) {
tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
} else {
tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
}
tcg_temp_free_i32(oi);
tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
} else {
/* Restart with exclusive lock. */
gen_helper_exit_atomic(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
}
} else if (ctx->le_mode) {
tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ);
gen_addr_add(ctx, EA, EA, 8);
tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
} else {
tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ);
gen_addr_add(ctx, EA, EA, 8);
tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
}
tcg_temp_free(EA);
}
#endif
/*** Integer store ***/
@ -3421,90 +3372,6 @@ GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
static void gen_std(DisasContext *ctx)
{
int rs;
TCGv EA;
rs = rS(ctx->opcode);
if ((ctx->opcode & 0x3) == 0x2) { /* stq */
bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
TCGv hi, lo;
if (!(ctx->insns_flags & PPC_64BX)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
if (!legal_in_user_mode && ctx->pr) {
gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
if (!le_is_supported && ctx->le_mode) {
gen_align_no_le(ctx);
return;
}
if (unlikely(rs & 1)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
/* Note that the low part is always in RS+1, even in LE mode. */
lo = cpu_gpr[rs + 1];
hi = cpu_gpr[rs];
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
if (HAVE_ATOMIC128) {
TCGv_i32 oi = tcg_temp_new_i32();
if (ctx->le_mode) {
tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128,
ctx->mem_idx));
gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
} else {
tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128,
ctx->mem_idx));
gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
}
tcg_temp_free_i32(oi);
} else {
/* Restart with exclusive lock. */
gen_helper_exit_atomic(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
}
} else if (ctx->le_mode) {
tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_LEQ);
gen_addr_add(ctx, EA, EA, 8);
tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_LEQ);
} else {
tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_BEQ);
gen_addr_add(ctx, EA, EA, 8);
tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_BEQ);
}
tcg_temp_free(EA);
} else {
/* std / stdu */
if (Rc(ctx->opcode)) {
if (unlikely(rA(ctx->opcode) == 0)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
}
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
if (Rc(ctx->opcode)) {
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
}
tcg_temp_free(EA);
}
}
#endif
/*** Integer load and store with byte reverse ***/
@@ -7438,11 +7305,21 @@ static inline void set_avr64(int regno, TCGv_i64 src, bool high)
/*
* Helpers for decodetree used by !function for decoding arguments.
*/
static int times_2(DisasContext *ctx, int x)
{
return x * 2;
}
static int times_4(DisasContext *ctx, int x)
{
return x * 4;
}
static int times_16(DisasContext *ctx, int x)
{
return x * 16;
}
/*
* Helpers for trans_* functions to check for specific insns flags.
* Use token pasting to ensure that we use the proper flag with the
@@ -7469,6 +7346,30 @@ static int times_4(DisasContext *ctx, int x)
# define REQUIRE_64BIT(CTX) REQUIRE_INSNS_FLAGS(CTX, 64B)
#endif
#define REQUIRE_VECTOR(CTX) \
do { \
if (unlikely(!(CTX)->altivec_enabled)) { \
gen_exception((CTX), POWERPC_EXCP_VPU); \
return true; \
} \
} while (0)
#define REQUIRE_VSX(CTX) \
do { \
if (unlikely(!(CTX)->vsx_enabled)) { \
gen_exception((CTX), POWERPC_EXCP_VSXU); \
return true; \
} \
} while (0)
#define REQUIRE_FPU(ctx) \
do { \
if (unlikely(!(ctx)->fpu_enabled)) { \
gen_exception((ctx), POWERPC_EXCP_FPU); \
return true; \
} \
} while (0)
/*
* Helpers for implementing sets of trans_* functions.
* Defer the implementation of NAME to FUNC, with optional extra arguments.
@@ -7488,6 +7389,25 @@ static int times_4(DisasContext *ctx, int x)
#include "decode-insn64.c.inc"
#include "power8-pmu-regs.c.inc"
/*
* Incorporate CIA into the constant when R=1.
* Validate that when R=1, RA=0.
*/
static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
{
d->rt = a->rt;
d->ra = a->ra;
d->si = a->si;
if (a->r) {
if (unlikely(a->ra != 0)) {
gen_invalid(ctx);
return false;
}
d->si += ctx->cia;
}
return true;
}
#include "translate/fixedpoint-impl.c.inc"
#include "translate/fp-impl.c.inc"
@@ -7495,7 +7415,6 @@ static int times_4(DisasContext *ctx, int x)
#include "translate/vmx-impl.c.inc"
#include "translate/vsx-impl.c.inc"
#include "translate/vector-impl.c.inc"
#include "translate/dfp-impl.c.inc"
@@ -7527,20 +7446,7 @@ static void gen_dform39(DisasContext *ctx)
/* handles stfdp, lxv, stxsd, stxssp lxvx */
static void gen_dform3D(DisasContext *ctx)
{
if ((ctx->opcode & 3) == 1) { /* DQ-FORM */
switch (ctx->opcode & 0x7) {
case 1: /* lxv */
if (ctx->insns_flags2 & PPC2_ISA300) {
return gen_lxv(ctx);
}
break;
case 5: /* stxv */
if (ctx->insns_flags2 & PPC2_ISA300) {
return gen_stxv(ctx);
}
break;
}
} else { /* DS-FORM */
if ((ctx->opcode & 3) != 1) { /* DS-FORM */
switch (ctx->opcode & 0x3) {
case 0: /* stfdp */
if (ctx->insns_flags2 & PPC2_ISA205) {
@@ -7663,13 +7569,9 @@ GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
PPC_NONE, PPC2_ISA300),
#endif
#if defined(TARGET_PPC64)
GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX),
GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B),
#endif
/* handles lfdp, lxsd, lxssp */
GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
/* handles stfdp, lxv, stxsd, stxssp, stxv */
/* handles stfdp, stxsd, stxssp */
GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
@@ -8171,8 +8073,6 @@ GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
#include "translate/vsx-ops.c.inc"
#include "translate/dfp-ops.c.inc"
#include "translate/spe-ops.c.inc"
};

@@ -7,226 +7,223 @@ static inline TCGv_ptr gen_fprp_ptr(int reg)
return r;
}
#define GEN_DFP_T_A_B_Rc(name) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv_ptr rd, ra, rb; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_update_nip(ctx, ctx->base.pc_next - 4); \
rd = gen_fprp_ptr(rD(ctx->opcode)); \
ra = gen_fprp_ptr(rA(ctx->opcode)); \
rb = gen_fprp_ptr(rB(ctx->opcode)); \
gen_helper_##name(cpu_env, rd, ra, rb); \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
tcg_temp_free_ptr(rd); \
tcg_temp_free_ptr(ra); \
tcg_temp_free_ptr(rb); \
#define TRANS_DFP_T_A_B_Rc(NAME) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ \
TCGv_ptr rt, ra, rb; \
REQUIRE_INSNS_FLAGS2(ctx, DFP); \
REQUIRE_FPU(ctx); \
rt = gen_fprp_ptr(a->rt); \
ra = gen_fprp_ptr(a->ra); \
rb = gen_fprp_ptr(a->rb); \
gen_helper_##NAME(cpu_env, rt, ra, rb); \
if (unlikely(a->rc)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
tcg_temp_free_ptr(rt); \
tcg_temp_free_ptr(ra); \
tcg_temp_free_ptr(rb); \
return true; \
}
#define GEN_DFP_BF_A_B(name) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv_ptr ra, rb; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_update_nip(ctx, ctx->base.pc_next - 4); \
ra = gen_fprp_ptr(rA(ctx->opcode)); \
rb = gen_fprp_ptr(rB(ctx->opcode)); \
gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
cpu_env, ra, rb); \
tcg_temp_free_ptr(ra); \
tcg_temp_free_ptr(rb); \
#define TRANS_DFP_BF_A_B(NAME) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ \
TCGv_ptr ra, rb; \
REQUIRE_INSNS_FLAGS2(ctx, DFP); \
REQUIRE_FPU(ctx); \
ra = gen_fprp_ptr(a->ra); \
rb = gen_fprp_ptr(a->rb); \
gen_helper_##NAME(cpu_crf[a->bf], \
cpu_env, ra, rb); \
tcg_temp_free_ptr(ra); \
tcg_temp_free_ptr(rb); \
return true; \
}
#define GEN_DFP_BF_I_B(name) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv_i32 uim; \
TCGv_ptr rb; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_update_nip(ctx, ctx->base.pc_next - 4); \
uim = tcg_const_i32(UIMM5(ctx->opcode)); \
rb = gen_fprp_ptr(rB(ctx->opcode)); \
gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
cpu_env, uim, rb); \
tcg_temp_free_i32(uim); \
tcg_temp_free_ptr(rb); \
#define TRANS_DFP_BF_I_B(NAME) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ \
TCGv_ptr rb; \
REQUIRE_INSNS_FLAGS2(ctx, DFP); \
REQUIRE_FPU(ctx); \
rb = gen_fprp_ptr(a->rb); \
gen_helper_##NAME(cpu_crf[a->bf], \
cpu_env, tcg_constant_i32(a->uim), rb);\
tcg_temp_free_ptr(rb); \
return true; \
}
#define GEN_DFP_BF_A_DCM(name) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv_ptr ra; \
TCGv_i32 dcm; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_update_nip(ctx, ctx->base.pc_next - 4); \
ra = gen_fprp_ptr(rA(ctx->opcode)); \
dcm = tcg_const_i32(DCM(ctx->opcode)); \
gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
cpu_env, ra, dcm); \
tcg_temp_free_ptr(ra); \
tcg_temp_free_i32(dcm); \
#define TRANS_DFP_BF_A_DCM(NAME) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ \
TCGv_ptr ra; \
REQUIRE_INSNS_FLAGS2(ctx, DFP); \
REQUIRE_FPU(ctx); \
ra = gen_fprp_ptr(a->fra); \
gen_helper_##NAME(cpu_crf[a->bf], \
cpu_env, ra, tcg_constant_i32(a->dm)); \
tcg_temp_free_ptr(ra); \
return true; \
}
#define GEN_DFP_T_B_U32_U32_Rc(name, u32f1, u32f2) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv_ptr rt, rb; \
TCGv_i32 u32_1, u32_2; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_update_nip(ctx, ctx->base.pc_next - 4); \
rt = gen_fprp_ptr(rD(ctx->opcode)); \
rb = gen_fprp_ptr(rB(ctx->opcode)); \
u32_1 = tcg_const_i32(u32f1(ctx->opcode)); \
u32_2 = tcg_const_i32(u32f2(ctx->opcode)); \
gen_helper_##name(cpu_env, rt, rb, u32_1, u32_2); \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
tcg_temp_free_ptr(rt); \
tcg_temp_free_ptr(rb); \
tcg_temp_free_i32(u32_1); \
tcg_temp_free_i32(u32_2); \
#define TRANS_DFP_T_B_U32_U32_Rc(NAME, U32F1, U32F2) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ \
TCGv_ptr rt, rb; \
REQUIRE_INSNS_FLAGS2(ctx, DFP); \
REQUIRE_FPU(ctx); \
rt = gen_fprp_ptr(a->frt); \
rb = gen_fprp_ptr(a->frb); \
gen_helper_##NAME(cpu_env, rt, rb, \
tcg_constant_i32(a->U32F1), \
tcg_constant_i32(a->U32F2)); \
if (unlikely(a->rc)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
tcg_temp_free_ptr(rt); \
tcg_temp_free_ptr(rb); \
return true; \
}
#define GEN_DFP_T_A_B_I32_Rc(name, i32fld) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv_ptr rt, ra, rb; \
TCGv_i32 i32; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_update_nip(ctx, ctx->base.pc_next - 4); \
rt = gen_fprp_ptr(rD(ctx->opcode)); \
ra = gen_fprp_ptr(rA(ctx->opcode)); \
rb = gen_fprp_ptr(rB(ctx->opcode)); \
i32 = tcg_const_i32(i32fld(ctx->opcode)); \
gen_helper_##name(cpu_env, rt, ra, rb, i32); \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
tcg_temp_free_ptr(rt); \
tcg_temp_free_ptr(rb); \
tcg_temp_free_ptr(ra); \
tcg_temp_free_i32(i32); \
}
#define GEN_DFP_T_B_Rc(name) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv_ptr rt, rb; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_update_nip(ctx, ctx->base.pc_next - 4); \
rt = gen_fprp_ptr(rD(ctx->opcode)); \
rb = gen_fprp_ptr(rB(ctx->opcode)); \
gen_helper_##name(cpu_env, rt, rb); \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
tcg_temp_free_ptr(rt); \
tcg_temp_free_ptr(rb); \
}
#define GEN_DFP_T_FPR_I32_Rc(name, fprfld, i32fld) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv_ptr rt, rs; \
TCGv_i32 i32; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_update_nip(ctx, ctx->base.pc_next - 4); \
rt = gen_fprp_ptr(rD(ctx->opcode)); \
rs = gen_fprp_ptr(fprfld(ctx->opcode)); \
i32 = tcg_const_i32(i32fld(ctx->opcode)); \
gen_helper_##name(cpu_env, rt, rs, i32); \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
tcg_temp_free_ptr(rt); \
tcg_temp_free_ptr(rs); \
tcg_temp_free_i32(i32); \
#define TRANS_DFP_T_A_B_I32_Rc(NAME, I32FLD) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ \
TCGv_ptr rt, ra, rb; \
REQUIRE_INSNS_FLAGS2(ctx, DFP); \
REQUIRE_FPU(ctx); \
rt = gen_fprp_ptr(a->frt); \
ra = gen_fprp_ptr(a->fra); \
rb = gen_fprp_ptr(a->frb); \
gen_helper_##NAME(cpu_env, rt, ra, rb, \
tcg_constant_i32(a->I32FLD)); \
if (unlikely(a->rc)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
tcg_temp_free_ptr(rt); \
tcg_temp_free_ptr(ra); \
tcg_temp_free_ptr(rb); \
return true; \
}
GEN_DFP_T_A_B_Rc(dadd)
GEN_DFP_T_A_B_Rc(daddq)
GEN_DFP_T_A_B_Rc(dsub)
GEN_DFP_T_A_B_Rc(dsubq)
GEN_DFP_T_A_B_Rc(dmul)
GEN_DFP_T_A_B_Rc(dmulq)
GEN_DFP_T_A_B_Rc(ddiv)
GEN_DFP_T_A_B_Rc(ddivq)
GEN_DFP_BF_A_B(dcmpu)
GEN_DFP_BF_A_B(dcmpuq)
GEN_DFP_BF_A_B(dcmpo)
GEN_DFP_BF_A_B(dcmpoq)
GEN_DFP_BF_A_DCM(dtstdc)
GEN_DFP_BF_A_DCM(dtstdcq)
GEN_DFP_BF_A_DCM(dtstdg)
GEN_DFP_BF_A_DCM(dtstdgq)
GEN_DFP_BF_A_B(dtstex)
GEN_DFP_BF_A_B(dtstexq)
GEN_DFP_BF_A_B(dtstsf)
GEN_DFP_BF_A_B(dtstsfq)
GEN_DFP_BF_I_B(dtstsfi)
GEN_DFP_BF_I_B(dtstsfiq)
GEN_DFP_T_B_U32_U32_Rc(dquai, SIMM5, RMC)
GEN_DFP_T_B_U32_U32_Rc(dquaiq, SIMM5, RMC)
GEN_DFP_T_A_B_I32_Rc(dqua, RMC)
GEN_DFP_T_A_B_I32_Rc(dquaq, RMC)
GEN_DFP_T_A_B_I32_Rc(drrnd, RMC)
GEN_DFP_T_A_B_I32_Rc(drrndq, RMC)
GEN_DFP_T_B_U32_U32_Rc(drintx, FPW, RMC)
GEN_DFP_T_B_U32_U32_Rc(drintxq, FPW, RMC)
GEN_DFP_T_B_U32_U32_Rc(drintn, FPW, RMC)
GEN_DFP_T_B_U32_U32_Rc(drintnq, FPW, RMC)
GEN_DFP_T_B_Rc(dctdp)
GEN_DFP_T_B_Rc(dctqpq)
GEN_DFP_T_B_Rc(drsp)
GEN_DFP_T_B_Rc(drdpq)
GEN_DFP_T_B_Rc(dcffix)
GEN_DFP_T_B_Rc(dcffixq)
GEN_DFP_T_B_Rc(dctfix)
GEN_DFP_T_B_Rc(dctfixq)
GEN_DFP_T_FPR_I32_Rc(ddedpd, rB, SP)
GEN_DFP_T_FPR_I32_Rc(ddedpdq, rB, SP)
GEN_DFP_T_FPR_I32_Rc(denbcd, rB, SP)
GEN_DFP_T_FPR_I32_Rc(denbcdq, rB, SP)
GEN_DFP_T_B_Rc(dxex)
GEN_DFP_T_B_Rc(dxexq)
GEN_DFP_T_A_B_Rc(diex)
GEN_DFP_T_A_B_Rc(diexq)
GEN_DFP_T_FPR_I32_Rc(dscli, rA, DCM)
GEN_DFP_T_FPR_I32_Rc(dscliq, rA, DCM)
GEN_DFP_T_FPR_I32_Rc(dscri, rA, DCM)
GEN_DFP_T_FPR_I32_Rc(dscriq, rA, DCM)
#define TRANS_DFP_T_B_Rc(NAME) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ \
TCGv_ptr rt, rb; \
REQUIRE_INSNS_FLAGS2(ctx, DFP); \
REQUIRE_FPU(ctx); \
rt = gen_fprp_ptr(a->rt); \
rb = gen_fprp_ptr(a->rb); \
gen_helper_##NAME(cpu_env, rt, rb); \
if (unlikely(a->rc)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
tcg_temp_free_ptr(rt); \
tcg_temp_free_ptr(rb); \
return true; \
}
#undef GEN_DFP_T_A_B_Rc
#undef GEN_DFP_BF_A_B
#undef GEN_DFP_BF_A_DCM
#undef GEN_DFP_T_B_U32_U32_Rc
#undef GEN_DFP_T_A_B_I32_Rc
#undef GEN_DFP_T_B_Rc
#undef GEN_DFP_T_FPR_I32_Rc
#define TRANS_DFP_T_FPR_I32_Rc(NAME, FPRFLD, I32FLD) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ \
TCGv_ptr rt, rx; \
REQUIRE_INSNS_FLAGS2(ctx, DFP); \
REQUIRE_FPU(ctx); \
rt = gen_fprp_ptr(a->rt); \
rx = gen_fprp_ptr(a->FPRFLD); \
gen_helper_##NAME(cpu_env, rt, rx, \
tcg_constant_i32(a->I32FLD)); \
if (unlikely(a->rc)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
tcg_temp_free_ptr(rt); \
tcg_temp_free_ptr(rx); \
return true; \
}
TRANS_DFP_T_A_B_Rc(DADD)
TRANS_DFP_T_A_B_Rc(DADDQ)
TRANS_DFP_T_A_B_Rc(DSUB)
TRANS_DFP_T_A_B_Rc(DSUBQ)
TRANS_DFP_T_A_B_Rc(DMUL)
TRANS_DFP_T_A_B_Rc(DMULQ)
TRANS_DFP_T_A_B_Rc(DDIV)
TRANS_DFP_T_A_B_Rc(DDIVQ)
TRANS_DFP_BF_A_B(DCMPU)
TRANS_DFP_BF_A_B(DCMPUQ)
TRANS_DFP_BF_A_B(DCMPO)
TRANS_DFP_BF_A_B(DCMPOQ)
TRANS_DFP_BF_A_DCM(DTSTDC)
TRANS_DFP_BF_A_DCM(DTSTDCQ)
TRANS_DFP_BF_A_DCM(DTSTDG)
TRANS_DFP_BF_A_DCM(DTSTDGQ)
TRANS_DFP_BF_A_B(DTSTEX)
TRANS_DFP_BF_A_B(DTSTEXQ)
TRANS_DFP_BF_A_B(DTSTSF)
TRANS_DFP_BF_A_B(DTSTSFQ)
TRANS_DFP_BF_I_B(DTSTSFI)
TRANS_DFP_BF_I_B(DTSTSFIQ)
TRANS_DFP_T_B_U32_U32_Rc(DQUAI, te, rmc)
TRANS_DFP_T_B_U32_U32_Rc(DQUAIQ, te, rmc)
TRANS_DFP_T_A_B_I32_Rc(DQUA, rmc)
TRANS_DFP_T_A_B_I32_Rc(DQUAQ, rmc)
TRANS_DFP_T_A_B_I32_Rc(DRRND, rmc)
TRANS_DFP_T_A_B_I32_Rc(DRRNDQ, rmc)
TRANS_DFP_T_B_U32_U32_Rc(DRINTX, r, rmc)
TRANS_DFP_T_B_U32_U32_Rc(DRINTXQ, r, rmc)
TRANS_DFP_T_B_U32_U32_Rc(DRINTN, r, rmc)
TRANS_DFP_T_B_U32_U32_Rc(DRINTNQ, r, rmc)
TRANS_DFP_T_B_Rc(DCTDP)
TRANS_DFP_T_B_Rc(DCTQPQ)
TRANS_DFP_T_B_Rc(DRSP)
TRANS_DFP_T_B_Rc(DRDPQ)
TRANS_DFP_T_B_Rc(DCFFIX)
TRANS_DFP_T_B_Rc(DCFFIXQ)
TRANS_DFP_T_B_Rc(DCTFIX)
TRANS_DFP_T_B_Rc(DCTFIXQ)
TRANS_DFP_T_FPR_I32_Rc(DDEDPD, rb, sp)
TRANS_DFP_T_FPR_I32_Rc(DDEDPDQ, rb, sp)
TRANS_DFP_T_FPR_I32_Rc(DENBCD, rb, s)
TRANS_DFP_T_FPR_I32_Rc(DENBCDQ, rb, s)
TRANS_DFP_T_B_Rc(DXEX)
TRANS_DFP_T_B_Rc(DXEXQ)
TRANS_DFP_T_A_B_Rc(DIEX)
TRANS_DFP_T_A_B_Rc(DIEXQ)
TRANS_DFP_T_FPR_I32_Rc(DSCLI, ra, sh)
TRANS_DFP_T_FPR_I32_Rc(DSCLIQ, ra, sh)
TRANS_DFP_T_FPR_I32_Rc(DSCRI, ra, sh)
TRANS_DFP_T_FPR_I32_Rc(DSCRIQ, ra, sh)
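/*
 * DCFFIXQQ/DCTFIXQQ (ISA 3.1) convert between a signed 128-bit integer in a
 * VSR and a quad-precision DFP value in an FPR pair, which is why they
 * require both the FPU and the vector facility to be enabled.
 */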
static bool trans_DCFFIXQQ(DisasContext *ctx, arg_DCFFIXQQ *a)
{
TCGv_ptr rt, rb;
REQUIRE_INSNS_FLAGS2(ctx, DFP);
REQUIRE_FPU(ctx);
REQUIRE_VECTOR(ctx);
rt = gen_fprp_ptr(a->frtp);
rb = gen_avr_ptr(a->vrb);
gen_helper_DCFFIXQQ(cpu_env, rt, rb);
tcg_temp_free_ptr(rt);
tcg_temp_free_ptr(rb);
return true;
}
static bool trans_DCTFIXQQ(DisasContext *ctx, arg_DCTFIXQQ *a)
{
TCGv_ptr rt, rb;
REQUIRE_INSNS_FLAGS2(ctx, DFP);
REQUIRE_FPU(ctx);
REQUIRE_VECTOR(ctx);
rt = gen_avr_ptr(a->vrt);
rb = gen_fprp_ptr(a->frbp);
gen_helper_DCTFIXQQ(cpu_env, rt, rb);
tcg_temp_free_ptr(rt);
tcg_temp_free_ptr(rb);
return true;
}

@@ -1,165 +0,0 @@
#define _GEN_DFP_LONG(name, op1, op2, mask) \
GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_DFP)
#define _GEN_DFP_LONG_300(name, op1, op2, mask) \
GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_ISA300)
#define _GEN_DFP_LONGx2(name, op1, op2, mask) \
GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP)
#define _GEN_DFP_LONGx4(name, op1, op2, mask) \
GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
GEN_HANDLER_E(name, 0x3B, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \
GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \
GEN_HANDLER_E(name, 0x3B, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP)
#define _GEN_DFP_QUAD(name, op1, op2, mask) \
GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_DFP)
#define _GEN_DFP_QUAD_300(name, op1, op2, mask) \
GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_ISA300)
#define _GEN_DFP_QUADx2(name, op1, op2, mask) \
GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP)
#define _GEN_DFP_QUADx4(name, op1, op2, mask) \
GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
GEN_HANDLER_E(name, 0x3F, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \
GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \
GEN_HANDLER_E(name, 0x3F, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP)
#define GEN_DFP_T_A_B_Rc(name, op1, op2) \
_GEN_DFP_LONG(name, op1, op2, 0x00000000)
#define GEN_DFP_Tp_Ap_Bp_Rc(name, op1, op2) \
_GEN_DFP_QUAD(name, op1, op2, 0x00210800)
#define GEN_DFP_Tp_A_Bp_Rc(name, op1, op2) \
_GEN_DFP_QUAD(name, op1, op2, 0x00200800)
#define GEN_DFP_T_B_Rc(name, op1, op2) \
_GEN_DFP_LONG(name, op1, op2, 0x001F0000)
#define GEN_DFP_Tp_Bp_Rc(name, op1, op2) \
_GEN_DFP_QUAD(name, op1, op2, 0x003F0800)
#define GEN_DFP_Tp_B_Rc(name, op1, op2) \
_GEN_DFP_QUAD(name, op1, op2, 0x003F0000)
#define GEN_DFP_T_Bp_Rc(name, op1, op2) \
_GEN_DFP_QUAD(name, op1, op2, 0x001F0800)
#define GEN_DFP_BF_A_B(name, op1, op2) \
_GEN_DFP_LONG(name, op1, op2, 0x00000001)
#define GEN_DFP_BF_A_B_300(name, op1, op2) \
_GEN_DFP_LONG_300(name, op1, op2, 0x00400001)
#define GEN_DFP_BF_Ap_Bp(name, op1, op2) \
_GEN_DFP_QUAD(name, op1, op2, 0x00610801)
#define GEN_DFP_BF_A_Bp(name, op1, op2) \
_GEN_DFP_QUAD(name, op1, op2, 0x00600801)
#define GEN_DFP_BF_A_Bp_300(name, op1, op2) \
_GEN_DFP_QUAD_300(name, op1, op2, 0x00400001)
#define GEN_DFP_BF_A_DCM(name, op1, op2) \
_GEN_DFP_LONGx2(name, op1, op2, 0x00600001)
#define GEN_DFP_BF_Ap_DCM(name, op1, op2) \
_GEN_DFP_QUADx2(name, op1, op2, 0x00610001)
#define GEN_DFP_T_A_B_RMC_Rc(name, op1, op2) \
_GEN_DFP_LONGx4(name, op1, op2, 0x00000000)
#define GEN_DFP_Tp_Ap_Bp_RMC_Rc(name, op1, op2) \
_GEN_DFP_QUADx4(name, op1, op2, 0x02010800)
#define GEN_DFP_Tp_A_Bp_RMC_Rc(name, op1, op2) \
_GEN_DFP_QUADx4(name, op1, op2, 0x02000800)
#define GEN_DFP_TE_T_B_RMC_Rc(name, op1, op2) \
_GEN_DFP_LONGx4(name, op1, op2, 0x00000000)
#define GEN_DFP_TE_Tp_Bp_RMC_Rc(name, op1, op2) \
_GEN_DFP_QUADx4(name, op1, op2, 0x00200800)
#define GEN_DFP_R_T_B_RMC_Rc(name, op1, op2) \
_GEN_DFP_LONGx4(name, op1, op2, 0x001E0000)
#define GEN_DFP_R_Tp_Bp_RMC_Rc(name, op1, op2) \
_GEN_DFP_QUADx4(name, op1, op2, 0x003E0800)
#define GEN_DFP_SP_T_B_Rc(name, op1, op2) \
_GEN_DFP_LONG(name, op1, op2, 0x00070000)
#define GEN_DFP_SP_Tp_Bp_Rc(name, op1, op2) \
_GEN_DFP_QUAD(name, op1, op2, 0x00270800)
#define GEN_DFP_S_T_B_Rc(name, op1, op2) \
_GEN_DFP_LONG(name, op1, op2, 0x000F0000)
#define GEN_DFP_S_Tp_Bp_Rc(name, op1, op2) \
_GEN_DFP_QUAD(name, op1, op2, 0x002F0800)
#define GEN_DFP_T_A_SH_Rc(name, op1, op2) \
_GEN_DFP_LONGx2(name, op1, op2, 0x00000000)
#define GEN_DFP_Tp_Ap_SH_Rc(name, op1, op2) \
_GEN_DFP_QUADx2(name, op1, op2, 0x00210000)
GEN_DFP_T_A_B_Rc(dadd, 0x02, 0x00),
GEN_DFP_Tp_Ap_Bp_Rc(daddq, 0x02, 0x00),
GEN_DFP_T_A_B_Rc(dsub, 0x02, 0x10),
GEN_DFP_Tp_Ap_Bp_Rc(dsubq, 0x02, 0x10),
GEN_DFP_T_A_B_Rc(dmul, 0x02, 0x01),
GEN_DFP_Tp_Ap_Bp_Rc(dmulq, 0x02, 0x01),
GEN_DFP_T_A_B_Rc(ddiv, 0x02, 0x11),
GEN_DFP_Tp_Ap_Bp_Rc(ddivq, 0x02, 0x11),
GEN_DFP_BF_A_B(dcmpu, 0x02, 0x14),
GEN_DFP_BF_Ap_Bp(dcmpuq, 0x02, 0x14),
GEN_DFP_BF_A_B(dcmpo, 0x02, 0x04),
GEN_DFP_BF_Ap_Bp(dcmpoq, 0x02, 0x04),
GEN_DFP_BF_A_DCM(dtstdc, 0x02, 0x06),
GEN_DFP_BF_Ap_DCM(dtstdcq, 0x02, 0x06),
GEN_DFP_BF_A_DCM(dtstdg, 0x02, 0x07),
GEN_DFP_BF_Ap_DCM(dtstdgq, 0x02, 0x07),
GEN_DFP_BF_A_B(dtstex, 0x02, 0x05),
GEN_DFP_BF_Ap_Bp(dtstexq, 0x02, 0x05),
GEN_DFP_BF_A_B(dtstsf, 0x02, 0x15),
GEN_DFP_BF_A_Bp(dtstsfq, 0x02, 0x15),
GEN_DFP_BF_A_B_300(dtstsfi, 0x03, 0x15),
GEN_DFP_BF_A_Bp_300(dtstsfiq, 0x03, 0x15),
GEN_DFP_TE_T_B_RMC_Rc(dquai, 0x03, 0x02),
GEN_DFP_TE_Tp_Bp_RMC_Rc(dquaiq, 0x03, 0x02),
GEN_DFP_T_A_B_RMC_Rc(dqua, 0x03, 0x00),
GEN_DFP_Tp_Ap_Bp_RMC_Rc(dquaq, 0x03, 0x00),
GEN_DFP_T_A_B_RMC_Rc(drrnd, 0x03, 0x01),
GEN_DFP_Tp_A_Bp_RMC_Rc(drrndq, 0x03, 0x01),
GEN_DFP_R_T_B_RMC_Rc(drintx, 0x03, 0x03),
GEN_DFP_R_Tp_Bp_RMC_Rc(drintxq, 0x03, 0x03),
GEN_DFP_R_T_B_RMC_Rc(drintn, 0x03, 0x07),
GEN_DFP_R_Tp_Bp_RMC_Rc(drintnq, 0x03, 0x07),
GEN_DFP_T_B_Rc(dctdp, 0x02, 0x08),
GEN_DFP_Tp_B_Rc(dctqpq, 0x02, 0x08),
GEN_DFP_T_B_Rc(drsp, 0x02, 0x18),
GEN_DFP_Tp_Bp_Rc(drdpq, 0x02, 0x18),
GEN_DFP_T_B_Rc(dcffix, 0x02, 0x19),
GEN_DFP_Tp_B_Rc(dcffixq, 0x02, 0x19),
GEN_DFP_T_B_Rc(dctfix, 0x02, 0x09),
GEN_DFP_T_Bp_Rc(dctfixq, 0x02, 0x09),
GEN_DFP_SP_T_B_Rc(ddedpd, 0x02, 0x0a),
GEN_DFP_SP_Tp_Bp_Rc(ddedpdq, 0x02, 0x0a),
GEN_DFP_S_T_B_Rc(denbcd, 0x02, 0x1a),
GEN_DFP_S_Tp_Bp_Rc(denbcdq, 0x02, 0x1a),
GEN_DFP_T_B_Rc(dxex, 0x02, 0x0b),
GEN_DFP_T_Bp_Rc(dxexq, 0x02, 0x0b),
GEN_DFP_T_A_B_Rc(diex, 0x02, 0x1b),
GEN_DFP_Tp_A_Bp_Rc(diexq, 0x02, 0x1b),
GEN_DFP_T_A_SH_Rc(dscli, 0x02, 0x02),
GEN_DFP_Tp_Ap_SH_Rc(dscliq, 0x02, 0x02),
GEN_DFP_T_A_SH_Rc(dscri, 0x02, 0x03),
GEN_DFP_Tp_Ap_SH_Rc(dscriq, 0x02, 0x03),

@@ -17,25 +17,6 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* Incorporate CIA into the constant when R=1.
* Validate that when R=1, RA=0.
*/
static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
{
d->rt = a->rt;
d->ra = a->ra;
d->si = a->si;
if (a->r) {
if (unlikely(a->ra != 0)) {
gen_invalid(ctx);
return false;
}
d->si += ctx->cia;
}
return true;
}
/*
* Fixed-Point Load/Store Instructions
*/
@@ -51,15 +32,7 @@ static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update,
}
gen_set_access_type(ctx, ACCESS_INT);
ea = tcg_temp_new();
if (ra) {
tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
} else {
tcg_gen_mov_tl(ea, displ);
}
if (NARROW_MODE(ctx)) {
tcg_gen_ext32u_tl(ea, ea);
}
ea = do_ea_calc(ctx, ra, displ);
mop ^= ctx->default_tcg_memop_mask;
if (store) {
tcg_gen_qemu_st_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop);
@@ -96,6 +69,107 @@ static bool do_ldst_X(DisasContext *ctx, arg_X *a, bool update,
return do_ldst(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, mop);
}
static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed)
{
#if defined(TARGET_PPC64)
TCGv ea;
TCGv_i64 low_addr_gpr, high_addr_gpr;
MemOp mop;
REQUIRE_INSNS_FLAGS(ctx, 64BX);
if (!prefixed && !(ctx->insns_flags2 & PPC2_LSQ_ISA207)) {
if (ctx->pr) {
/* lq and stq were privileged prior to V. 2.07 */
gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return true;
}
if (ctx->le_mode) {
gen_align_no_le(ctx);
return true;
}
}
if (!store && unlikely(a->ra == a->rt)) {
gen_invalid(ctx);
return true;
}
gen_set_access_type(ctx, ACCESS_INT);
ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->si));
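/*
 * low_addr_gpr/high_addr_gpr are the GPRs of the pair that map to the lower
 * and higher effective address respectively; only the legacy (non-prefixed)
 * little-endian forms swap the pair, as in the old gen_lq/gen_std code.
 */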
if (prefixed || !ctx->le_mode) {
low_addr_gpr = cpu_gpr[a->rt];
high_addr_gpr = cpu_gpr[a->rt + 1];
} else {
low_addr_gpr = cpu_gpr[a->rt + 1];
high_addr_gpr = cpu_gpr[a->rt];
}
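/*
 * In a parallel context the 16-byte access must be performed atomically:
 * use the 128-bit load/store helpers when the host provides them, otherwise
 * fall back to restarting the TB under the exclusive lock.
 */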
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
if (HAVE_ATOMIC128) {
mop = DEF_MEMOP(MO_128);
TCGv_i32 oi = tcg_constant_i32(make_memop_idx(mop, ctx->mem_idx));
if (store) {
if (ctx->le_mode) {
gen_helper_stq_le_parallel(cpu_env, ea, low_addr_gpr,
high_addr_gpr, oi);
} else {
gen_helper_stq_be_parallel(cpu_env, ea, high_addr_gpr,
low_addr_gpr, oi);
}
} else {
if (ctx->le_mode) {
gen_helper_lq_le_parallel(low_addr_gpr, cpu_env, ea, oi);
tcg_gen_ld_i64(high_addr_gpr, cpu_env,
offsetof(CPUPPCState, retxh));
} else {
gen_helper_lq_be_parallel(high_addr_gpr, cpu_env, ea, oi);
tcg_gen_ld_i64(low_addr_gpr, cpu_env,
offsetof(CPUPPCState, retxh));
}
}
} else {
/* Restart with exclusive lock. */
gen_helper_exit_atomic(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
}
} else {
mop = DEF_MEMOP(MO_Q);
if (store) {
tcg_gen_qemu_st_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
} else {
tcg_gen_qemu_ld_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
}
gen_addr_add(ctx, ea, ea, 8);
if (store) {
tcg_gen_qemu_st_i64(high_addr_gpr, ea, ctx->mem_idx, mop);
} else {
tcg_gen_qemu_ld_i64(high_addr_gpr, ea, ctx->mem_idx, mop);
}
}
tcg_temp_free(ea);
#else
qemu_build_not_reached();
#endif
return true;
}
static bool do_ldst_quad_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
{
arg_D d;
if (!resolve_PLS_D(ctx, &d, a)) {
return true;
}
return do_ldst_quad(ctx, &d, store, true);
}
/* Load Byte and Zero */
TRANS(LBZ, do_ldst_D, false, false, MO_UB)
TRANS(LBZX, do_ldst_X, false, false, MO_UB)
@@ -137,6 +211,10 @@ TRANS64(LDU, do_ldst_D, true, false, MO_Q)
TRANS64(LDUX, do_ldst_X, true, false, MO_Q)
TRANS64(PLD, do_ldst_PLS_D, false, false, MO_Q)
/* Load Quadword */
TRANS64(LQ, do_ldst_quad, false, false);
TRANS64(PLQ, do_ldst_quad_PLS_D, false);
/* Store Byte */
TRANS(STB, do_ldst_D, false, true, MO_UB)
TRANS(STBX, do_ldst_X, false, true, MO_UB)
@@ -165,6 +243,10 @@ TRANS64(STDU, do_ldst_D, true, true, MO_Q)
TRANS64(STDUX, do_ldst_X, true, true, MO_Q)
TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_Q)
/* Store Quadword */
TRANS64(STQ, do_ldst_quad, true, false);
TRANS64(PSTQ, do_ldst_quad_PLS_D, true);
/*
* Fixed-Point Compare Instructions
*/
@@ -325,7 +407,86 @@ static bool trans_CFUGED(DisasContext *ctx, arg_X *a)
REQUIRE_64BIT(ctx);
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
gen_helper_cfuged(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
gen_helper_CFUGED(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
qemu_build_not_reached();
#endif
return true;
}
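/*
 * Branchless helper shared by CNTLZDM/CNTTZDM (and reused as the .fni8
 * expander of VCLZDM/VCTZDM): clz/ctz of (src & mask) finds the first
 * selected set bit, with -1 standing in for "none"; the mask is then shifted
 * so that only the bits skipped over remain, and ctpop of what is left gives
 * the count of mask bits whose corresponding src bits are zero.
 */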
static void do_cntzdm(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 mask, int64_t trail)
{
TCGv_i64 t0, t1;
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
tcg_gen_and_i64(t0, src, mask);
if (trail) {
tcg_gen_ctzi_i64(t0, t0, -1);
} else {
tcg_gen_clzi_i64(t0, t0, -1);
}
tcg_gen_setcondi_i64(TCG_COND_NE, t1, t0, -1);
tcg_gen_andi_i64(t0, t0, 63);
tcg_gen_xori_i64(t0, t0, 63);
if (trail) {
tcg_gen_shl_i64(t0, mask, t0);
tcg_gen_shl_i64(t0, t0, t1);
} else {
tcg_gen_shr_i64(t0, mask, t0);
tcg_gen_shr_i64(t0, t0, t1);
}
tcg_gen_ctpop_i64(dst, t0);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
}
static bool trans_CNTLZDM(DisasContext *ctx, arg_X *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], false);
#else
qemu_build_not_reached();
#endif
return true;
}
static bool trans_CNTTZDM(DisasContext *ctx, arg_X *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], true);
#else
qemu_build_not_reached();
#endif
return true;
}
static bool trans_PDEPD(DisasContext *ctx, arg_X *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
gen_helper_PDEPD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
qemu_build_not_reached();
#endif
return true;
}
static bool trans_PEXTD(DisasContext *ctx, arg_X *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
gen_helper_PEXTD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
qemu_build_not_reached();
#endif
return true;
}

@@ -854,99 +854,6 @@ static void gen_mtfsfi(DisasContext *ctx)
gen_helper_float_check_status(cpu_env);
}
/*** Floating-point load ***/
#define GEN_LDF(name, ldop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
t0 = tcg_temp_new_i64(); \
gen_addr_imm_index(ctx, EA, 0); \
gen_qemu_##ldop(ctx, t0, EA); \
set_fpr(rD(ctx->opcode), t0); \
tcg_temp_free(EA); \
tcg_temp_free_i64(t0); \
}
#define GEN_LDUF(name, ldop, opc, type) \
static void glue(gen_, name##u)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
if (unlikely(rA(ctx->opcode) == 0)) { \
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
t0 = tcg_temp_new_i64(); \
gen_addr_imm_index(ctx, EA, 0); \
gen_qemu_##ldop(ctx, t0, EA); \
set_fpr(rD(ctx->opcode), t0); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
tcg_temp_free_i64(t0); \
}
#define GEN_LDUXF(name, ldop, opc, type) \
static void glue(gen_, name##ux)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
t0 = tcg_temp_new_i64(); \
if (unlikely(rA(ctx->opcode) == 0)) { \
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
gen_qemu_##ldop(ctx, t0, EA); \
set_fpr(rD(ctx->opcode), t0); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
tcg_temp_free_i64(t0); \
}
#define GEN_LDXF(name, ldop, opc2, opc3, type) \
static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
t0 = tcg_temp_new_i64(); \
gen_addr_reg_index(ctx, EA); \
gen_qemu_##ldop(ctx, t0, EA); \
set_fpr(rD(ctx->opcode), t0); \
tcg_temp_free(EA); \
tcg_temp_free_i64(t0); \
}
#define GEN_LDFS(name, ldop, op, type) \
GEN_LDF(name, ldop, op | 0x20, type); \
GEN_LDUF(name, ldop, op | 0x21, type); \
GEN_LDUXF(name, ldop, op | 0x01, type); \
GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr)
{
TCGv_i32 tmp = tcg_temp_new_i32();
@@ -955,11 +862,6 @@ static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr)
tcg_temp_free_i32(tmp);
}
/* lfd lfdu lfdux lfdx */
GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT);
/* lfs lfsu lfsux lfsx */
GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
/* lfdepx (external PID lfdx) */
static void gen_lfdepx(DisasContext *ctx)
{
@@ -1089,73 +991,6 @@ static void gen_lfiwzx(DisasContext *ctx)
tcg_temp_free(EA);
tcg_temp_free_i64(t0);
}
/*** Floating-point store ***/
#define GEN_STF(name, stop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
t0 = tcg_temp_new_i64(); \
gen_addr_imm_index(ctx, EA, 0); \
get_fpr(t0, rS(ctx->opcode)); \
gen_qemu_##stop(ctx, t0, EA); \
tcg_temp_free(EA); \
tcg_temp_free_i64(t0); \
}
#define GEN_STUF(name, stop, opc, type) \
static void glue(gen_, name##u)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
if (unlikely(rA(ctx->opcode) == 0)) { \
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
t0 = tcg_temp_new_i64(); \
gen_addr_imm_index(ctx, EA, 0); \
get_fpr(t0, rS(ctx->opcode)); \
gen_qemu_##stop(ctx, t0, EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
tcg_temp_free_i64(t0); \
}
#define GEN_STUXF(name, stop, opc, type) \
static void glue(gen_, name##ux)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
if (unlikely(!ctx->fpu_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_FPU); \
return; \
} \
if (unlikely(rA(ctx->opcode) == 0)) { \
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
t0 = tcg_temp_new_i64(); \
gen_addr_reg_index(ctx, EA); \
get_fpr(t0, rS(ctx->opcode)); \
gen_qemu_##stop(ctx, t0, EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
tcg_temp_free_i64(t0); \
}
#define GEN_STXF(name, stop, opc2, opc3, type) \
static void glue(gen_, name##x)(DisasContext *ctx) \
@@ -1176,12 +1011,6 @@ static void glue(gen_, name##x)(DisasContext *ctx) \
tcg_temp_free_i64(t0); \
}
#define GEN_STFS(name, stop, op, type) \
GEN_STF(name, stop, op | 0x20, type); \
GEN_STUF(name, stop, op | 0x21, type); \
GEN_STUXF(name, stop, op | 0x01, type); \
GEN_STXF(name, stop, 0x17, op | 0x00, type)
static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr)
{
TCGv_i32 tmp = tcg_temp_new_i32();
@@ -1190,11 +1019,6 @@ static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr)
tcg_temp_free_i32(tmp);
}
/* stfd stfdu stfdux stfdx */
GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT);
/* stfs stfsu stfsux stfsx */
GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
/* stfdepx (external PID lfdx) */
static void gen_stfdepx(DisasContext *ctx)
{
@@ -1473,6 +1297,91 @@ static void gen_stfqx(DisasContext *ctx)
tcg_temp_free_i64(t1);
}
/* Floating-point Load/Store Instructions */
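/*
 * Common back end for the FP load/store forms: 'store' selects the
 * direction, 'single' converts to/from single precision, and 'update'
 * implements the u-forms, which write the effective address back to RA and
 * are invalid when RA is 0.
 */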
static bool do_lsfpsd(DisasContext *ctx, int rt, int ra, TCGv displ,
bool update, bool store, bool single)
{
TCGv ea;
TCGv_i64 t0;
REQUIRE_INSNS_FLAGS(ctx, FLOAT);
REQUIRE_FPU(ctx);
if (update && ra == 0) {
gen_invalid(ctx);
return true;
}
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new_i64();
ea = do_ea_calc(ctx, ra, displ);
if (store) {
get_fpr(t0, rt);
if (single) {
gen_qemu_st32fs(ctx, t0, ea);
} else {
gen_qemu_st64_i64(ctx, t0, ea);
}
} else {
if (single) {
gen_qemu_ld32fs(ctx, t0, ea);
} else {
gen_qemu_ld64_i64(ctx, t0, ea);
}
set_fpr(rt, t0);
}
if (update) {
tcg_gen_mov_tl(cpu_gpr[ra], ea);
}
tcg_temp_free_i64(t0);
tcg_temp_free(ea);
return true;
}
static bool do_lsfp_D(DisasContext *ctx, arg_D *a, bool update, bool store,
bool single)
{
return do_lsfpsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store,
single);
}
static bool do_lsfp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update,
bool store, bool single)
{
arg_D d;
if (!resolve_PLS_D(ctx, &d, a)) {
return true;
}
return do_lsfp_D(ctx, &d, update, store, single);
}
static bool do_lsfp_X(DisasContext *ctx, arg_X *a, bool update,
bool store, bool single)
{
return do_lsfpsd(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, single);
}
TRANS(LFS, do_lsfp_D, false, false, true)
TRANS(LFSU, do_lsfp_D, true, false, true)
TRANS(LFSX, do_lsfp_X, false, false, true)
TRANS(LFSUX, do_lsfp_X, true, false, true)
TRANS(PLFS, do_lsfp_PLS_D, false, false, true)
TRANS(LFD, do_lsfp_D, false, false, false)
TRANS(LFDU, do_lsfp_D, true, false, false)
TRANS(LFDX, do_lsfp_X, false, false, false)
TRANS(LFDUX, do_lsfp_X, true, false, false)
TRANS(PLFD, do_lsfp_PLS_D, false, false, false)
TRANS(STFS, do_lsfp_D, false, true, true)
TRANS(STFSU, do_lsfp_D, true, true, true)
TRANS(STFSX, do_lsfp_X, false, true, true)
TRANS(STFSUX, do_lsfp_X, true, true, true)
TRANS(PSTFS, do_lsfp_PLS_D, false, true, true)
TRANS(STFD, do_lsfp_D, false, true, false)
TRANS(STFDU, do_lsfp_D, true, true, false)
TRANS(STFDX, do_lsfp_X, false, true, false)
TRANS(STFDUX, do_lsfp_X, true, true, false)
TRANS(PSTFD, do_lsfp_PLS_D, false, true, false)
#undef _GEN_FLOAT_ACB
#undef GEN_FLOAT_ACB
#undef _GEN_FLOAT_AB

@@ -50,43 +50,14 @@ GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT),
GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT),
GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT),
#define GEN_LDF(name, ldop, opc, type) \
GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
#define GEN_LDUF(name, ldop, opc, type) \
GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
#define GEN_LDUXF(name, ldop, opc, type) \
GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type),
#define GEN_LDXF(name, ldop, opc2, opc3, type) \
GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
#define GEN_LDFS(name, ldop, op, type) \
GEN_LDF(name, ldop, op | 0x20, type) \
GEN_LDUF(name, ldop, op | 0x21, type) \
GEN_LDUXF(name, ldop, op | 0x01, type) \
GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT)
GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT)
GEN_HANDLER_E(lfdepx, 0x1F, 0x1F, 0x12, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER_E(lfiwax, 0x1f, 0x17, 0x1a, 0x00000001, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(lfiwzx, 0x1f, 0x17, 0x1b, 0x1, PPC_NONE, PPC2_FP_CVT_ISA206),
GEN_HANDLER_E(lfdpx, 0x1F, 0x17, 0x18, 0x00200001, PPC_NONE, PPC2_ISA205),
#define GEN_STF(name, stop, opc, type) \
GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
#define GEN_STUF(name, stop, opc, type) \
GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
#define GEN_STUXF(name, stop, opc, type) \
GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type),
#define GEN_STXF(name, stop, opc2, opc3, type) \
GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
#define GEN_STFS(name, stop, op, type) \
GEN_STF(name, stop, op | 0x20, type) \
GEN_STUF(name, stop, op | 0x21, type) \
GEN_STUXF(name, stop, op | 0x01, type) \
GEN_STXF(name, stop, 0x17, op | 0x00, type)
GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT)
GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT)
GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX)
GEN_HANDLER_E(stfdepx, 0x1F, 0x1F, 0x16, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205),

@@ -1,56 +0,0 @@
/*
* Power ISA decode for Vector Facility instructions
*
* Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#define REQUIRE_ALTIVEC(CTX) \
do { \
if (unlikely(!(CTX)->altivec_enabled)) { \
gen_exception((CTX), POWERPC_EXCP_VPU); \
return true; \
} \
} while (0)
static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
{
TCGv_i64 tgt, src, mask;
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_ALTIVEC(ctx);
tgt = tcg_temp_new_i64();
src = tcg_temp_new_i64();
mask = tcg_temp_new_i64();
/* centrifuge lower double word */
get_cpu_vsrl(src, a->vra + 32);
get_cpu_vsrl(mask, a->vrb + 32);
gen_helper_cfuged(tgt, src, mask);
set_cpu_vsrl(a->vrt + 32, tgt);
/* centrifuge higher double word */
get_cpu_vsrh(src, a->vra + 32);
get_cpu_vsrh(mask, a->vrb + 32);
gen_helper_cfuged(tgt, src, mask);
set_cpu_vsrh(a->vrt + 32, tgt);
tcg_temp_free_i64(tgt);
tcg_temp_free_i64(src);
tcg_temp_free_i64(mask);
return true;
}

@@ -1217,10 +1217,6 @@ GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15);
GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 14);
GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12);
GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8);
GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
@@ -1231,12 +1227,184 @@ GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
vextractuh, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
vextractuw, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltisb, PPC_ALTIVEC, PPC_NONE,
vinsertb, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltish, PPC_ALTIVEC, PPC_NONE,
vinserth, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltisw, PPC_ALTIVEC, PPC_NONE,
vinsertw, PPC_NONE, PPC2_ISA300);
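/*
 * VEXTDU*VLX/VEXTDDVLX and their right-indexed counterparts share this path:
 * the extract index comes from GPR RC (low 5 bits), and the *VRX forms reuse
 * the left-indexed helpers with the index remapped to (32 - size) - (RC & 0x1F).
 */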
static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right,
void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv))
{
TCGv_ptr vrt, vra, vrb;
TCGv rc;
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);
vrt = gen_avr_ptr(a->vrt);
vra = gen_avr_ptr(a->vra);
vrb = gen_avr_ptr(a->vrb);
rc = tcg_temp_new();
tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F);
if (right) {
tcg_gen_subfi_tl(rc, 32 - size, rc);
}
gen_helper(cpu_env, vrt, vra, vrb, rc);
tcg_temp_free_ptr(vrt);
tcg_temp_free_ptr(vra);
tcg_temp_free_ptr(vrb);
tcg_temp_free(rc);
return true;
}
TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX)
TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX)
TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX)
TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX)
TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX)
TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX)
TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX)
TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX)
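/*
 * Common insert path: rb holds the 64-bit element to insert and ra the byte
 * index into VRT. The right-indexed variants are folded onto the
 * left-indexed helpers by remapping the index to (16 - size) - (ra & 0xF).
 */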
static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
TCGv_ptr t;
TCGv idx;
t = gen_avr_ptr(vrt);
idx = tcg_temp_new();
tcg_gen_andi_tl(idx, ra, 0xF);
if (right) {
tcg_gen_subfi_tl(idx, 16 - size, idx);
}
gen_helper(cpu_env, t, rb, idx);
tcg_temp_free_ptr(t);
tcg_temp_free(idx);
return true;
}
static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
bool ok;
TCGv_i64 val;
val = tcg_temp_new_i64();
get_avr64(val, vrb, true);
ok = do_vinsx(ctx, vrt, size, right, ra, val, gen_helper);
tcg_temp_free_i64(val);
return ok;
}
static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
bool ok;
TCGv_i64 val;
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);
val = tcg_temp_new_i64();
tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
ok = do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper);
tcg_temp_free_i64(val);
return ok;
}
static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);
return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb,
gen_helper);
}
static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
bool ok;
TCGv_i64 val;
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);
if (a->uim > (16 - size)) {
/*
* PowerISA v3.1 says that the resulting value is undefined in this
* case, so just log a guest error and leave VRT unchanged. The
* real hardware would do a partial insert, e.g. if VRT is zeroed and
* RB is 0x12345678, executing "vinsw VRT,RB,14" results in
* VRT = 0x0000...00001234, but we don't bother to reproduce this
* behavior as software shouldn't rely on it.
*/
qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at"
" 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
16 - size);
return true;
}
val = tcg_temp_new_i64();
tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
ok = do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val,
gen_helper);
tcg_temp_free_i64(val);
return ok;
}
static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
REQUIRE_INSNS_FLAGS2(ctx, ISA300);
REQUIRE_VECTOR(ctx);
if (a->uim > (16 - size)) {
qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at"
" 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
16 - size);
return true;
}
return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb,
gen_helper);
}
TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX)
TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX)
TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX)
TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX)
TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX)
TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX)
TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX)
TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX)
TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX)
TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX)
TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX)
TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX)
TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX)
TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX)
TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX)
TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX)
TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX)
TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX)
TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX)
TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX)
static void gen_vsldoi(DisasContext *ctx)
{
@@ -1257,6 +1425,72 @@ static void gen_vsldoi(DisasContext *ctx)
tcg_temp_free_i32(sh);
}
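/*
 * VSLDBI/VSRDBI shift the 256-bit concatenation VRA||VRB left/right by 0-7
 * bits and keep the most/least significant 128 bits; tcg_gen_extract2_i64
 * assembles each result doubleword from two adjacent source doublewords, so
 * no branches or 128-bit arithmetic are needed.
 */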
static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a)
{
TCGv_i64 t0, t1, t2;
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
get_avr64(t0, a->vra, true);
get_avr64(t1, a->vra, false);
if (a->sh != 0) {
t2 = tcg_temp_new_i64();
get_avr64(t2, a->vrb, true);
tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh);
tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh);
tcg_temp_free_i64(t2);
}
set_avr64(a->vrt, t0, true);
set_avr64(a->vrt, t1, false);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
return true;
}
static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
{
TCGv_i64 t2, t1, t0;
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
get_avr64(t0, a->vrb, false);
get_avr64(t1, a->vrb, true);
if (a->sh != 0) {
t2 = tcg_temp_new_i64();
get_avr64(t2, a->vra, false);
tcg_gen_extract2_i64(t0, t0, t1, a->sh);
tcg_gen_extract2_i64(t1, t1, t2, a->sh);
tcg_temp_free_i64(t2);
}
set_avr64(a->vrt, t0, false);
set_avr64(a->vrt, t1, true);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
return true;
}
#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
{ \
@@ -1559,6 +1793,86 @@ GEN_VXFORM3(vpermxor, 22, 0xFF)
GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
vpermxor, PPC_NONE, PPC2_ALTIVEC_207)
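/*
 * The ISA 3.1 vector bit-manipulation instructions are expanded as gvec
 * operations over 64-bit elements, reusing the scalar helpers
 * (gen_helper_CFUGED/PDEPD/PEXTD) and do_cntzdm as the per-element .fni8
 * callbacks.
 */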
static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
{
static const GVecGen3 g = {
.fni8 = gen_helper_CFUGED,
.vece = MO_64,
};
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);
tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
avr_full_offset(a->vrb), 16, 16, &g);
return true;
}
static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a)
{
static const GVecGen3i g = {
.fni8 = do_cntzdm,
.vece = MO_64,
};
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);
tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
avr_full_offset(a->vrb), 16, 16, false, &g);
return true;
}
static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a)
{
static const GVecGen3i g = {
.fni8 = do_cntzdm,
.vece = MO_64,
};
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);
tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
avr_full_offset(a->vrb), 16, 16, true, &g);
return true;
}
static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a)
{
static const GVecGen3 g = {
.fni8 = gen_helper_PDEPD,
.vece = MO_64,
};
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);
tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
avr_full_offset(a->vrb), 16, 16, &g);
return true;
}
static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a)
{
static const GVecGen3 g = {
.fni8 = gen_helper_PEXTD,
.vece = MO_64,
};
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);
tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
avr_full_offset(a->vrb), 16, 16, &g);
return true;
}
#undef GEN_VR_LDX
#undef GEN_VR_STX
#undef GEN_VR_LVE

@@ -225,13 +225,9 @@ GEN_VXFORM_DUAL_INV(vsplth, vextractuh, 6, 9, 0x00000000, 0x100000,
GEN_VXFORM_DUAL_INV(vspltw, vextractuw, 6, 10, 0x00000000, 0x100000,
PPC_ALTIVEC),
GEN_VXFORM_300_EXT(vextractd, 6, 11, 0x100000),
GEN_VXFORM_DUAL_INV(vspltisb, vinsertb, 6, 12, 0x00000000, 0x100000,
PPC_ALTIVEC),
GEN_VXFORM_DUAL_INV(vspltish, vinserth, 6, 13, 0x00000000, 0x100000,
PPC_ALTIVEC),
GEN_VXFORM_DUAL_INV(vspltisw, vinsertw, 6, 14, 0x00000000, 0x100000,
PPC_ALTIVEC),
GEN_VXFORM_300_EXT(vinsertd, 6, 15, 0x100000),
GEN_VXFORM(vspltisb, 6, 12),
GEN_VXFORM(vspltish, 6, 13),
GEN_VXFORM(vspltisw, 6, 14),
GEN_VXFORM_300_EO(vnegw, 0x01, 0x18, 0x06),
GEN_VXFORM_300_EO(vnegd, 0x01, 0x18, 0x07),
GEN_VXFORM_300_EO(vextsb2w, 0x01, 0x18, 0x10),

(File diff suppressed because it is too large.)

@@ -10,7 +10,6 @@ GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(lxvh8x, 0x1F, 0x0C, 0x19, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(lxvb16x, 0x1F, 0x0C, 0x1B, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(lxvx, 0x1F, 0x0C, 0x08, 0x00000040, PPC_NONE, PPC2_ISA300),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(lxvl, 0x1F, 0x0D, 0x08, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(lxvll, 0x1F, 0x0D, 0x09, 0, PPC_NONE, PPC2_ISA300),
@@ -25,7 +24,6 @@ GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(stxvh8x, 0x1F, 0x0C, 0x1D, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stxvb16x, 0x1F, 0x0C, 0x1F, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stxvx, 0x1F, 0x0C, 0x0C, 0, PPC_NONE, PPC2_ISA300),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(stxvl, 0x1F, 0x0D, 0x0C, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stxvll, 0x1F, 0x0D, 0x0D, 0, PPC_NONE, PPC2_ISA300),
@@ -350,8 +348,6 @@ GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX),
GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX),
GEN_XX3FORM(xxperm, 0x08, 0x03, PPC2_ISA300),
GEN_XX3FORM(xxpermr, 0x08, 0x07, PPC2_ISA300),
GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX),
GEN_XX1FORM(xxspltib, 0x08, 0x0B, PPC2_ISA300),
GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00),
GEN_XX2FORM_EXT(xxextractuw, 0x0A, 0x0A, PPC2_ISA300),
GEN_XX2FORM_EXT(xxinsertw, 0x0A, 0x0B, PPC2_ISA300),