More generic 64 bit multiplication support, by Aurelien Jarno.

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@2821 c046a42c-6fe2-441c-8c8c-71466251a162
committed by ths on 2007-05-16 11:59:40 +0000
parent 100ce98812
commit 69d357286d
7 changed files with 84 additions and 65 deletions

Makefile.target:

@@ -365,6 +365,7 @@ endif
 # must use static linking to avoid leaving stuff in virtual address space
 VL_OBJS=vl.o osdep.o readline.o monitor.o pci.o console.o loader.o isa_mmio.o
 VL_OBJS+=cutils.o
+VL_OBJS+=host-utils.o
 VL_OBJS+=block.o block-raw.o
 VL_OBJS+=block-cow.o block-qcow.o aes.o block-vmdk.o block-cloop.o block-dmg.o block-bochs.o block-vpc.o block-vvfat.o block-qcow2.o
 VL_OBJS+=irq.o

exec-all.h:

@@ -78,6 +78,9 @@ void optimize_flags_init(void);
 extern FILE *logfile;
 extern int loglevel;
 
+void muls64(int64_t *phigh, int64_t *plow, int64_t a, int64_t b);
+void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b);
+
 int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
 int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
 void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
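The new interface returns the full 128-bit product through two out-parameters, high word first. A minimal caller sketch, assuming the prototypes above are visible and the program is linked against host-utils.o (not part of the commit):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b);

int main(void)
{
    uint64_t hi, lo;

    /* (2^64 - 1) * 2 = 2^65 - 2: high word 1, low word 2^64 - 2 */
    mulu64(&hi, &lo, UINT64_MAX, 2);
    printf("hi=%" PRIu64 " lo=0x%016" PRIx64 "\n", hi, lo);  /* hi=1 lo=0xfffffffffffffffe */
    return 0;
}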

host-utils.c (new file):

@@ -0,0 +1,75 @@
+/*
+ * Utility compute operations used by translated code.
+ *
+ * Copyright (c) 2007 Aurelien Jarno
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "vl.h"
+
+/* Signed 64x64 -> 128 multiplication */
+void muls64(int64_t *phigh, int64_t *plow, int64_t a, int64_t b)
+{
+#if defined(__x86_64__)
+    __asm__ ("imul %0\n\t"
+             : "=d" (*phigh), "=a" (*plow)
+             : "a" (a), "0" (b)
+             );
+#else
+    int64_t ph;
+    uint64_t pm1, pm2, pl;
+
+    pl = (uint64_t)((uint32_t)a) * (uint64_t)((uint32_t)b);
+    pm1 = (a >> 32) * (uint32_t)b;
+    pm2 = (uint32_t)a * (b >> 32);
+    ph = (a >> 32) * (b >> 32);
+
+    ph += (int64_t)pm1 >> 32;
+    pm1 = (uint64_t)((uint32_t)pm1) + pm2 + (pl >> 32);
+
+    *phigh = ph + ((int64_t)pm1 >> 32);
+    *plow = (pm1 << 32) + (uint32_t)pl;
+#endif
+}
+
+/* Unsigned 64x64 -> 128 multiplication */
+void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b)
+{
+#if defined(__x86_64__)
+    __asm__ ("mul %0\n\t"
+             : "=d" (*phigh), "=a" (*plow)
+             : "a" (a), "0" (b)
+             );
+#else
+    uint64_t ph, pm1, pm2, pl;
+
+    pl = (uint64_t)((uint32_t)a) * (uint64_t)((uint32_t)b);
+    pm1 = (a >> 32) * (uint32_t)b;
+    pm2 = (uint32_t)a * (b >> 32);
+    ph = (a >> 32) * (b >> 32);
+
+    ph += pm1 >> 32;
+    pm1 = (uint64_t)((uint32_t)pm1) + pm2 + (pl >> 32);
+
+    *phigh = ph + (pm1 >> 32);
+    *plow = (pm1 << 32) + (uint32_t)pl;
+#endif
+}
+
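The portable branch builds the 128-bit product from four 32-bit partial products: with a = a1*2^32 + a0 and b = b1*2^32 + b0, a*b = a1*b1*2^64 + (a1*b0 + a0*b1)*2^32 + a0*b0, and the middle-word carries are folded into ph and pm1. A quick standalone cross-check of that scheme against a compiler-provided reference (a sketch assuming gcc/clang with unsigned __int128; mulu64_portable is a copy of the #else branch, not part of the commit):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Copy of the #else branch of mulu64() above. */
static void mulu64_portable(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b)
{
    uint64_t ph, pm1, pm2, pl;

    pl = (uint64_t)((uint32_t)a) * (uint64_t)((uint32_t)b);
    pm1 = (a >> 32) * (uint32_t)b;
    pm2 = (uint32_t)a * (b >> 32);
    ph = (a >> 32) * (b >> 32);

    ph += pm1 >> 32;
    pm1 = (uint64_t)((uint32_t)pm1) + pm2 + (pl >> 32);

    *phigh = ph + (pm1 >> 32);
    *plow = (pm1 << 32) + (uint32_t)pl;
}

int main(void)
{
    static const uint64_t v[] = {
        0, 1, 0xffffffffULL, 0x100000000ULL,
        0xdeadbeefcafebabeULL, 0xffffffffffffffffULL
    };
    int i, j;

    for (i = 0; i < 6; i++) {
        for (j = 0; j < 6; j++) {
            unsigned __int128 r = (unsigned __int128)v[i] * v[j];
            uint64_t hi, lo;

            mulu64_portable(&hi, &lo, v[i], v[j]);
            assert(hi == (uint64_t)(r >> 64) && lo == (uint64_t)r);
        }
    }
    printf("portable path matches __int128 reference\n");
    return 0;
}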

target-i386/helper.c:

@@ -3620,50 +3620,6 @@ static void neg128(uint64_t *plow, uint64_t *phigh)
     add128(plow, phigh, 1, 0);
 }
 
-static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
-{
-    uint32_t a0, a1, b0, b1;
-    uint64_t v;
-
-    a0 = a;
-    a1 = a >> 32;
-
-    b0 = b;
-    b1 = b >> 32;
-
-    v = (uint64_t)a0 * (uint64_t)b0;
-    *plow = v;
-    *phigh = 0;
-
-    v = (uint64_t)a0 * (uint64_t)b1;
-    add128(plow, phigh, v << 32, v >> 32);
-
-    v = (uint64_t)a1 * (uint64_t)b0;
-    add128(plow, phigh, v << 32, v >> 32);
-
-    v = (uint64_t)a1 * (uint64_t)b1;
-    *phigh += v;
-#ifdef DEBUG_MULDIV
-    printf("mul: 0x%016" PRIx64 " * 0x%016" PRIx64 " = 0x%016" PRIx64 "%016" PRIx64 "\n",
-           a, b, *phigh, *plow);
-#endif
-}
-
-static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
-{
-    int sa, sb;
-
-    sa = (a < 0);
-    if (sa)
-        a = -a;
-    sb = (b < 0);
-    if (sb)
-        b = -b;
-    mul64(plow, phigh, a, b);
-    if (sa ^ sb) {
-        neg128(plow, phigh);
-    }
-}
-
 /* return TRUE if overflow */
 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
 {

@@ -3731,7 +3687,7 @@ void helper_mulq_EAX_T0(void)
 {
     uint64_t r0, r1;
 
-    mul64(&r0, &r1, EAX, T0);
+    mulu64(&r1, &r0, EAX, T0);
     EAX = r0;
     EDX = r1;
     CC_DST = r0;

@@ -3742,7 +3698,7 @@ void helper_imulq_EAX_T0(void)
 {
     uint64_t r0, r1;
 
-    imul64(&r0, &r1, EAX, T0);
+    muls64(&r1, &r0, EAX, T0);
    EAX = r0;
     EDX = r1;
     CC_DST = r0;

@@ -3753,7 +3709,7 @@ void helper_imulq_T0_T1(void)
 {
     uint64_t r0, r1;
 
-    imul64(&r0, &r1, T0, T1);
+    muls64(&r1, &r0, T0, T1);
     T0 = r0;
     CC_DST = r0;
     CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
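Two things stand out in these call sites: the removed mul64()/imul64() took (plow, phigh, ...) while the new helpers take (phigh, plow, ...), which is why every call swaps &r0 and &r1; and the CC_SRC expression flags overflow exactly when the high word is not the sign extension of the low word. A standalone illustration of that test, assuming a host compiler with __int128 (not part of the commit):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t a = INT64_MAX, b = 2;
    __int128 r = (__int128)a * b;
    int64_t lo = (int64_t)r;
    int64_t hi = (int64_t)(r >> 64);

    /* Same test as CC_SRC in helper_imulq_*: the 128-bit product fits in
       64 bits iff the high word equals the sign extension of the low word. */
    printf("overflow=%d\n", hi != (lo >> 63));  /* prints overflow=1 */
    return 0;
}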

target-mips/exec.h:

@@ -97,8 +97,6 @@ void do_ddiv (void);
 #if TARGET_LONG_BITS > HOST_LONG_BITS
 void do_ddivu (void);
 #endif
-void do_dmult (void);
-void do_dmultu (void);
 #endif
 void do_mfc0_random(void);
 void do_mfc0_count(void);

target-mips/op.c:

@@ -874,13 +874,13 @@ void op_msubu (void)
 #ifdef TARGET_MIPS64
 void op_dmult (void)
 {
-    CALL_FROM_TB0(do_dmult);
+    CALL_FROM_TB4(muls64, &(env->HI), &(env->LO), T0, T1);
     RETURN();
 }
 
 void op_dmultu (void)
 {
-    CALL_FROM_TB0(do_dmultu);
+    CALL_FROM_TB4(mulu64, &(env->HI), &(env->LO), T0, T1);
     RETURN();
 }
 #endif

target-mips/op_helper.c:

@@ -228,20 +228,6 @@ void do_div (void)
 #endif
 
 #ifdef TARGET_MIPS64
-void do_dmult (void)
-{
-    env->LO = (int64_t)T0 * (int64_t)T1;
-    /* XXX */
-    env->HI = (env->LO | (1ULL << 63)) ? ~0ULL : 0ULL;
-}
-
-void do_dmultu (void)
-{
-    env->LO = T0 * T1;
-    /* XXX */
-    env->HI = 0;
-}
-
 void do_ddiv (void)
 {
     if (T1 != 0) {
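The removed MIPS helpers were explicit placeholders: do_dmultu hard-coded HI to 0, and in do_dmult the condition (env->LO | (1ULL << 63)) is always nonzero, so HI was unconditionally ~0ULL. A small check of what the generic helpers compute instead, assuming linkage against host-utils.o (hypothetical test, not part of the commit):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Prototypes from the new host-utils.c. */
void muls64(int64_t *phigh, int64_t *plow, int64_t a, int64_t b);
void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b);

int main(void)
{
    uint64_t hi, lo;
    int64_t shi, slo;

    /* dmultu case the old code got wrong: 2^32 * 2^32 = 2^64, so HI must be 1. */
    mulu64(&hi, &lo, 1ULL << 32, 1ULL << 32);
    printf("dmultu: HI=%" PRIu64 " LO=%" PRIu64 "\n", hi, lo);   /* HI=1 LO=0 */

    /* dmult case: (-1) * (-1) = 1, so HI must be 0, not ~0. */
    muls64(&shi, &slo, -1, -1);
    printf("dmult:  HI=%" PRId64 " LO=%" PRId64 "\n", shi, slo); /* HI=0 LO=1 */
    return 0;
}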