/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/*
 * DEF(name, oargs, iargs, cargs, flags)
 */
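
/*
 * Illustrative sketch only (not part of this header): each DEF() line
 * below is an x-macro entry, so an including file defines DEF() first and
 * then includes this header to generate an enum or table.  Assuming the
 * usual QEMU layout, a consumer might build the opcode enum like this:
 *
 *     typedef enum TCGOpcode {
 *     #define DEF(name, oargs, iargs, cargs, flags)  INDEX_op_ ## name,
 *     #include "tcg/tcg-opc.h"
 *         NB_OPS,
 *     } TCGOpcode;
 *
 * (This header #undef's DEF at the end, so the consumer need not.)
 */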

/* predefined ops */
DEF(discard, 1, 0, 0, TCG_OPF_NOT_PRESENT)
DEF(set_label, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)

/* variable number of parameters */
DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | TCG_OPF_NOT_PRESENT)

DEF(br, 0, 0, 1, TCG_OPF_BB_END)

#define IMPL(X) (__builtin_constant_p(X) && (X) <= 0 ? TCG_OPF_NOT_PRESENT : 0)
#if TCG_TARGET_REG_BITS == 32
# define IMPL64 TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT
#else
# define IMPL64 TCG_OPF_64BIT
#endif
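
/*
 * Illustrative note (not from the original source): when a target's
 * TCG_TARGET_HAS_* constant is 0, IMPL() folds the flags to
 * TCG_OPF_NOT_PRESENT at compile time; when it is 1, IMPL() yields 0 and
 * the op is available.  E.g. on a target with TCG_TARGET_HAS_div_i32 == 0:
 *
 *     DEF(div_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
 *     // flags evaluate to TCG_OPF_NOT_PRESENT, so generic code must
 *     // expand division some other way (e.g. via div2 or a helper).
 *
 * IMPL64 additionally marks every 64-bit op as not-present on 32-bit hosts.
 */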

DEF(mb, 0, 0, 1, 0)

DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
DEF(setcond_i32, 1, 2, 1, 0)
DEF(movcond_i32, 1, 4, 1, IMPL(TCG_TARGET_HAS_movcond_i32))
/* load/store */
DEF(ld8u_i32, 1, 1, 1, 0)
DEF(ld8s_i32, 1, 1, 1, 0)
DEF(ld16u_i32, 1, 1, 1, 0)
DEF(ld16s_i32, 1, 1, 1, 0)
DEF(ld_i32, 1, 1, 1, 0)
DEF(st8_i32, 0, 2, 1, 0)
DEF(st16_i32, 0, 2, 1, 0)
DEF(st_i32, 0, 2, 1, 0)
/* arith */
DEF(add_i32, 1, 2, 0, 0)
DEF(sub_i32, 1, 2, 0, 0)
DEF(mul_i32, 1, 2, 0, 0)
DEF(div_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
DEF(divu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
DEF(rem_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32))
DEF(remu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32))
DEF(div2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32))
DEF(divu2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32))
DEF(and_i32, 1, 2, 0, 0)
DEF(or_i32, 1, 2, 0, 0)
DEF(xor_i32, 1, 2, 0, 0)
/* shifts/rotates */
DEF(shl_i32, 1, 2, 0, 0)
DEF(shr_i32, 1, 2, 0, 0)
DEF(sar_i32, 1, 2, 0, 0)
DEF(rotl_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
DEF(rotr_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
DEF(deposit_i32, 1, 2, 2, IMPL(TCG_TARGET_HAS_deposit_i32))
DEF(extract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_extract_i32))
DEF(sextract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_sextract_i32))
DEF(extract2_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_extract2_i32))

DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)

DEF(add2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_add2_i32))
DEF(sub2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_sub2_i32))
DEF(mulu2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_mulu2_i32))
DEF(muls2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_muls2_i32))
DEF(muluh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i32))
DEF(mulsh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i32))
DEF(brcond2_i32, 0, 4, 2,
    TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL(TCG_TARGET_REG_BITS == 32))
DEF(setcond2_i32, 1, 4, 1, IMPL(TCG_TARGET_REG_BITS == 32))

DEF(ext8s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8s_i32))
DEF(ext16s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16s_i32))
DEF(ext8u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8u_i32))
DEF(ext16u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16u_i32))
DEF(bswap16_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap16_i32))
DEF(bswap32_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap32_i32))
DEF(not_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_not_i32))
DEF(neg_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_neg_i32))
DEF(andc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_andc_i32))
DEF(orc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_orc_i32))
DEF(eqv_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_eqv_i32))
DEF(nand_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nand_i32))
DEF(nor_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nor_i32))
DEF(clz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_clz_i32))
DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32))
DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32))

DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
DEF(setcond_i64, 1, 2, 1, IMPL64)
DEF(movcond_i64, 1, 4, 1, IMPL64 | IMPL(TCG_TARGET_HAS_movcond_i64))
/* load/store */
DEF(ld8u_i64, 1, 1, 1, IMPL64)
DEF(ld8s_i64, 1, 1, 1, IMPL64)
DEF(ld16u_i64, 1, 1, 1, IMPL64)
DEF(ld16s_i64, 1, 1, 1, IMPL64)
DEF(ld32u_i64, 1, 1, 1, IMPL64)
DEF(ld32s_i64, 1, 1, 1, IMPL64)
DEF(ld_i64, 1, 1, 1, IMPL64)
DEF(st8_i64, 0, 2, 1, IMPL64)
DEF(st16_i64, 0, 2, 1, IMPL64)
DEF(st32_i64, 0, 2, 1, IMPL64)
DEF(st_i64, 0, 2, 1, IMPL64)
/* arith */
DEF(add_i64, 1, 2, 0, IMPL64)
DEF(sub_i64, 1, 2, 0, IMPL64)
DEF(mul_i64, 1, 2, 0, IMPL64)
DEF(div_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64))
DEF(divu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64))
DEF(rem_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64))
DEF(remu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64))
DEF(div2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64))
DEF(divu2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64))
DEF(and_i64, 1, 2, 0, IMPL64)
DEF(or_i64, 1, 2, 0, IMPL64)
DEF(xor_i64, 1, 2, 0, IMPL64)
/* shifts/rotates */
DEF(shl_i64, 1, 2, 0, IMPL64)
DEF(shr_i64, 1, 2, 0, IMPL64)
DEF(sar_i64, 1, 2, 0, IMPL64)
DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64))
DEF(extract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_extract_i64))
DEF(sextract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_sextract_i64))
DEF(extract2_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_extract2_i64))

/* size changing ops */
DEF(ext_i32_i64, 1, 1, 0, IMPL64)
DEF(extu_i32_i64, 1, 1, 0, IMPL64)
DEF(extrl_i64_i32, 1, 1, 0,
    IMPL(TCG_TARGET_HAS_extrl_i64_i32)
    | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
DEF(extrh_i64_i32, 1, 1, 0,
    IMPL(TCG_TARGET_HAS_extrh_i64_i32)
    | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))

DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL64)
DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64))
DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64))
DEF(ext32s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32s_i64))
DEF(ext8u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8u_i64))
DEF(ext16u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16u_i64))
DEF(ext32u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32u_i64))
DEF(bswap16_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64))
DEF(bswap32_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64))
DEF(bswap64_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64))
DEF(not_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_not_i64))
DEF(neg_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_neg_i64))
DEF(andc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_andc_i64))
DEF(orc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_orc_i64))
DEF(eqv_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_eqv_i64))
DEF(nand_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nand_i64))
DEF(nor_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nor_i64))
DEF(clz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_clz_i64))
DEF(ctz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctz_i64))
DEF(ctpop_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctpop_i64))

DEF(add2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_add2_i64))
DEF(sub2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_sub2_i64))
DEF(mulu2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulu2_i64))
DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64))
DEF(muluh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muluh_i64))
DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64))

#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
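
/*
 * Illustrative note (not from the original source): DATA64_ARGS is the
 * number of operand slots a 64-bit data value needs -- one register on a
 * 64-bit host, two (low/high halves) on a 32-bit host.  For example:
 *
 *     DEF(qemu_ld_a32_i64, DATA64_ARGS, 1, 1,
 *         TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
 *     // loads a 64-bit value: 1 output operand on 64-bit hosts,
 *     // 2 output operands on 32-bit hosts.
 */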

/* There are tcg_ctx->insn_start_words here, not just one. */
DEF(insn_start, 0, 0, DATA64_ARGS, TCG_OPF_NOT_PRESENT)

DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)

DEF(plugin_cb_start, 0, 0, 3, TCG_OPF_NOT_PRESENT)
DEF(plugin_cb_end, 0, 0, 0, TCG_OPF_NOT_PRESENT)

/* Replicate ld/st ops for 32 and 64-bit guest addresses. */
DEF(qemu_ld_a32_i32, 1, 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st_a32_i32, 0, 1 + 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld_a32_i64, DATA64_ARGS, 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
DEF(qemu_st_a32_i64, 0, DATA64_ARGS + 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)

DEF(qemu_ld_a64_i32, 1, DATA64_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st_a64_i32, 0, 1 + DATA64_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld_a64_i64, DATA64_ARGS, DATA64_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
DEF(qemu_st_a64_i64, 0, DATA64_ARGS + DATA64_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)

/* Only used by i386 to cope with stupid register constraints. */
DEF(qemu_st8_a32_i32, 0, 1 + 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
    IMPL(TCG_TARGET_HAS_qemu_st8_i32))
DEF(qemu_st8_a64_i32, 0, 1 + DATA64_ARGS, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
    IMPL(TCG_TARGET_HAS_qemu_st8_i32))

/* Only for 64-bit hosts at the moment. */
DEF(qemu_ld_a32_i128, 2, 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
    IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
DEF(qemu_ld_a64_i128, 2, 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
    IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
DEF(qemu_st_a32_i128, 0, 3, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
    IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
DEF(qemu_st_a64_i128, 0, 3, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
    IMPL(TCG_TARGET_HAS_qemu_ldst_i128))

/* Host vector support. */

#define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec)

DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)

DEF(dup_vec, 1, 1, 0, IMPLVEC)
DEF(dup2_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_REG_BITS == 32))

DEF(ld_vec, 1, 1, 1, IMPLVEC)
DEF(st_vec, 0, 2, 1, IMPLVEC)
DEF(dupm_vec, 1, 1, 1, IMPLVEC)

DEF(add_vec, 1, 2, 0, IMPLVEC)
DEF(sub_vec, 1, 2, 0, IMPLVEC)
DEF(mul_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_mul_vec))
DEF(neg_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_neg_vec))
DEF(abs_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_abs_vec))
DEF(ssadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(usadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(sssub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(ussub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
DEF(smin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
DEF(umin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
DEF(smax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
DEF(umax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))

DEF(and_vec, 1, 2, 0, IMPLVEC)
DEF(or_vec, 1, 2, 0, IMPLVEC)
DEF(xor_vec, 1, 2, 0, IMPLVEC)
DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec))
DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec))
DEF(nand_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nand_vec))
DEF(nor_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nor_vec))
DEF(eqv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_eqv_vec))
DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec))

DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
DEF(shri_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
DEF(sari_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
DEF(rotli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_roti_vec))

DEF(shls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
DEF(shrs_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
DEF(sars_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
DEF(rotls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rots_vec))

DEF(shlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
DEF(shrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
DEF(sarv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
DEF(rotlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))
DEF(rotrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))

DEF(cmp_vec, 1, 2, 1, IMPLVEC)

DEF(bitsel_vec, 1, 3, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_bitsel_vec))
DEF(cmpsel_vec, 1, 4, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_cmpsel_vec))

DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT)

#if TCG_TARGET_MAYBE_vec
#include "tcg-target.opc.h"
#endif

#ifdef TCG_TARGET_INTERPRETER
/* These opcodes are only for use between the tci generator and interpreter. */
DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
#endif

#undef DATA64_ARGS
#undef IMPL
#undef IMPL64
#undef IMPLVEC
#undef DEF