From 8da76a8c3fb5246fb08526673f79f9a8d5498336 Mon Sep 17 00:00:00 2001
From: Xiaobaibai <824395314@qq.com>
Date: Mon, 12 Jun 2023 11:37:20 +0800
Subject: [PATCH 1/2] ARMv8 neon support

---
 cl_dll/neon_mathfun.h  | 356 +++++++++++++++++++++++++++++++++++++++++
 cl_dll/studio_util.cpp | 235 +++++++++++++++++++++++++++
 2 files changed, 591 insertions(+)
 create mode 100644 cl_dll/neon_mathfun.h

diff --git a/cl_dll/neon_mathfun.h b/cl_dll/neon_mathfun.h
new file mode 100644
index 00000000..e45b92c1
--- /dev/null
+++ b/cl_dll/neon_mathfun.h
@@ -0,0 +1,356 @@
+/* NEON implementation of sin, cos, exp and log
+
+   Inspired by Intel Approximate Math library, and based on the
+   corresponding algorithms of the cephes math library
+*/
+
+/* Copyright (C) 2011 Julien Pommier
+
+   This software is provided 'as-is', without any express or implied
+   warranty. In no event will the authors be held liable for any damages
+   arising from the use of this software.
+
+   Permission is granted to anyone to use this software for any purpose,
+   including commercial applications, and to alter it and redistribute it
+   freely, subject to the following restrictions:
+
+   1. The origin of this software must not be misrepresented; you must not
+      claim that you wrote the original software. If you use this software
+      in a product, an acknowledgment in the product documentation would be
+      appreciated but is not required.
+   2. Altered source versions must be plainly marked as such, and must not be
+      misrepresented as being the original software.
+   3. This notice may not be removed or altered from any source distribution.
+
+   (this is the zlib license)
+*/
+
+#include <arm_neon.h>
+
+typedef float32x4_t v4sf; // vector of 4 float
+typedef uint32x4_t v4su;  // vector of 4 uint32
+typedef int32x4_t v4si;   // vector of 4 int32
+
+#define s4f_x(s4f) vgetq_lane_f32(s4f, 0)
+#define s4f_y(s4f) vgetq_lane_f32(s4f, 1)
+#define s4f_z(s4f) vgetq_lane_f32(s4f, 2)
+#define s4f_w(s4f) vgetq_lane_f32(s4f, 3)
+
+#define c_inv_mant_mask ~0x7f800000u
+#define c_cephes_SQRTHF 0.707106781186547524
+#define c_cephes_log_p0 7.0376836292E-2
+#define c_cephes_log_p1 - 1.1514610310E-1
+#define c_cephes_log_p2 1.1676998740E-1
+#define c_cephes_log_p3 - 1.2420140846E-1
+#define c_cephes_log_p4 + 1.4249322787E-1
+#define c_cephes_log_p5 - 1.6668057665E-1
+#define c_cephes_log_p6 + 2.0000714765E-1
+#define c_cephes_log_p7 - 2.4999993993E-1
+#define c_cephes_log_p8 + 3.3333331174E-1
+#define c_cephes_log_q1 -2.12194440e-4
+#define c_cephes_log_q2 0.693359375
+
+/* natural logarithm computed for 4 simultaneous float
+   return NaN for x <= 0
+*/
+inline v4sf log_ps(v4sf x) {
+  v4sf one = vdupq_n_f32(1);
+
+  x = vmaxq_f32(x, vdupq_n_f32(0)); /* force flush to zero on denormal values */
+  v4su invalid_mask = vcleq_f32(x, vdupq_n_f32(0));
+
+  v4si ux = vreinterpretq_s32_f32(x);
+
+  v4si emm0 = vshrq_n_s32(ux, 23);
+
+  /* keep only the fractional part */
+  ux = vandq_s32(ux, vdupq_n_s32(c_inv_mant_mask));
+  ux = vorrq_s32(ux, vreinterpretq_s32_f32(vdupq_n_f32(0.5f)));
+  x = vreinterpretq_f32_s32(ux);
+
+  emm0 = vsubq_s32(emm0, vdupq_n_s32(0x7f));
+  v4sf e = vcvtq_f32_s32(emm0);
+
+  e = vaddq_f32(e, one);
+
+  /* part2:
+     if( x < SQRTHF ) {
+       e -= 1;
+       x = x + x - 1.0;
+     } else { x = x - 1.0; }
+  */
+  v4su mask = vcltq_f32(x, vdupq_n_f32(c_cephes_SQRTHF));
+  v4sf tmp = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(x), mask));
+  x = vsubq_f32(x, one);
+  e = vsubq_f32(e, vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(one), mask)));
+  x = vaddq_f32(x, tmp);
+
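+  /* evaluate the cephes log() minimax polynomial of the mantissa with Horner's scheme */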
+  v4sf z = vmulq_f32(x,x);
+
+  v4sf y = vdupq_n_f32(c_cephes_log_p0);
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p1));
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p2));
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p3));
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p4));
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p5));
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p6));
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p7));
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p8));
+  y = vmulq_f32(y, x);
+
+  y = vmulq_f32(y, z);
+
+  tmp = vmulq_f32(e, vdupq_n_f32(c_cephes_log_q1));
+  y = vaddq_f32(y, tmp);
+
+  tmp = vmulq_f32(z, vdupq_n_f32(0.5f));
+  y = vsubq_f32(y, tmp);
+
+  tmp = vmulq_f32(e, vdupq_n_f32(c_cephes_log_q2));
+  x = vaddq_f32(x, y);
+  x = vaddq_f32(x, tmp);
+  x = vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(x), invalid_mask)); // negative arg will be NAN
+  return x;
+}
+
+#define c_exp_hi 88.3762626647949f
+#define c_exp_lo -88.3762626647949f
+
+#define c_cephes_LOG2EF 1.44269504088896341
+#define c_cephes_exp_C1 0.693359375
+#define c_cephes_exp_C2 -2.12194440e-4
+
+#define c_cephes_exp_p0 1.9875691500E-4
+#define c_cephes_exp_p1 1.3981999507E-3
+#define c_cephes_exp_p2 8.3334519073E-3
+#define c_cephes_exp_p3 4.1665795894E-2
+#define c_cephes_exp_p4 1.6666665459E-1
+#define c_cephes_exp_p5 5.0000001201E-1
+
+/* exp() computed for 4 float at once */
+inline v4sf exp_ps(v4sf x) {
+  v4sf tmp, fx;
+
+  v4sf one = vdupq_n_f32(1);
+  x = vminq_f32(x, vdupq_n_f32(c_exp_hi));
+  x = vmaxq_f32(x, vdupq_n_f32(c_exp_lo));
+
+  /* express exp(x) as exp(g + n*log(2)) */
+  fx = vmlaq_f32(vdupq_n_f32(0.5f), x, vdupq_n_f32(c_cephes_LOG2EF));
+
+  /* perform a floorf */
+  tmp = vcvtq_f32_s32(vcvtq_s32_f32(fx));
+
+  /* if greater, subtract 1 */
+  v4su mask = vcgtq_f32(tmp, fx);
+  mask = vandq_u32(mask, vreinterpretq_u32_f32(one));
+
+  fx = vsubq_f32(tmp, vreinterpretq_f32_u32(mask));
+
+  tmp = vmulq_f32(fx, vdupq_n_f32(c_cephes_exp_C1));
+  v4sf z = vmulq_f32(fx, vdupq_n_f32(c_cephes_exp_C2));
+  x = vsubq_f32(x, tmp);
+  x = vsubq_f32(x, z);
+
+  static const float cephes_exp_p[6] = { c_cephes_exp_p0, c_cephes_exp_p1, c_cephes_exp_p2, c_cephes_exp_p3, c_cephes_exp_p4, c_cephes_exp_p5 };
+  v4sf y = vld1q_dup_f32(cephes_exp_p+0);
+  v4sf c1 = vld1q_dup_f32(cephes_exp_p+1);
+  v4sf c2 = vld1q_dup_f32(cephes_exp_p+2);
+  v4sf c3 = vld1q_dup_f32(cephes_exp_p+3);
+  v4sf c4 = vld1q_dup_f32(cephes_exp_p+4);
+  v4sf c5 = vld1q_dup_f32(cephes_exp_p+5);
+
+  y = vmulq_f32(y, x);
+  z = vmulq_f32(x,x);
+  y = vaddq_f32(y, c1);
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, c2);
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, c3);
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, c4);
+  y = vmulq_f32(y, x);
+  y = vaddq_f32(y, c5);
+
+  y = vmulq_f32(y, z);
+  y = vaddq_f32(y, x);
+  y = vaddq_f32(y, one);
+
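+  /* reassemble exp(x) = exp(g) * 2^n: the integer n held in fx becomes the float exponent */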
+  /* build 2^n */
+  int32x4_t mm;
+  mm = vcvtq_s32_f32(fx);
+  mm = vaddq_s32(mm, vdupq_n_s32(0x7f));
+  mm = vshlq_n_s32(mm, 23);
+  v4sf pow2n = vreinterpretq_f32_s32(mm);
+
+  y = vmulq_f32(y, pow2n);
+  return y;
+}
+
+#define c_minus_cephes_DP1 -0.78515625
+#define c_minus_cephes_DP2 -2.4187564849853515625e-4
+#define c_minus_cephes_DP3 -3.77489497744594108e-8
+#define c_sincof_p0 -1.9515295891E-4
+#define c_sincof_p1 8.3321608736E-3
+#define c_sincof_p2 -1.6666654611E-1
+#define c_coscof_p0 2.443315711809948E-005
+#define c_coscof_p1 -1.388731625493765E-003
+#define c_coscof_p2 4.166664568298827E-002
+#define c_cephes_FOPI 1.27323954473516 // 4 / M_PI
+
+/* evaluation of 4 sines & cosines at once.
+
+   The code is the exact rewriting of the cephes sinf function.
+   Precision is excellent as long as x < 8192 (I did not bother to
+   take into account the special handling they have for greater values
+   -- it does not return garbage for arguments over 8192, though, but
+   the extra precision is missing).
+
+   Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
+   surprising but correct result.
+
+   Note also that when you compute sin(x), cos(x) is available at
+   almost no extra price so both sin_ps and cos_ps make use of
+   sincos_ps.
+*/
+inline void sincos_ps(v4sf x, v4sf *ysin, v4sf *ycos) { // any x
+  v4sf y;
+
+  v4su emm2;
+
+  v4su sign_mask_sin, sign_mask_cos;
+  sign_mask_sin = vcltq_f32(x, vdupq_n_f32(0));
+  x = vabsq_f32(x);
+
+  /* scale by 4/Pi */
+  y = vmulq_n_f32(x, c_cephes_FOPI);
+
+  /* store the integer part of y in mm0 */
+  emm2 = vcvtq_u32_f32(y);
+  /* j=(j+1) & (~1) (see the cephes sources) */
+  emm2 = vaddq_u32(emm2, vdupq_n_u32(1));
+  emm2 = vandq_u32(emm2, vdupq_n_u32(~1));
+  y = vcvtq_f32_u32(emm2);
+
+  /* get the polynom selection mask
+     there is one polynom for 0 <= x <= Pi/4
+     and another one for Pi/4 < x <= Pi/2
+
+     Both branches will be computed.
+  */
+  v4su poly_mask = vtstq_u32(emm2, vdupq_n_u32(2));
+
+  /* The magic pass: "Extended precision modular arithmetic"
+     x = ((x - y * DP1) - y * DP2) - y * DP3; */
+  x = vmlaq_n_f32(x, y, c_minus_cephes_DP1);
+  x = vmlaq_n_f32(x, y, c_minus_cephes_DP2);
+  x = vmlaq_n_f32(x, y, c_minus_cephes_DP3);
+
+  sign_mask_sin = veorq_u32(sign_mask_sin, vtstq_u32(emm2, vdupq_n_u32(4)));
+  sign_mask_cos = vtstq_u32(vsubq_u32(emm2, vdupq_n_u32(2)), vdupq_n_u32(4));
+
+  /* Evaluate the first polynom  (0 <= x <= Pi/4) in y1,
+     and the second polynom      (Pi/4 <= x <= 0) in y2 */
+  v4sf z = vmulq_f32(x, x);
+  v4sf y1, y2;
+
+  y1 = vmulq_n_f32(z, c_coscof_p0);
+  y2 = vmulq_n_f32(z, c_sincof_p0);
+  y1 = vaddq_f32(y1, vdupq_n_f32(c_coscof_p1));
+  y2 = vaddq_f32(y2, vdupq_n_f32(c_sincof_p1));
+  y1 = vmulq_f32(y1, z);
+  y2 = vmulq_f32(y2, z);
+  y1 = vaddq_f32(y1, vdupq_n_f32(c_coscof_p2));
+  y2 = vaddq_f32(y2, vdupq_n_f32(c_sincof_p2));
+  y1 = vmulq_f32(y1, z);
+  y2 = vmulq_f32(y2, z);
+  y1 = vmulq_f32(y1, z);
+  y2 = vmulq_f32(y2, x);
+  y1 = vsubq_f32(y1, vmulq_f32(z, vdupq_n_f32(0.5f)));
+  y2 = vaddq_f32(y2, x);
+  y1 = vaddq_f32(y1, vdupq_n_f32(1));
+
+  /* select the correct result from the two polynoms */
+  v4sf ys = vbslq_f32(poly_mask, y1, y2);
+  v4sf yc = vbslq_f32(poly_mask, y2, y1);
+  *ysin = vbslq_f32(sign_mask_sin, vnegq_f32(ys), ys);
+  *ycos = vbslq_f32(sign_mask_cos, yc, vnegq_f32(yc));
+}
+
+inline v4sf sin_ps(v4sf x) {
+  v4sf ysin, ycos;
+  sincos_ps(x, &ysin, &ycos);
+  return ysin;
+}
+
+inline v4sf cos_ps(v4sf x) {
+  v4sf ysin, ycos;
+  sincos_ps(x, &ysin, &ycos);
+  return ycos;
+}

diff --git a/cl_dll/studio_util.cpp b/cl_dll/studio_util.cpp
--- a/cl_dll/studio_util.cpp
+++ b/cl_dll/studio_util.cpp
+#if XASH_SIMD_NEON
+#include <arm_neon.h>
+#include "neon_mathfun.h"
+#endif
 
 /*
 ====================
@@ -17,8 +23,59 @@ AngleMatrix
 ====================
 */
+#if XASH_SIMD_NEON
+const uint32x4_t AngleMatrix_sign0 = vsetq_lane_u32(0x80000000, vdupq_n_u32(0), 0);
+const uint32x4_t AngleMatrix_sign1 = vsetq_lane_u32(0x80000000, vdupq_n_u32(0), 1);
+const uint32x4_t AngleMatrix_sign2 = vsetq_lane_u32(0x80000000, vdupq_n_u32(0), 2);
+#endif
 void AngleMatrix( const float *angles, float (*matrix)[4] )
 {
+#if XASH_SIMD_NEON
+	float32x4x3_t out_reg;
+	float32x4_t angles_reg = {};
+	memcpy(&angles_reg, angles, sizeof(float) * 3);
+
+	float32x4x2_t sp_sy_sr_0_cp_cy_cr_1;
+	sincos_ps(vmulq_n_f32(angles_reg, (M_PI * 2 / 360)), &sp_sy_sr_0_cp_cy_cr_1.val[0], &sp_sy_sr_0_cp_cy_cr_1.val[1]);
+
+	float32x4x2_t sp_sr_cp_cr_sy_0_cy_1 = vuzpq_f32(sp_sy_sr_0_cp_cy_cr_1.val[0], sp_sy_sr_0_cp_cy_cr_1.val[1]);
+	float32x4x2_t sp_cp_sy_cy_sr_cr_0_1 = vzipq_f32(sp_sy_sr_0_cp_cy_cr_1.val[0], sp_sy_sr_0_cp_cy_cr_1.val[1]);
+
+	float32x4_t _0_sr_cr_0 = vextq_f32(sp_sy_sr_0_cp_cy_cr_1.val[0], sp_cp_sy_cy_sr_cr_0_1.val[1], 3);
+	float32x4_t cp_cr_sr_0 = vcombine_f32(vget_high_f32(sp_sr_cp_cr_sy_0_cy_1.val[0]), vget_high_f32(sp_sy_sr_0_cp_cy_cr_1.val[0]));
+	float32x4_t cy_sy_sy_0 = vcombine_f32(vrev64_f32(vget_high_f32(sp_cp_sy_cy_sr_cr_0_1.val[0])), vget_low_f32(sp_sr_cp_cr_sy_0_cy_1.val[1]));
+	float32x4_t sy_cy_cy_1 = vcombine_f32(vget_high_f32(sp_cp_sy_cy_sr_cr_0_1.val[0]), vget_high_f32(sp_sr_cp_cr_sy_0_cy_1.val[1]));
+
+	float32x4_t _0_srsp_crsp_0 = vmulq_laneq_f32(_0_sr_cr_0, sp_sy_sr_0_cp_cy_cr_1.val[0], 0); // *sp
+	out_reg.val[0] = vmulq_laneq_f32(_0_srsp_crsp_0, sp_sy_sr_0_cp_cy_cr_1.val[1], 1); // *cy
+	out_reg.val[1] = vmulq_laneq_f32(_0_srsp_crsp_0, sp_sy_sr_0_cp_cy_cr_1.val[0], 1); // *sy
+
+	cy_sy_sy_0 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(cy_sy_sy_0), AngleMatrix_sign1));
+	sy_cy_cy_1 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(sy_cy_cy_1), AngleMatrix_sign2));
+	out_reg.val[0] = vfmaq_f32(out_reg.val[0], cp_cr_sr_0, cy_sy_sy_0);
+	out_reg.val[1] = vfmaq_f32(out_reg.val[1], cp_cr_sr_0, sy_cy_cy_1);
+
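+	// rows 0 and 1 are complete: {cp*cy, sr*sp*cy - cr*sy, cr*sp*cy + sr*sy, 0}
+	// and {cp*sy, sr*sp*sy + cr*cy, cr*sp*sy - sr*cy, 0}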
+	float32x4_t cp_cr_0_1 = vcombine_f32(vget_high_f32(sp_sr_cp_cr_sy_0_cy_1.val[0]), vget_high_f32(sp_cp_sy_cy_sr_cr_0_1.val[1]));
+	float32x4_t _1_cp_cr_0 = vextq_f32(cp_cr_0_1, cp_cr_0_1, 3);
+	out_reg.val[2] = vmulq_f32(sp_sr_cp_cr_sy_0_cy_1.val[0], _1_cp_cr_0);
+	out_reg.val[2] = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(out_reg.val[2]), AngleMatrix_sign0));
+
+	memcpy(matrix, &out_reg, sizeof(float) * 3 * 4);
+/*
+	matrix[0][0] = cp*cy;
+	matrix[0][1] = sr*sp*cy-cr*sy;
+	matrix[0][2] = cr*sp*cy+sr*sy;
+	matrix[0][3] = 0.0;
+	matrix[1][0] = cp*sy;
+	matrix[1][1] = sr*sp*sy+cr*cy;
+	matrix[1][2] = cr*sp*sy-sr*cy;
+	matrix[1][3] = 0.0;
+	matrix[2][0] = -sp*1;
+	matrix[2][1] = sr*cp;
+	matrix[2][2] = cp*cr;
+	matrix[2][3] = cr*0;
+*/
+#else
 	float angle;
 	float sr, sp, sy, cr, cp, cy;
 
@@ -45,6 +102,7 @@ void AngleMatrix( const float *angles, float (*matrix)[4] )
 	matrix[0][3] = 0.0f;
 	matrix[1][3] = 0.0f;
 	matrix[2][3] = 0.0f;
+#endif
 }
 
 /*
@@ -55,6 +113,13 @@ VectorCompare
 ====================
 */
 int VectorCompare( const float *v1, const float *v2 )
 {
+#if XASH_SIMD_NEON
+	// all four lanes must compare equal (the padding lane compares 0 == 0)
+	float32x4_t v1_reg = {}, v2_reg = {};
+	memcpy(&v1_reg, v1, sizeof(float) * 3);
+	memcpy(&v2_reg, v2, sizeof(float) * 3);
+	return vminvq_u32(vceqq_f32(v1_reg, v2_reg)) != 0;
+#else
 	int i;
 
 	for( i = 0; i < 3; i++ )
@@ -62,6 +127,7 @@ int VectorCompare( const float *v1, const float *v2 )
 		return 0;
 
 	return 1;
+#endif
 }
 
 /*
@@ -72,9 +138,23 @@ CrossProduct
 ====================
 */
 void CrossProduct( const float *v1, const float *v2, float *cross )
 {
+#if XASH_SIMD_NEON
+	float32x4_t v1_reg = {}, v2_reg = {};
+	memcpy(&v1_reg, v1, sizeof(float) * 3);
+	memcpy(&v2_reg, v2, sizeof(float) * 3);
+
+	float32x4_t yzxy_a = vextq_f32(vextq_f32(v1_reg, v1_reg, 3), v1_reg, 2); // [aj, ak, ai, aj]
+	float32x4_t yzxy_b = vextq_f32(vextq_f32(v2_reg, v2_reg, 3), v2_reg, 2); // [bj, bk, bi, bj]
+	float32x4_t zxyy_a = vextq_f32(yzxy_a, yzxy_a, 1); // [ak, ai, aj, aj]
+	float32x4_t zxyy_b = vextq_f32(yzxy_b, yzxy_b, 1); // [bk, bi, bj, bj]
+	float32x4_t cross_reg = vfmsq_f32(vmulq_f32(yzxy_a, zxyy_b), zxyy_a, yzxy_b); // [ajbk-akbj, akbi-aibk, aibj-ajbi, 0]
+
+	memcpy(cross, &v2_reg, sizeof(float) * 3);
+#else
 	cross[0] = v1[1] * v2[2] - v1[2] * v2[1];
 	cross[1] = v1[2] * v2[0] - v1[0] * v2[2];
 	cross[2] = v1[0] * v2[1] - v1[1] * v2[0];
+#endif
 }
 
 /*
@@ -85,9 +165,26 @@ VectorTransform
 ====================
 */
 void VectorTransform( const float *in1, float in2[3][4], float *out )
 {
+#if XASH_SIMD_NEON
+	float32x4_t in1_reg = {};
+	memcpy(&in1_reg, in1, sizeof(float) * 3);
+
+	float32x4x4_t in_t;
+	memcpy(&in_t, in2, sizeof(float) * 3 * 4);
+	memset(&in_t.val[3], 0, sizeof(in_t.val[3]));
+	in_t = vld4q_f32((const float*)&in_t); // de-interleave: val[j] now holds column j
+
+	float32x4_t out_reg = in_t.val[3]; // start from the translation column
+	out_reg = vfmaq_laneq_f32(out_reg, in_t.val[0], in1_reg, 0);
+	out_reg = vfmaq_laneq_f32(out_reg, in_t.val[1], in1_reg, 1);
+	out_reg = vfmaq_laneq_f32(out_reg, in_t.val[2], in1_reg, 2);
+
+	memcpy(out, &out_reg, sizeof(float) * 3);
+#else
 	out[0] = DotProduct(in1, in2[0]) + in2[0][3];
 	out[1] = DotProduct(in1, in2[1]) + in2[1][3];
 	out[2] = DotProduct(in1, in2[2]) + in2[2][3];
+#endif
 }
 
 /*
@@ -98,6 +195,29 @@ ConcatTransforms
 ====================
 */
 void ConcatTransforms( float in1[3][4], float in2[3][4], float out[3][4] )
 {
+#if XASH_SIMD_NEON
+	float32x4x3_t in1_reg, in2_reg;
+	memcpy(&in1_reg, in1, sizeof(float) * 3 * 4);
+	memcpy(&in2_reg, in2, sizeof(float) * 3 * 4);
+	float32x4x3_t out_reg = {};
+
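+	// out[i][:] = in1[i][0]*in2[0][:] + in1[i][1]*in2[1][:] + in1[i][2]*in2[2][:],
+	// with the translation lane seeded from in1[i][3]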
+	out_reg.val[0] = vcopyq_laneq_f32(out_reg.val[0], 3, in1_reg.val[0], 3); // out[0][3] = in1[0][3]
+	out_reg.val[0] = vfmaq_laneq_f32(out_reg.val[0], in2_reg.val[0], in1_reg.val[0], 0); // out[0][n] += in2[0][n] * in1[0][0]
+	out_reg.val[0] = vfmaq_laneq_f32(out_reg.val[0], in2_reg.val[1], in1_reg.val[0], 1); // out[0][n] += in2[1][n] * in1[0][1]
+	out_reg.val[0] = vfmaq_laneq_f32(out_reg.val[0], in2_reg.val[2], in1_reg.val[0], 2); // out[0][n] += in2[2][n] * in1[0][2]
+
+	out_reg.val[1] = vcopyq_laneq_f32(out_reg.val[1], 3, in1_reg.val[1], 3);
+	out_reg.val[1] = vfmaq_laneq_f32(out_reg.val[1], in2_reg.val[0], in1_reg.val[1], 0);
+	out_reg.val[1] = vfmaq_laneq_f32(out_reg.val[1], in2_reg.val[1], in1_reg.val[1], 1);
+	out_reg.val[1] = vfmaq_laneq_f32(out_reg.val[1], in2_reg.val[2], in1_reg.val[1], 2);
+
+	out_reg.val[2] = vcopyq_laneq_f32(out_reg.val[2], 3, in1_reg.val[2], 3);
+	out_reg.val[2] = vfmaq_laneq_f32(out_reg.val[2], in2_reg.val[0], in1_reg.val[2], 0);
+	out_reg.val[2] = vfmaq_laneq_f32(out_reg.val[2], in2_reg.val[1], in1_reg.val[2], 1);
+	out_reg.val[2] = vfmaq_laneq_f32(out_reg.val[2], in2_reg.val[2], in1_reg.val[2], 2);
+
+	memcpy(out, &out_reg, sizeof(float) * 3 * 4);
+#else
 	out[0][0] = in1[0][0] * in2[0][0] + in1[0][1] * in2[1][0] +
 		in1[0][2] * in2[2][0];
 	out[0][1] = in1[0][0] * in2[0][1] + in1[0][1] * in2[1][1] +
@@ -122,6 +242,7 @@ void ConcatTransforms( float in1[3][4], float in2[3][4], float out[3][4] )
 		in1[2][2] * in2[2][2];
 	out[2][3] = in1[2][0] * in2[0][3] + in1[2][1] * in2[1][3] +
 		in1[2][2] * in2[2][3] + in1[2][3];
+#endif
 }
 
 // angles index are not the same as ROLL, PITCH, YAW
 
 /*
@@ -132,8 +253,36 @@ AngleQuaternion
 ====================
 */
+#if XASH_SIMD_NEON
+const uint32x4_t AngleQuaternion_sign2 = vzipq_u32(vdupq_n_u32(0x80000000), vdupq_n_u32(0x00000000)).val[0]; // { 0x80000000, 0x00000000, 0x80000000, 0x00000000 }
+#endif
 void AngleQuaternion( float *angles, vec4_t quaternion )
 {
+#if XASH_SIMD_NEON
+	float32x4_t angles_reg = {};
+	memcpy(&angles_reg, angles, sizeof(float) * 3);
+	float32x4x2_t sr_sp_sy_0_cr_cp_cy_1;
+	sincos_ps(vmulq_n_f32(angles_reg, 0.5f), &sr_sp_sy_0_cr_cp_cy_1.val[0], &sr_sp_sy_0_cr_cp_cy_1.val[1]);
+
+	float32x4x2_t sr_sy_cr_cy_sp_0_cp_1 = vuzpq_f32(sr_sp_sy_0_cr_cp_cy_1.val[0], sr_sp_sy_0_cr_cp_cy_1.val[1]);
+	float32x4_t cp_cp_cp_cp = vdupq_laneq_f32(sr_sp_sy_0_cr_cp_cy_1.val[1], 1);
+	float32x4_t sp_sp_sp_sp = vdupq_laneq_f32(sr_sp_sy_0_cr_cp_cy_1.val[0], 1);
+
+	float32x4_t sr_sy_cr_cy = sr_sy_cr_cy_sp_0_cp_1.val[0];
+	float32x4_t sy_cr_cy_sr = vextq_f32(sr_sy_cr_cy_sp_0_cp_1.val[0], sr_sy_cr_cy_sp_0_cp_1.val[0], 1);
+	float32x4_t cr_cy_sr_sy = vextq_f32(sr_sy_cr_cy_sp_0_cp_1.val[0], sr_sy_cr_cy_sp_0_cp_1.val[0], 2);
+	float32x4_t cy_sr_sy_cr = vextq_f32(sr_sy_cr_cy_sp_0_cp_1.val[0], sr_sy_cr_cy_sp_0_cp_1.val[0], 3);
+	float32x4_t sp_sp_sp_sp_signed = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(sp_sp_sp_sp), AngleQuaternion_sign2));
+
+	float32x4_t left = vmulq_f32(vmulq_f32(sr_sy_cr_cy, cp_cp_cp_cp), cy_sr_sy_cr);
+
+	float32x4_t out_reg = vfmaq_f32(left, vmulq_f32(cr_cy_sr_sy, sp_sp_sp_sp_signed), sy_cr_cy_sr);
+	memcpy(quaternion, &out_reg, sizeof(float) * 4);
+	//quaternion[0] = sr * cp * cy - cr * sp * sy; // X
+	//quaternion[1] = sy * cp * sr + cy * sp * cr; // Y
+	//quaternion[2] = cr * cp * sy - sr * sp * cy; // Z
+	//quaternion[3] = cy * cp * cr + sy * sp * sr; // W
+#else
 	float angle;
 	float sr, sp, sy, cr, cp, cy;
 
@@ -152,6 +301,7 @@ void AngleQuaternion( float *angles, vec4_t quaternion )
 	quaternion[1] = cr * sp * cy + sr * cp * sy; // Y
 	quaternion[2] = cr * cp * sy - sr * sp * cy; // Z
 	quaternion[3] = cr * cp * cy + sr * sp * sy; // W
+#endif
 }
 
 /*
@@ -162,6 +312,37 @@ QuaternionSlerp
 ====================
 */
 void QuaternionSlerp( vec4_t p, vec4_t q, float t, vec4_t qt )
 {
+#if XASH_SIMD_NEON
+	float32x4_t p_reg = {}, q_reg = {};
+	memcpy(&p_reg, p, sizeof(float) * 4);
+	memcpy(&q_reg, q, sizeof(float) * 4);
+
+	// q = (cos(a/2), xsin(a/2), ysin(a/2), zsin(a/2))
+	// cos(a-b) = cosacosb+sinasinb
+	const uint32x4_t signmask = vdupq_n_u32(0x80000000);
+	const float32x4_t one_minus_epsilon = vdupq_n_f32(1.0f - 0.00001f);
+
+	float32x4_t vcosom = vdupq_n_f32(DotProduct(p, q));
+	uint32x4_t sign = vandq_u32(vreinterpretq_u32_f32(vcosom), signmask);
+	q_reg = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(q_reg), sign));
+	vcosom = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(vcosom), sign));
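+	// negating q (and cosom) when p.q < 0 keeps the interpolation on the shorter arc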
+
+	float x[4] = {(1.0f - t), t, 1, 0}; // cosom -> 1, sinom -> 0, sinx ~ x
+	float32x4_t x_reg;
+	memcpy(&x_reg, x, sizeof(float) * 4);
+
+	// if ((1.0 - cosom) > 0.000001) x = sin(x * omega)
+	uint32x4_t cosom_less_than_one = vcltq_f32(vcosom, one_minus_epsilon);
+	float32x4_t vomega = acos_ps(vcosom);
+	x_reg = vbslq_f32(cosom_less_than_one, sin_ps(vmulq_f32(x_reg, vomega)), x_reg);
+
+	// qt = (x[0] * p + x[1] * q) / x[2];
+	float32x4_t qt_reg = vmulq_laneq_f32(p_reg, x_reg, 0);
+	qt_reg = vfmaq_laneq_f32(qt_reg, q_reg, x_reg, 1);
+	qt_reg = vdivq_f32(qt_reg, vdupq_laneq_f32(x_reg, 2)); // vdivq_laneq_f32 ?
+
+	memcpy(qt, &qt_reg, sizeof(float) * 4);
+#else
 	int i;
 	float omega, cosom, sinom, sclp, sclq;
 
@@ -216,6 +397,7 @@ void QuaternionSlerp( vec4_t p, vec4_t q, float t, vec4_t qt )
 			qt[i] = sclp * p[i] + sclq * qt[i];
 		}
 	}
+#endif
 }
 
 /*
@@ -224,8 +406,60 @@ QuaternionMatrix
 ====================
 */
+#if XASH_SIMD_NEON
+const uint32x4_t QuaternionMatrix_sign1 = vsetq_lane_u32(0x80000000, vdupq_n_u32(0x00000000), 0); // { 0x80000000, 0x00000000, 0x00000000, 0x00000000 }
+const uint32x4_t QuaternionMatrix_sign2 = vsetq_lane_u32(0x80000000, vdupq_n_u32(0x00000000), 1); // { 0x00000000, 0x80000000, 0x00000000, 0x00000000 }
+const uint32x4_t QuaternionMatrix_sign3 = vsetq_lane_u32(0x00000000, vdupq_n_u32(0x80000000), 2); // { 0x80000000, 0x80000000, 0x00000000, 0x80000000 }
+const float32x4_t matrix3x4_identity_0 = vsetq_lane_f32(1, vdupq_n_f32(0), 0); // { 1, 0, 0, 0 }
+const float32x4_t matrix3x4_identity_1 = vsetq_lane_f32(1, vdupq_n_f32(0), 1); // { 0, 1, 0, 0 }
+const float32x4_t matrix3x4_identity_2 = vsetq_lane_f32(1, vdupq_n_f32(0), 2); // { 0, 0, 1, 0 }
+#endif
+
 void QuaternionMatrix( vec4_t quaternion, float (*matrix)[4] )
 {
+#if XASH_SIMD_NEON
+	float32x4_t quaternion_reg;
+	memcpy(&quaternion_reg, quaternion, sizeof(float) * 4);
+
+	float32x4_t q1032 = vrev64q_f32(quaternion_reg);
+	float32x4_t q1032_signed = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(q1032), QuaternionMatrix_sign1));
+	float32x4_t q2301 = vextq_f32(quaternion_reg, quaternion_reg, 2);
+	float32x4_t q2301_signed = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(q2301), QuaternionMatrix_sign3));
+	float32x4_t q3210 = vrev64q_f32(q2301);
+	float32x4_t q3210_signed = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(q3210), QuaternionMatrix_sign2));
+
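+	// each output row combines two of these lane-permuted, sign-flipped copies of q,
+	// scaled by 2 and offset against the matching identity row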
+	float32x4x3_t out_reg;
+
+	out_reg.val[0] = vmulq_laneq_f32(q2301_signed, quaternion_reg, 2);
+	out_reg.val[0] = vfmaq_laneq_f32(out_reg.val[0], q1032_signed, quaternion_reg, 1);
+	out_reg.val[0] = vfmaq_n_f32(matrix3x4_identity_0, out_reg.val[0], 2.0f);
+
+	out_reg.val[1] = vmulq_laneq_f32(q3210_signed, quaternion_reg, 2);
+	out_reg.val[1] = vfmsq_laneq_f32(out_reg.val[1], q1032_signed, quaternion_reg, 0);
+	out_reg.val[1] = vfmaq_n_f32(matrix3x4_identity_1, out_reg.val[1], 2.0f);
+
+	out_reg.val[2] = vmulq_laneq_f32(q3210_signed, quaternion_reg, 1);
+	out_reg.val[2] = vfmaq_laneq_f32(out_reg.val[2], q2301_signed, quaternion_reg, 0);
+	out_reg.val[2] = vfmsq_n_f32(matrix3x4_identity_2, out_reg.val[2], 2.0f);
+
+	memcpy(matrix, &out_reg, sizeof(float) * 3 * 4);
+/*
+	matrix[0][0] = 1.0 + 2.0 * ( quaternion[1] * -quaternion[1] + -quaternion[2] * quaternion[2] );
+	matrix[0][1] = 0.0 + 2.0 * ( quaternion[1] * quaternion[0] + -quaternion[3] * quaternion[2] );
+	matrix[0][2] = 0.0 + 2.0 * ( quaternion[1] * quaternion[3] + quaternion[0] * quaternion[2] );
+	matrix[0][3] = 0.0 + 2.0 * ( quaternion[1] * quaternion[2] + -quaternion[1] * quaternion[2] );
+
+	matrix[1][0] = 0.0 + 2.0 * ( -quaternion[0] * -quaternion[1] + quaternion[3] * quaternion[2] );
+	matrix[1][1] = 1.0 + 2.0 * ( -quaternion[0] * quaternion[0] + -quaternion[2] * quaternion[2] );
+	matrix[1][2] = 0.0 + 2.0 * ( -quaternion[0] * quaternion[3] + quaternion[1] * quaternion[2] );
+	matrix[1][3] = 0.0 + 2.0 * ( -quaternion[0] * quaternion[2] + quaternion[0] * quaternion[2] );
+
+	matrix[2][0] = 0.0 + 2.0 * ( -quaternion[0] * -quaternion[2] + -quaternion[3] * quaternion[1] );
+	matrix[2][1] = 0.0 + 2.0 * ( -quaternion[0] * -quaternion[3] + quaternion[2] * quaternion[1] );
+	matrix[2][2] = 1.0 + 2.0 * ( -quaternion[0] * quaternion[0] + -quaternion[1] * quaternion[1] );
+	matrix[2][3] = 0.0 + 2.0 * ( -quaternion[0] * -quaternion[1] + -quaternion[0] * quaternion[1] );
+*/
+#else
 	matrix[0][0] = 1.0f - 2.0f * quaternion[1] * quaternion[1] - 2.0f * quaternion[2] * quaternion[2];
 	matrix[1][0] = 2.0f * quaternion[0] * quaternion[1] + 2.0f * quaternion[3] * quaternion[2];
 	matrix[2][0] = 2.0f * quaternion[0] * quaternion[2] - 2.0f * quaternion[3] * quaternion[1];
@@ -237,6 +471,7 @@ void QuaternionMatrix( vec4_t quaternion, float (*matrix)[4] )
 	matrix[0][2] = 2.0f * quaternion[0] * quaternion[2] + 2.0f * quaternion[3] * quaternion[1];
 	matrix[1][2] = 2.0f * quaternion[1] * quaternion[2] - 2.0f * quaternion[3] * quaternion[0];
 	matrix[2][2] = 1.0f - 2.0f * quaternion[0] * quaternion[0] - 2.0f * quaternion[1] * quaternion[1];
+#endif
 }
 
 /*

From d8e4ec04e4927c7050e0bb10fe63d53e54e9e787 Mon Sep 17 00:00:00 2001
From: Xiaobaibai <824395314@qq.com>
Date: Fri, 14 Jul 2023 10:17:31 +0800
Subject: [PATCH 2/2] fix typo ( no xref at all? )

---
 cl_dll/studio_util.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cl_dll/studio_util.cpp b/cl_dll/studio_util.cpp
index 3b9af7de..2d4bad80 100644
--- a/cl_dll/studio_util.cpp
+++ b/cl_dll/studio_util.cpp
@@ -149,7 +149,7 @@ void CrossProduct( const float *v1, const float *v2, float *cross )
 	float32x4_t zxyy_b = vextq_f32(yzxy_b, yzxy_b, 1); // [bk, bi, bj, bj]
 	float32x4_t cross_reg = vfmsq_f32(vmulq_f32(yzxy_a, zxyy_b), zxyy_a, yzxy_b); // [ajbk-akbj, akbi-aibk, aibj-ajbi, 0]
 
-	memcpy(cross, &v2_reg, sizeof(float) * 3);
+	memcpy(cross, &cross_reg, sizeof(float) * 3);
 #else
 	cross[0] = v1[1] * v2[2] - v1[2] * v2[1];
 	cross[1] = v1[2] * v2[0] - v1[0] * v2[2];