From c065d71a2fec192085a5741195135f7d4f5f31f7 Mon Sep 17 00:00:00 2001
From: Recep Aslantas
Date: Fri, 30 Apr 2021 21:12:17 +0300
Subject: [PATCH 01/14] simd, sse: optimize mat4 inv with sse

* reduce a few shuffles
* re-order instructions for ILP

---
 include/cglm/simd/sse2/mat4.h | 160 ++++++++++++++++++++++------------
 1 file changed, 104 insertions(+), 56 deletions(-)

diff --git a/include/cglm/simd/sse2/mat4.h b/include/cglm/simd/sse2/mat4.h
index f5d7135..8545958 100644
--- a/include/cglm/simd/sse2/mat4.h
+++ b/include/cglm/simd/sse2/mat4.h
@@ -143,97 +143,121 @@ glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
          v0, v1, v2, v3,
          t0, t1, t2, t3, t4, t5,
          x0, x1, x2, x3, x4, x5, x6, x7, x8, x9;
-
+
   x8 = _mm_set_ps(-0.f, 0.f, -0.f, 0.f);
   x9 = glmm_shuff1(x8, 2, 1, 2, 1);
-
+
   /* 127 <- 0 */
   r0 = glmm_load(mat[0]); /* d c b a */
   r1 = glmm_load(mat[1]); /* h g f e */
   r2 = glmm_load(mat[2]); /* l k j i */
   r3 = glmm_load(mat[3]); /* p o n m */
-
-  x0 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(3, 2, 3, 2)); /* p o l k */
-  x1 = glmm_shuff1(x0, 1, 3, 3, 3);                     /* l p p p */
+
+  x0 = _mm_movehl_ps(r3, r2);                            /* p o l k */
+  x3 = _mm_movelh_ps(r2, r3);                            /* n m j i */
+  x1 = glmm_shuff1(x0, 1, 3, 3 ,3);                      /* l p p p */
   x2 = glmm_shuff1(x0, 0, 2, 2, 2);                      /* k o o o */
-  x0 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(3, 3, 3, 3)); /* h h l l */
-  x3 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(2, 2, 2, 2)); /* g g k k */
+  x4 = glmm_shuff1(x3, 1, 3, 3, 3);                      /* j n n n */
+  x7 = glmm_shuff1(x3, 0, 2, 2, 2);                      /* i m m m */
+  x6 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 0));  /* e e i i */
+  x5 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(1, 1, 1, 1));  /* f f j j */
+  x3 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(2, 2, 2, 2));  /* g g k k */
+  x0 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(3, 3, 3, 3));  /* h h l l */
+
+  t0 = _mm_mul_ps(x3, x1);
+  t1 = _mm_mul_ps(x5, x1);
+  t2 = _mm_mul_ps(x5, x2);
+  t3 = _mm_mul_ps(x6, x1);
+  t4 = _mm_mul_ps(x6, x2);
+  t5 = _mm_mul_ps(x6, x4);
+
   /*
    t1[0] = k * p - o * l;
    t1[0] = k * p - o * l;
    t2[0] = g * p - o * h;
    t3[0] = g * l - k * h; */
-  t0 = glmm_fnmadd(x2, x0, _mm_mul_ps(x3, x1));
+  t0 = glmm_fnmadd(x2, x0, t0);

-  x4 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(2, 1, 2, 1)); /* o n k j */
-  x4 = glmm_shuff1(x4, 0, 2, 2, 2);                     /* j n n n */
-  x5 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(1, 1, 1, 1)); /* f f j j */
-
   /*
    t1[1] = j * p - n * l;
    t1[1] = j * p - n * l;
    t2[1] = f * p - n * h;
    t3[1] = f * l - j * h; */
-  t1 = glmm_fnmadd(x4, x0, _mm_mul_ps(x5, x1));
+  t1 = glmm_fnmadd(x4, x0, t1);

   /*
    t1[2] = j * o - n * k
    t1[2] = j * o - n * k;
    t2[2] = f * o - n * g;
    t3[2] = f * k - j * g; */
-  t2 = glmm_fnmadd(x4, x3, _mm_mul_ps(x5, x2));
+  t2 = glmm_fnmadd(x4, x3, t2);

-  x6 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 0)); /* e e i i */
-  x7 = glmm_shuff2(r3, r2, 0, 0, 0, 0, 2, 0, 0, 0);     /* i m m m */
-
   /*
    t1[3] = i * p - m * l;
    t1[3] = i * p - m * l;
    t2[3] = e * p - m * h;
    t3[3] = e * l - i * h; */
-  t3 = glmm_fnmadd(x7, x0, _mm_mul_ps(x6, x1));
+  t3 = glmm_fnmadd(x7, x0, t3);

   /*
    t1[4] = i * o - m * k;
    t1[4] = i * o - m * k;
    t2[4] = e * o - m * g;
    t3[4] = e * k - i * g; */
-  t4 = glmm_fnmadd(x7, x3, _mm_mul_ps(x6, x2));
+  t4 = glmm_fnmadd(x7, x3, t4);

   /*
    t1[5] = i * n - m * j;
    t1[5] = i * n - m * j;
    t2[5] = e * n - m * f;
    t3[5] = e * j - i * f; */
-  t5 = glmm_fnmadd(x7, x5, _mm_mul_ps(x6, x4));
+  t5 = glmm_fnmadd(x7, x5, t5);

-  x0 = glmm_shuff2(r1, r0, 0, 0, 0, 0, 2, 2, 2, 0);     /* a a a e */
-  x1 = glmm_shuff2(r1, r0, 1, 1, 1, 1, 2, 2, 2, 0);     /* b b b f */
-  x2 = glmm_shuff2(r1, r0, 2, 2, 2, 2, 2, 2, 2, 0);     /* c c c g */
-  x3 = 
glmm_shuff2(r1, r0, 3, 3, 3, 3, 2, 2, 2, 0); /* d d d h */ + x4 = _mm_movelh_ps(r0, r1); /* f e b a */ + x5 = _mm_movehl_ps(r1, r0); /* h g d c */ + + x0 = glmm_shuff1(x4, 0, 0, 0, 2); /* a a a e */ + x1 = glmm_shuff1(x4, 1, 1, 1, 3); /* b b b f */ + x2 = glmm_shuff1(x5, 0, 0, 0, 2); /* c c c g */ + x3 = glmm_shuff1(x5, 1, 1, 1, 3); /* d d d h */ + + v2 = _mm_mul_ps(x0, t1); + v1 = _mm_mul_ps(x0, t0); + v3 = _mm_mul_ps(x0, t2); + v0 = _mm_mul_ps(x1, t0); + + v2 = glmm_fnmadd(x1, t3, v2); + v3 = glmm_fnmadd(x1, t4, v3); + v0 = glmm_fnmadd(x2, t1, v0); + v1 = glmm_fnmadd(x2, t3, v1); + + v3 = glmm_fmadd(x2, t5, v3); + v0 = glmm_fmadd(x3, t2, v0); + v2 = glmm_fmadd(x3, t5, v2); + v1 = glmm_fmadd(x3, t4, v1); /* dest[0][0] = f * t1[0] - g * t1[1] + h * t1[2]; dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]); dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2]; dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */ - v0 = _mm_xor_ps(glmm_fmadd(x3, t2, glmm_fnmadd(x2, t1, _mm_mul_ps(x1, t0))), x8); + v0 = _mm_xor_ps(v0, x8); /* dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5]; dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]); dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5]; dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/ - v2 = _mm_xor_ps(glmm_fmadd(x3, t5, glmm_fnmadd(x1, t3, _mm_mul_ps(x0, t1))), x8); + v2 = _mm_xor_ps(v2, x8); /* dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]); dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4]; dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]); dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */ - v1 = _mm_xor_ps(glmm_fmadd(x3, t4, glmm_fnmadd(x2, t3, _mm_mul_ps(x0, t0))), x9); + v1 = _mm_xor_ps(v1, x9); /* dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]); dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5]; dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]); dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */ - v3 = _mm_xor_ps(glmm_fmadd(x2, t5, glmm_fnmadd(x1, t4, _mm_mul_ps(x0, t2))), x9); + v3 = _mm_xor_ps(v3, x9); /* determinant */ x0 = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(0, 0, 0, 0)); @@ -255,97 +279,121 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) { v0, v1, v2, v3, t0, t1, t2, t3, t4, t5, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9; - + x8 = _mm_set_ps(-0.f, 0.f, -0.f, 0.f); x9 = glmm_shuff1(x8, 2, 1, 2, 1); - + /* 127 <- 0 */ r0 = glmm_load(mat[0]); /* d c b a */ r1 = glmm_load(mat[1]); /* h g f e */ r2 = glmm_load(mat[2]); /* l k j i */ r3 = glmm_load(mat[3]); /* p o n m */ - - x0 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(3, 2, 3, 2)); /* p o l k */ - x1 = glmm_shuff1(x0, 1, 3, 3, 3); /* l p p p */ + + x0 = _mm_movehl_ps(r3, r2); /* p o l k */ + x3 = _mm_movelh_ps(r2, r3); /* n m j i */ + x1 = glmm_shuff1(x0, 1, 3, 3 ,3); /* l p p p */ x2 = glmm_shuff1(x0, 0, 2, 2, 2); /* k o o o */ - x0 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(3, 3, 3, 3)); /* h h l l */ - x3 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(2, 2, 2, 2)); /* g g k k */ + x4 = glmm_shuff1(x3, 1, 3, 3, 3); /* j n n n */ + x7 = glmm_shuff1(x3, 0, 2, 2, 2); /* i m m m */ + x6 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 0)); /* e e i i */ + x5 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(1, 1, 1, 1)); /* f f j j */ + x3 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(2, 2, 2, 2)); /* g g k k */ + x0 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(3, 3, 3, 3)); /* h h l l */ + + t0 = _mm_mul_ps(x3, x1); + t1 = _mm_mul_ps(x5, x1); + t2 = _mm_mul_ps(x5, x2); + t3 = _mm_mul_ps(x6, x1); + t4 = _mm_mul_ps(x6, x2); + t5 = _mm_mul_ps(x6, x4); + /* t1[0] = k * p - o * l; t1[0] = k * p - o * l; t2[0] = g * p - o * h; t3[0] = g * l - k * h; */ - t0 = glmm_fnmadd(x2, x0, 
_mm_mul_ps(x3, x1)); + t0 = glmm_fnmadd(x2, x0, t0); - x4 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(2, 1, 2, 1)); /* o n k j */ - x4 = glmm_shuff1(x4, 0, 2, 2, 2); /* j n n n */ - x5 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(1, 1, 1, 1)); /* f f j j */ - /* t1[1] = j * p - n * l; t1[1] = j * p - n * l; t2[1] = f * p - n * h; t3[1] = f * l - j * h; */ - t1 = glmm_fnmadd(x4, x0, _mm_mul_ps(x5, x1)); + t1 = glmm_fnmadd(x4, x0, t1); /* t1[2] = j * o - n * k t1[2] = j * o - n * k; t2[2] = f * o - n * g; t3[2] = f * k - j * g; */ - t2 = glmm_fnmadd(x4, x3, _mm_mul_ps(x5, x2)); + t2 = glmm_fnmadd(x4, x3, t2); - x6 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 0)); /* e e i i */ - x7 = glmm_shuff2(r3, r2, 0, 0, 0, 0, 2, 0, 0, 0); /* i m m m */ - /* t1[3] = i * p - m * l; t1[3] = i * p - m * l; t2[3] = e * p - m * h; t3[3] = e * l - i * h; */ - t3 = glmm_fnmadd(x7, x0, _mm_mul_ps(x6, x1)); + t3 = glmm_fnmadd(x7, x0, t3); /* t1[4] = i * o - m * k; t1[4] = i * o - m * k; t2[4] = e * o - m * g; t3[4] = e * k - i * g; */ - t4 = glmm_fnmadd(x7, x3, _mm_mul_ps(x6, x2)); + t4 = glmm_fnmadd(x7, x3, t4); /* t1[5] = i * n - m * j; t1[5] = i * n - m * j; t2[5] = e * n - m * f; t3[5] = e * j - i * f; */ - t5 = glmm_fnmadd(x7, x5, _mm_mul_ps(x6, x4)); + t5 = glmm_fnmadd(x7, x5, t5); - x0 = glmm_shuff2(r1, r0, 0, 0, 0, 0, 2, 2, 2, 0); /* a a a e */ - x1 = glmm_shuff2(r1, r0, 1, 1, 1, 1, 2, 2, 2, 0); /* b b b f */ - x2 = glmm_shuff2(r1, r0, 2, 2, 2, 2, 2, 2, 2, 0); /* c c c g */ - x3 = glmm_shuff2(r1, r0, 3, 3, 3, 3, 2, 2, 2, 0); /* d d d h */ + x4 = _mm_movelh_ps(r0, r1); /* f e b a */ + x5 = _mm_movehl_ps(r1, r0); /* h g d c */ + + x0 = glmm_shuff1(x4, 0, 0, 0, 2); /* a a a e */ + x1 = glmm_shuff1(x4, 1, 1, 1, 3); /* b b b f */ + x2 = glmm_shuff1(x5, 0, 0, 0, 2); /* c c c g */ + x3 = glmm_shuff1(x5, 1, 1, 1, 3); /* d d d h */ + + v2 = _mm_mul_ps(x0, t1); + v1 = _mm_mul_ps(x0, t0); + v3 = _mm_mul_ps(x0, t2); + v0 = _mm_mul_ps(x1, t0); + + v2 = glmm_fnmadd(x1, t3, v2); + v3 = glmm_fnmadd(x1, t4, v3); + v0 = glmm_fnmadd(x2, t1, v0); + v1 = glmm_fnmadd(x2, t3, v1); + + v3 = glmm_fmadd(x2, t5, v3); + v0 = glmm_fmadd(x3, t2, v0); + v2 = glmm_fmadd(x3, t5, v2); + v1 = glmm_fmadd(x3, t4, v1); /* dest[0][0] = f * t1[0] - g * t1[1] + h * t1[2]; dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]); dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2]; dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */ - v0 = _mm_xor_ps(glmm_fmadd(x3, t2, glmm_fnmadd(x2, t1, _mm_mul_ps(x1, t0))), x8); + v0 = _mm_xor_ps(v0, x8); /* dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5]; dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]); dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5]; dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/ - v2 = _mm_xor_ps(glmm_fmadd(x3, t5, glmm_fnmadd(x1, t3, _mm_mul_ps(x0, t1))), x8); + v2 = _mm_xor_ps(v2, x8); /* dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]); dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4]; dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]); dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */ - v1 = _mm_xor_ps(glmm_fmadd(x3, t4, glmm_fnmadd(x2, t3, _mm_mul_ps(x0, t0))), x9); + v1 = _mm_xor_ps(v1, x9); /* dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]); dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5]; dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]); dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */ - v3 = _mm_xor_ps(glmm_fmadd(x2, t5, glmm_fnmadd(x1, t4, _mm_mul_ps(x0, t2))), x9); + v3 = _mm_xor_ps(v3, x9); /* determinant */ x0 = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(0, 0, 0, 0)); From 
a90f706e124412b3aa59f517cad8bfc0f4da05ab Mon Sep 17 00:00:00 2001 From: Recep Aslantas Date: Sat, 1 May 2021 02:17:08 +0300 Subject: [PATCH 02/14] simd, sse: optimize mat4 mul with sse * re-oder instructions for ILP --- include/cglm/simd/sse2/mat4.h | 51 ++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 19 deletions(-) diff --git a/include/cglm/simd/sse2/mat4.h b/include/cglm/simd/sse2/mat4.h index 8545958..5b7befa 100644 --- a/include/cglm/simd/sse2/mat4.h +++ b/include/cglm/simd/sse2/mat4.h @@ -49,28 +49,41 @@ void glm_mat4_mul_sse2(mat4 m1, mat4 m2, mat4 dest) { /* D = R * L (Column-Major) */ - __m128 l0, l1, l2, l3, r; + glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3; - l0 = glmm_load(m1[0]); - l1 = glmm_load(m1[1]); - l2 = glmm_load(m1[2]); - l3 = glmm_load(m1[3]); - -#define XX(C) \ - \ - r = glmm_load(m2[C]); \ - glmm_store(dest[C], \ - glmm_fmadd(glmm_splat(r, 0), l0, \ - glmm_fmadd(glmm_splat(r, 1), l1, \ - glmm_fmadd(glmm_splat(r, 2), l2, \ - _mm_mul_ps(glmm_splat(r, 3), l3))))); + l = glmm_load(m1[0]); + r0 = glmm_load(m2[0]); + r1 = glmm_load(m2[1]); + r2 = glmm_load(m2[2]); + r3 = glmm_load(m2[3]); - XX(0); - XX(1); - XX(2); - XX(3); + v0 = _mm_mul_ps(glmm_splat_x(r0), l); + v1 = _mm_mul_ps(glmm_splat_x(r1), l); + v2 = _mm_mul_ps(glmm_splat_x(r2), l); + v3 = _mm_mul_ps(glmm_splat_x(r3), l); -#undef XX + l = glmm_load(m1[1]); + v0 = glmm_fmadd(glmm_splat_y(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_y(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_y(r2), l, v2); + v3 = glmm_fmadd(glmm_splat_y(r3), l, v3); + + l = glmm_load(m1[2]); + v0 = glmm_fmadd(glmm_splat_z(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_z(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_z(r2), l, v2); + v3 = glmm_fmadd(glmm_splat_z(r3), l, v3); + + l = glmm_load(m1[3]); + v0 = glmm_fmadd(glmm_splat_w(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_w(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_w(r2), l, v2); + v3 = glmm_fmadd(glmm_splat_w(r3), l, v3); + + glmm_store(dest[0], v0); + glmm_store(dest[1], v1); + glmm_store(dest[2], v2); + glmm_store(dest[3], v3); } CGLM_INLINE From 3673622cc35439695e625c3c78afaa4e75700704 Mon Sep 17 00:00:00 2001 From: Recep Aslantas Date: Sat, 1 May 2021 02:17:34 +0300 Subject: [PATCH 03/14] simd, sse: optimize mat4 mul-v with sse * re-oder instructions for ILP --- include/cglm/simd/sse2/mat4.h | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/include/cglm/simd/sse2/mat4.h b/include/cglm/simd/sse2/mat4.h index 5b7befa..5c78499 100644 --- a/include/cglm/simd/sse2/mat4.h +++ b/include/cglm/simd/sse2/mat4.h @@ -89,15 +89,24 @@ glm_mat4_mul_sse2(mat4 m1, mat4 m2, mat4 dest) { CGLM_INLINE void glm_mat4_mulv_sse2(mat4 m, vec4 v, vec4 dest) { - __m128 x0, x1; - + __m128 x0, x1, m0, m1, m2, m3, v0, v1, v2, v3; + + m0 = glmm_load(m[0]); + m1 = glmm_load(m[1]); + m2 = glmm_load(m[2]); + m3 = glmm_load(m[3]); + x0 = glmm_load(v); - x1 = glmm_fmadd(glmm_load(m[0]), glmm_splat(x0, 0), - glmm_fmadd(glmm_load(m[1]), glmm_splat(x0, 1), - glmm_fmadd(glmm_load(m[2]), glmm_splat(x0, 2), - _mm_mul_ps(glmm_load(m[3]), - glmm_splat(x0, 3))))); - + v0 = glmm_splat_x(x0); + v1 = glmm_splat_y(x0); + v2 = glmm_splat_z(x0); + v3 = glmm_splat_w(x0); + + x1 = _mm_mul_ps(m3, v3); + x1 = glmm_fmadd(m2, v2, x1); + x1 = glmm_fmadd(m1, v1, x1); + x1 = glmm_fmadd(m0, v0, x1); + glmm_store(dest, x1); } From d28b381dd69bd2f634c40a0f04d2c280c9f15a6b Mon Sep 17 00:00:00 2001 From: Recep Aslantas Date: Sat, 1 May 2021 02:45:15 +0300 Subject: [PATCH 04/14] armi neon: optimize mat4 mul 
with neon --- include/cglm/simd/neon/mat4.h | 46 +++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/include/cglm/simd/neon/mat4.h b/include/cglm/simd/neon/mat4.h index bb00a41..5b9f014 100644 --- a/include/cglm/simd/neon/mat4.h +++ b/include/cglm/simd/neon/mat4.h @@ -43,32 +43,36 @@ void glm_mat4_mul_neon(mat4 m1, mat4 m2, mat4 dest) { /* D = R * L (Column-Major) */ - glmm_128 l0, l1, l2, l3, r0, r1, r2, r3, v0, v1, v2, v3; + glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3; - l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]); - l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]); - l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]); - l3 = glmm_load(m1[3]); r3 = glmm_load(m2[3]); + l = glmm_load(m1[0]); + r0 = glmm_load(m2[0]); + r1 = glmm_load(m2[1]); + r2 = glmm_load(m2[2]); + r3 = glmm_load(m2[3]); - v0 = vmulq_f32(glmm_splat_x(r0), l0); - v1 = vmulq_f32(glmm_splat_x(r1), l0); - v2 = vmulq_f32(glmm_splat_x(r2), l0); - v3 = vmulq_f32(glmm_splat_x(r3), l0); + v0 = vmulq_f32(glmm_splat_x(r0), l); + v1 = vmulq_f32(glmm_splat_x(r1), l); + v2 = vmulq_f32(glmm_splat_x(r2), l); + v3 = vmulq_f32(glmm_splat_x(r3), l); - v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0); - v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1); - v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2); - v3 = glmm_fmadd(glmm_splat_y(r3), l1, v3); + l = glmm_load(m1[1]); + v0 = glmm_fmadd(glmm_splat_y(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_y(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_y(r2), l, v2); + v3 = glmm_fmadd(glmm_splat_y(r3), l, v3); - v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0); - v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1); - v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2); - v3 = glmm_fmadd(glmm_splat_z(r3), l2, v3); + l = glmm_load(m1[2]); + v0 = glmm_fmadd(glmm_splat_z(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_z(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_z(r2), l, v2); + v3 = glmm_fmadd(glmm_splat_z(r3), l, v3); - v0 = glmm_fmadd(glmm_splat_w(r0), l3, v0); - v1 = glmm_fmadd(glmm_splat_w(r1), l3, v1); - v2 = glmm_fmadd(glmm_splat_w(r2), l3, v2); - v3 = glmm_fmadd(glmm_splat_w(r3), l3, v3); + l = glmm_load(m1[3]); + v0 = glmm_fmadd(glmm_splat_w(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_w(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_w(r2), l, v2); + v3 = glmm_fmadd(glmm_splat_w(r3), l, v3); glmm_store(dest[0], v0); glmm_store(dest[1], v1); From 376cf31ee7bb531fa79cef9135dc37493781f4c1 Mon Sep 17 00:00:00 2001 From: Recep Aslantas Date: Sat, 1 May 2021 02:46:14 +0300 Subject: [PATCH 05/14] armi neon: optimize affine with neon --- include/cglm/simd/neon/affine.h | 70 ++++++++++++++++++--------------- 1 file changed, 38 insertions(+), 32 deletions(-) diff --git a/include/cglm/simd/neon/affine.h b/include/cglm/simd/neon/affine.h index 3e0cccd..e1c23e0 100644 --- a/include/cglm/simd/neon/affine.h +++ b/include/cglm/simd/neon/affine.h @@ -17,29 +17,32 @@ void glm_mul_neon(mat4 m1, mat4 m2, mat4 dest) { /* D = R * L (Column-Major) */ - glmm_128 l0, l1, l2, l3, r0, r1, r2, r3, v0, v1, v2, v3; + glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3; - l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]); - l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]); - l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]); - l3 = glmm_load(m1[3]); r3 = glmm_load(m2[3]); + l = glmm_load(m1[0]); + r0 = glmm_load(m2[0]); + r1 = glmm_load(m2[1]); + r2 = glmm_load(m2[2]); + r3 = glmm_load(m2[3]); - v0 = vmulq_f32(glmm_splat_x(r0), l0); - v1 = vmulq_f32(glmm_splat_x(r1), l0); - v2 = vmulq_f32(glmm_splat_x(r2), l0); - v3 = vmulq_f32(glmm_splat_x(r3), l0); + v0 = 
vmulq_f32(glmm_splat_x(r0), l); + v1 = vmulq_f32(glmm_splat_x(r1), l); + v2 = vmulq_f32(glmm_splat_x(r2), l); + v3 = vmulq_f32(glmm_splat_x(r3), l); - v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0); - v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1); - v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2); - v3 = glmm_fmadd(glmm_splat_y(r3), l1, v3); + l = glmm_load(m1[1]); + v0 = glmm_fmadd(glmm_splat_y(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_y(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_y(r2), l, v2); + v3 = glmm_fmadd(glmm_splat_y(r3), l, v3); - v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0); - v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1); - v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2); - v3 = glmm_fmadd(glmm_splat_z(r3), l2, v3); + l = glmm_load(m1[2]); + v0 = glmm_fmadd(glmm_splat_z(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_z(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_z(r2), l, v2); + v3 = glmm_fmadd(glmm_splat_z(r3), l, v3); - v3 = glmm_fmadd(glmm_splat_w(r3), l3, v3); + v3 = glmm_fmadd(glmm_splat_w(r3), glmm_load(m1[3]), v3); glmm_store(dest[0], v0); glmm_store(dest[1], v1); @@ -52,23 +55,26 @@ void glm_mul_rot_neon(mat4 m1, mat4 m2, mat4 dest) { /* D = R * L (Column-Major) */ - glmm_128 l0, l1, l2, r0, r1, r2, v0, v1, v2; + glmm_128 l, r0, r1, r2, v0, v1, v2; - l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]); - l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]); - l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]); + l = glmm_load(m1[0]); + r0 = glmm_load(m2[0]); + r1 = glmm_load(m2[1]); + r2 = glmm_load(m2[2]); - v0 = vmulq_f32(glmm_splat_x(r0), l0); - v1 = vmulq_f32(glmm_splat_x(r1), l0); - v2 = vmulq_f32(glmm_splat_x(r2), l0); + v0 = vmulq_f32(glmm_splat_x(r0), l); + v1 = vmulq_f32(glmm_splat_x(r1), l); + v2 = vmulq_f32(glmm_splat_x(r2), l); - v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0); - v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1); - v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2); - - v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0); - v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1); - v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2); + l = glmm_load(m1[1]); + v0 = glmm_fmadd(glmm_splat_y(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_y(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_y(r2), l, v2); + + l = glmm_load(m1[2]); + v0 = glmm_fmadd(glmm_splat_z(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_z(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_z(r2), l, v2); glmm_store(dest[0], v0); glmm_store(dest[1], v1); From 5b7bc522acabd384ea52f49a455a01b1f14d697e Mon Sep 17 00:00:00 2001 From: Recep Aslantas Date: Sat, 1 May 2021 02:58:14 +0300 Subject: [PATCH 06/14] sse: optimize affine with sse * re-oder instructions for ILP --- include/cglm/simd/sse2/affine.h | 121 +++++++++++++++++--------------- 1 file changed, 64 insertions(+), 57 deletions(-) diff --git a/include/cglm/simd/sse2/affine.h b/include/cglm/simd/sse2/affine.h index b5d64f0..8f3f2da 100644 --- a/include/cglm/simd/sse2/affine.h +++ b/include/cglm/simd/sse2/affine.h @@ -16,76 +16,80 @@ CGLM_INLINE void glm_mul_sse2(mat4 m1, mat4 m2, mat4 dest) { /* D = R * L (Column-Major) */ - __m128 l0, l1, l2, l3, r; + glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3; - l0 = glmm_load(m1[0]); - l1 = glmm_load(m1[1]); - l2 = glmm_load(m1[2]); - l3 = glmm_load(m1[3]); - - r = glmm_load(m2[0]); - glmm_store(dest[0], - glmm_fmadd(glmm_splat(r, 0), l0, - glmm_fmadd(glmm_splat(r, 1), l1, - _mm_mul_ps(glmm_splat(r, 2), l2)))); - - r = glmm_load(m2[1]); - glmm_store(dest[1], - glmm_fmadd(glmm_splat(r, 0), l0, - glmm_fmadd(glmm_splat(r, 1), l1, - _mm_mul_ps(glmm_splat(r, 2), l2)))); + l = glmm_load(m1[0]); + r0 = glmm_load(m2[0]); + r1 = 
glmm_load(m2[1]); + r2 = glmm_load(m2[2]); + r3 = glmm_load(m2[3]); - r = glmm_load(m2[2]); - glmm_store(dest[2], - glmm_fmadd(glmm_splat(r, 0), l0, - glmm_fmadd(glmm_splat(r, 1), l1, - _mm_mul_ps(glmm_splat(r, 2), l2)))); + v0 = _mm_mul_ps(glmm_splat_x(r0), l); + v1 = _mm_mul_ps(glmm_splat_x(r1), l); + v2 = _mm_mul_ps(glmm_splat_x(r2), l); + v3 = _mm_mul_ps(glmm_splat_x(r3), l); - r = glmm_load(m2[3]); - glmm_store(dest[3], - glmm_fmadd(glmm_splat(r, 0), l0, - glmm_fmadd(glmm_splat(r, 1), l1, - glmm_fmadd(glmm_splat(r, 2), l2, - _mm_mul_ps(glmm_splat(r, 3), l3))))); + l = glmm_load(m1[1]); + v0 = glmm_fmadd(glmm_splat_y(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_y(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_y(r2), l, v2); + v3 = glmm_fmadd(glmm_splat_y(r3), l, v3); + + l = glmm_load(m1[2]); + v0 = glmm_fmadd(glmm_splat_z(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_z(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_z(r2), l, v2); + v3 = glmm_fmadd(glmm_splat_z(r3), l, v3); + + l = glmm_load(m1[3]); + v3 = glmm_fmadd(glmm_splat_w(r3), l, v3); + + glmm_store(dest[0], v0); + glmm_store(dest[1], v1); + glmm_store(dest[2], v2); + glmm_store(dest[3], v3); } CGLM_INLINE void glm_mul_rot_sse2(mat4 m1, mat4 m2, mat4 dest) { /* D = R * L (Column-Major) */ - __m128 l0, l1, l2, l3, r; - l0 = glmm_load(m1[0]); - l1 = glmm_load(m1[1]); - l2 = glmm_load(m1[2]); - l3 = glmm_load(m1[3]); + glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3; - r = glmm_load(m2[0]); - glmm_store(dest[0], - glmm_fmadd(glmm_splat(r, 0), l0, - glmm_fmadd(glmm_splat(r, 1), l1, - _mm_mul_ps(glmm_splat(r, 2), l2)))); - - r = glmm_load(m2[1]); - glmm_store(dest[1], - glmm_fmadd(glmm_splat(r, 0), l0, - glmm_fmadd(glmm_splat(r, 1), l1, - _mm_mul_ps(glmm_splat(r, 2), l2)))); - - - r = glmm_load(m2[2]); - glmm_store(dest[2], - glmm_fmadd(glmm_splat(r, 0), l0, - glmm_fmadd(glmm_splat(r, 1), l1, - _mm_mul_ps(glmm_splat(r, 2), l2)))); + l = glmm_load(m1[0]); + r0 = glmm_load(m2[0]); + r1 = glmm_load(m2[1]); + r2 = glmm_load(m2[2]); + r3 = glmm_load(m2[3]); - glmm_store(dest[3], l3); + v0 = _mm_mul_ps(glmm_splat_x(r0), l); + v1 = _mm_mul_ps(glmm_splat_x(r1), l); + v2 = _mm_mul_ps(glmm_splat_x(r2), l); + v3 = _mm_mul_ps(glmm_splat_x(r3), l); + + l = glmm_load(m1[1]); + v0 = glmm_fmadd(glmm_splat_y(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_y(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_y(r2), l, v2); + v3 = glmm_fmadd(glmm_splat_y(r3), l, v3); + + l = glmm_load(m1[2]); + v0 = glmm_fmadd(glmm_splat_z(r0), l, v0); + v1 = glmm_fmadd(glmm_splat_z(r1), l, v1); + v2 = glmm_fmadd(glmm_splat_z(r2), l, v2); + v3 = glmm_fmadd(glmm_splat_z(r3), l, v3); + + glmm_store(dest[0], v0); + glmm_store(dest[1], v1); + glmm_store(dest[2], v2); + glmm_store(dest[3], glmm_load(m1[3])); } CGLM_INLINE void glm_inv_tr_sse2(mat4 mat) { - __m128 r0, r1, r2, r3, x0, x1; + __m128 r0, r1, r2, r3, x0, x1, x2, x3, x4, x5; r0 = glmm_load(mat[0]); r1 = glmm_load(mat[1]); @@ -95,10 +99,13 @@ glm_inv_tr_sse2(mat4 mat) { _MM_TRANSPOSE4_PS(r0, r1, r2, x1); - x0 = glmm_fmadd(r0, glmm_shuff1(r3, 0, 0, 0, 0), - glmm_fmadd(r1, glmm_shuff1(r3, 1, 1, 1, 1), - _mm_mul_ps(r2, glmm_shuff1(r3, 2, 2, 2, 2)))); - x0 = _mm_xor_ps(x0, _mm_set1_ps(-0.f)); + x2 = glmm_shuff1(r3, 0, 0, 0, 0); + x3 = glmm_shuff1(r3, 1, 1, 1, 1); + x4 = glmm_shuff1(r3, 2, 2, 2, 2); + x5 = _mm_set1_ps(-0.f); + + x0 = glmm_fmadd(r0, x2, glmm_fmadd(r1, x3, _mm_mul_ps(r2, x4))); + x0 = _mm_xor_ps(x0, x5); x0 = _mm_add_ps(x0, x1); From 2be6ac949bdfa389cdc8465b6fb7bb0db9498b6e Mon Sep 17 00:00:00 2001 From: Recep Aslantas Date: Sat, 1 May 2021 
03:18:26 +0300 Subject: [PATCH 07/14] sse: optimize glm_quat_mul with sse --- include/cglm/simd/sse2/quat.h | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/include/cglm/simd/sse2/quat.h b/include/cglm/simd/sse2/quat.h index ae82885..3f7405f 100644 --- a/include/cglm/simd/sse2/quat.h +++ b/include/cglm/simd/sse2/quat.h @@ -22,21 +22,30 @@ glm_quat_mul_sse2(versor p, versor q, versor dest) { a1 a2 − b1 b2 − c1 c2 − d1 d2 */ - __m128 xp, xq, x0, r; + __m128 xp, xq, x1, x2, x3, r, x, y, z; xp = glmm_load(p); /* 3 2 1 0 */ xq = glmm_load(q); + x1 = _mm_set_ps(-0.f, 0.f, -0.f, 0.f); + r = _mm_mul_ps(glmm_splat_w(xp), xq); + + x2 = _mm_unpackhi_ps(x1, x1); + x3 = glmm_shuff1(x1, 3, 2, 0, 1); + x = glmm_splat_x(xp); + y = glmm_splat_y(xp); + z = glmm_splat_z(xp); - r = _mm_mul_ps(glmm_splat(xp, 3), xq); - - x0 = _mm_xor_ps(glmm_splat(xp, 0), _mm_set_ps(-0.f, 0.f, -0.f, 0.f)); - r = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 0, 1, 2, 3))); - - x0 = _mm_xor_ps(glmm_splat(xp, 1), _mm_set_ps(-0.f, -0.f, 0.f, 0.f)); - r = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 1, 0, 3, 2))); - - x0 = _mm_xor_ps(glmm_splat(xp, 2), _mm_set_ps(-0.f, 0.f, 0.f, -0.f)); - r = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 2, 3, 0, 1))); + x = _mm_xor_ps(x, x1); + y = _mm_xor_ps(y, x2); + z = _mm_xor_ps(z, x3); + + x1 = glmm_shuff1(xq, 0, 1, 2, 3); + x2 = glmm_shuff1(xq, 1, 0, 3, 2); + x3 = glmm_shuff1(xq, 2, 3, 0, 1); + + r = glmm_fmadd(x, x1, r); + r = glmm_fmadd(y, x2, r); + r = glmm_fmadd(z, x3, r); glmm_store(dest, r); } From faf6186c29bdea611bb185e4788ea5ed61ee5543 Mon Sep 17 00:00:00 2001 From: Recep Aslantas Date: Sat, 1 May 2021 03:44:04 +0300 Subject: [PATCH 08/14] sse: optimize glm_mat2_mul_sse2 with sse --- include/cglm/simd/sse2/mat2.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/include/cglm/simd/sse2/mat2.h b/include/cglm/simd/sse2/mat2.h index 1f832b0..31b3a29 100644 --- a/include/cglm/simd/sse2/mat2.h +++ b/include/cglm/simd/sse2/mat2.h @@ -15,20 +15,23 @@ CGLM_INLINE void glm_mat2_mul_sse2(mat2 m1, mat2 m2, mat2 dest) { - __m128 x0, x1, x2; + __m128 x0, x1, x2, x3, x4; x1 = glmm_load(m1[0]); /* d c b a */ x2 = glmm_load(m2[0]); /* h g f e */ + x3 = glmm_shuff1(x2, 2, 2, 0, 0); + x4 = glmm_shuff1(x2, 3, 3, 1, 1); + x0 = _mm_movelh_ps(x1, x1); + x2 = _mm_movehl_ps(x1, x1); + /* dest[0][0] = a * e + c * f; dest[0][1] = b * e + d * f; dest[1][0] = a * g + c * h; dest[1][1] = b * g + d * h; */ - x0 = glmm_fmadd(_mm_movelh_ps(x1, x1), glmm_shuff1(x2, 2, 2, 0, 0), - _mm_mul_ps(_mm_movehl_ps(x1, x1), - glmm_shuff1(x2, 3, 3, 1, 1))); + x0 = glmm_fmadd(x0, x3, _mm_mul_ps(x2, x4)); glmm_store(dest[0], x0); } From 0f96eaad203eebb6e4f3d44bf6fbb8a2be8d95b8 Mon Sep 17 00:00:00 2001 From: Recep Aslantas Date: Sat, 1 May 2021 22:55:19 +0300 Subject: [PATCH 09/14] sse2: optimize glm_mat3_mul_sse2() with sse2 * reduce memory access for dest[2][2] * the speed is increased ;) --- include/cglm/simd/sse2/mat3.h | 68 +++++++++++++++++++++++------------ 1 file changed, 46 insertions(+), 22 deletions(-) diff --git a/include/cglm/simd/sse2/mat3.h b/include/cglm/simd/sse2/mat3.h index cda8449..20a55c1 100644 --- a/include/cglm/simd/sse2/mat3.h +++ b/include/cglm/simd/sse2/mat3.h @@ -15,37 +15,61 @@ CGLM_INLINE void glm_mat3_mul_sse2(mat3 m1, mat3 m2, mat3 dest) { - __m128 l0, l1, l2; - __m128 r0, r1, r2; - __m128 x0, x1, x2; - + __m128 l0, l1, l2, r0, r1, r2, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9; + l0 = _mm_loadu_ps(m1[0]); l1 = 
_mm_loadu_ps(&m1[1][1]); - l2 = _mm_set1_ps(m1[2][2]); r0 = _mm_loadu_ps(m2[0]); r1 = _mm_loadu_ps(&m2[1][1]); - r2 = _mm_set1_ps(m2[2][2]); - x1 = glmm_shuff2(l0, l1, 1, 0, 3, 3, 0, 3, 2, 0); - x2 = glmm_shuff2(l1, l2, 0, 0, 3, 2, 0, 2, 1, 0); + x8 = glmm_shuff1(l0, 0, 2, 1, 0); /* a00 a02 a01 a00 */ + x1 = glmm_shuff1(r0, 3, 0, 0, 0); /* b10 b00 b00 b00 */ + x2 = _mm_shuffle_ps(l0, l1, _MM_SHUFFLE(1, 0, 3, 3)); /* a12 a11 a10 a10 */ + x3 = _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(2, 0, 3, 1)); /* b20 b11 b10 b01 */ + x0 = _mm_mul_ps(x8, x1); - x0 = glmm_fmadd(glmm_shuff1(l0, 0, 2, 1, 0), glmm_shuff1(r0, 3, 0, 0, 0), - glmm_fmadd(x1, glmm_shuff2(r0, r1, 0, 0, 1, 1, 2, 0, 0, 0), - _mm_mul_ps(x2, glmm_shuff2(r0, r1, 1, 1, 2, 2, 2, 0, 0, 0)))); + x6 = glmm_shuff1(l0, 1, 0, 2, 1); /* a01 a00 a02 a01 */ + x7 = glmm_shuff1(x3, 3, 3, 1, 1); /* b20 b20 b10 b10 */ + l2 = _mm_load_ss(&m1[2][2]); + r2 = _mm_load_ss(&m2[2][2]); + x1 = _mm_mul_ps(x6, x7); + l2 = glmm_shuff1(l2, 0, 0, 1, 0); /* a22 a22 0.f a22 */ + r2 = glmm_shuff1(r2, 0, 0, 1, 0); /* b22 b22 0.f b22 */ + + x4 = glmm_shuff1(x2, 0, 3, 2, 0); /* a10 a12 a11 a10 */ + x5 = glmm_shuff1(x2, 2, 0, 3, 2); /* a11 a10 a12 a11 */ + x6 = glmm_shuff1(x3, 2, 0, 0, 0); /* b11 b01 b01 b01 */ + x2 = glmm_shuff1(r1, 3, 3, 0, 0); /* b21 b21 b11 b11 */ + + x8 = _mm_unpackhi_ps(x8, x4); /* a10 a00 a12 a02 */ + x9 = _mm_unpackhi_ps(x7, x2); /* b21 b20 b21 b20 */ + + x0 = glmm_fmadd(x4, x6, x0); + x1 = glmm_fmadd(x5, x2, x1); + + x2 = _mm_movehl_ps(l2, l1); /* a22 a22 a21 a20 */ + x3 = glmm_shuff1(x2, 0, 2, 1, 0); /* a20 a22 a21 a20 */ + x2 = glmm_shuff1(x2, 1, 0, 2, 1); /* a21 a20 a22 a21 */ + x4 = _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(1, 1, 2, 2)); /* b12 b12 b02 b02 */ + + x5 = glmm_shuff1(x4, 3, 0, 0, 0); /* b12 b02 b02 b02 */ + x4 = _mm_movehl_ps(r2, x4); /* b22 b22 b12 b12 */ + x0 = glmm_fmadd(x3, x5, x0); + x1 = glmm_fmadd(x2, x4, x1); + + /* + Dot Product : dest[2][2] = a02 * b20 + + a12 * b21 + + a22 * b22 + + 0 * 00 */ + x2 = _mm_movelh_ps(x8, l2); /* 0.f a22 a12 a02 */ + x3 = _mm_movelh_ps(x9, r2); /* 0.f b22 b21 b20 */ + x2 = glmm_vdots(x2, x3); _mm_storeu_ps(dest[0], x0); - - x0 = glmm_fmadd(glmm_shuff1(l0, 1, 0, 2, 1), _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(2, 2, 3, 3)), - glmm_fmadd(glmm_shuff1(x1, 1, 0, 2, 1), glmm_shuff1(r1, 3, 3, 0, 0), - _mm_mul_ps(glmm_shuff1(x2, 1, 0, 2, 1), - _mm_shuffle_ps(r1, r2, _MM_SHUFFLE(0, 0, 1, 1))))); - - _mm_storeu_ps(&dest[1][1], x0); - - dest[2][2] = m1[0][2] * m2[2][0] - + m1[1][2] * m2[2][1] - + m1[2][2] * m2[2][2]; + _mm_storeu_ps(&dest[1][1], x1); + _mm_store_ss(&dest[2][2], x2); } #endif From e1b142bce7afa77019aa95b6b5197fadb71eeb2a Mon Sep 17 00:00:00 2001 From: Recep Aslantas Date: Sat, 1 May 2021 23:03:41 +0300 Subject: [PATCH 10/14] add todo to quat.h --- include/cglm/simd/sse2/quat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/cglm/simd/sse2/quat.h b/include/cglm/simd/sse2/quat.h index 3f7405f..94850cc 100644 --- a/include/cglm/simd/sse2/quat.h +++ b/include/cglm/simd/sse2/quat.h @@ -26,7 +26,7 @@ glm_quat_mul_sse2(versor p, versor q, versor dest) { xp = glmm_load(p); /* 3 2 1 0 */ xq = glmm_load(q); - x1 = _mm_set_ps(-0.f, 0.f, -0.f, 0.f); + x1 = _mm_set_ps(-0.f, 0.f, -0.f, 0.f); /* TODO: _mm_set1_ss() + shuff ? 
*/
   r  = _mm_mul_ps(glmm_splat_w(xp), xq);

   x2 = _mm_unpackhi_ps(x1, x1);

From 28705be5a3a0840e9778b5fecabcd6bafaf92ef4 Mon Sep 17 00:00:00 2001
From: Recep Aslantas
Date: Sat, 1 May 2021 23:16:03 +0300
Subject: [PATCH 11/14] simd, sse: reduce some computation at glm_mul_rot_sse2()

---
 include/cglm/simd/sse2/affine.h | 6 +-----
 include/cglm/simd/sse2/mat3.h   | 4 ++--
 2 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/include/cglm/simd/sse2/affine.h b/include/cglm/simd/sse2/affine.h
index 8f3f2da..99edaa0 100644
--- a/include/cglm/simd/sse2/affine.h
+++ b/include/cglm/simd/sse2/affine.h
@@ -55,30 +55,26 @@
 void
 glm_mul_rot_sse2(mat4 m1, mat4 m2, mat4 dest) {
   /* D = R * L (Column-Major) */
-  glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3;
+  glmm_128 l, r0, r1, r2, v0, v1, v2;

   l  = glmm_load(m1[0]);
   r0 = glmm_load(m2[0]);
   r1 = glmm_load(m2[1]);
   r2 = glmm_load(m2[2]);
-  r3 = glmm_load(m2[3]);

   v0 = _mm_mul_ps(glmm_splat_x(r0), l);
   v1 = _mm_mul_ps(glmm_splat_x(r1), l);
   v2 = _mm_mul_ps(glmm_splat_x(r2), l);
-  v3 = _mm_mul_ps(glmm_splat_x(r3), l);

   l  = glmm_load(m1[1]);
   v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
   v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
   v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
-  v3 = glmm_fmadd(glmm_splat_y(r3), l, v3);

   l  = glmm_load(m1[2]);
   v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
   v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
   v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
-  v3 = glmm_fmadd(glmm_splat_z(r3), l, v3);

   glmm_store(dest[0], v0);
   glmm_store(dest[1], v1);
diff --git a/include/cglm/simd/sse2/mat3.h b/include/cglm/simd/sse2/mat3.h
index 20a55c1..f07320c 100644
--- a/include/cglm/simd/sse2/mat3.h
+++ b/include/cglm/simd/sse2/mat3.h
@@ -67,9 +67,9 @@ glm_mat3_mul_sse2(mat3 m1, mat3 m2, mat3 dest) {
   x3 = _mm_movelh_ps(x9, r2); /* 0.f b22 b21 b20 */
   x2 = glmm_vdots(x2, x3);

-  _mm_storeu_ps(dest[0], x0);
+  _mm_storeu_ps(&dest[0][0], x0);
   _mm_storeu_ps(&dest[1][1], x1);
-  _mm_store_ss(&dest[2][2], x2);
+  _mm_store_ss (&dest[2][2], x2);
 }

 #endif

From d0ab3aaa2e722ac3cbbcb5b94d6a3cba538ed04d Mon Sep 17 00:00:00 2001
From: Recep Aslantas
Date: Fri, 7 May 2021 01:46:03 +0300
Subject: [PATCH 12/14] arm, neon: util macros

---
 include/cglm/simd/arm.h | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/include/cglm/simd/arm.h b/include/cglm/simd/arm.h
index e943ce3..d0c4bc3 100644
--- a/include/cglm/simd/arm.h
+++ b/include/cglm/simd/arm.h
@@ -29,6 +29,15 @@
   vreinterpretq_f32_s32(veorq_s32(vreinterpretq_s32_f32(a),                  \
                                   vreinterpretq_s32_f32(b)))

+#define glmm_swplane(v) vextq_f32(v, v, 2)
+#define glmm_low(x)     vget_low_f32(x)
+#define glmm_high(x)    vget_high_f32(x)
+
+#define glmm_combine_ll(x, y) vcombine_f32(vget_low_f32(x),  vget_low_f32(y))
+#define glmm_combine_hl(x, y) vcombine_f32(vget_high_f32(x), vget_low_f32(y))
+#define glmm_combine_lh(x, y) vcombine_f32(vget_low_f32(x),  vget_high_f32(y))
+#define glmm_combine_hh(x, y) vcombine_f32(vget_high_f32(x), vget_high_f32(y))
+
 static inline
 float32x4_t
 glmm_abs(float32x4_t v) {

From 7f9585ca725029bcc7bad389f677071dcbf6669b Mon Sep 17 00:00:00 2001
From: Recep Aslantas
Date: Fri, 7 May 2021 01:46:24 +0300
Subject: [PATCH 13/14] arm, neon: improve hadd performance

---
 include/cglm/simd/arm.h | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/include/cglm/simd/arm.h b/include/cglm/simd/arm.h
index d0c4bc3..50cec46 100644
--- a/include/cglm/simd/arm.h
+++ b/include/cglm/simd/arm.h
@@ -47,8 +47,13 @@ glmm_abs(float32x4_t v) {
 static inline
 float32x4_t
 glmm_vhadd(float32x4_t v) {
-  v = vaddq_f32(v, vrev64q_f32(v));
-  return vaddq_f32(v, vcombine_f32(vget_high_f32(v), vget_low_f32(v)));
+  return vaddq_f32(vaddq_f32(glmm_splat_x(v), glmm_splat_y(v)),
+                   vaddq_f32(glmm_splat_z(v), glmm_splat_w(v)));
+  /*
+   this seems slower:
+   v = vaddq_f32(v, vrev64q_f32(v));
+   return vaddq_f32(v, vcombine_f32(vget_high_f32(v), vget_low_f32(v)));
+   */
 }

 static inline

From 5c22ca3abbd5d09d2229dbaf54dcb4f3d8514d6a Mon Sep 17 00:00:00 2001
From: Recep Aslantas
Date: Fri, 7 May 2021 01:52:12 +0300
Subject: [PATCH 14/14] arm, neon: use negate instruction instead of xor in glm_inv_tr_neon()

---
 include/cglm/simd/neon/affine.h | 2 +-
 include/cglm/simd/neon/mat2.h   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/cglm/simd/neon/affine.h b/include/cglm/simd/neon/affine.h
index e1c23e0..f4557e7 100644
--- a/include/cglm/simd/neon/affine.h
+++ b/include/cglm/simd/neon/affine.h
@@ -97,7 +97,7 @@ glm_inv_tr_neon(mat4 mat) {
   x0 = glmm_fmadd(r0, glmm_splat_w(r0),
                   glmm_fmadd(r1, glmm_splat_w(r1),
                              vmulq_f32(r2, glmm_splat_w(r2))));
-  x0 = glmm_xor(x0, glmm_set1(-0.f));
+  x0 = vnegq_f32(x0);

   glmm_store(mat[0], r0);
   glmm_store(mat[1], r1);
diff --git a/include/cglm/simd/neon/mat2.h b/include/cglm/simd/neon/mat2.h
index 1bc9c9e..471ebea 100644
--- a/include/cglm/simd/neon/mat2.h
+++ b/include/cglm/simd/neon/mat2.h
@@ -22,8 +22,8 @@ glm_mat2_mul_neon(mat2 m1, mat2 m2, mat2 dest) {
   x1 = glmm_load(m1[0]); /* d c b a */
   x2 = glmm_load(m2[0]); /* h g f e */

-  dc = vget_high_f32(x1);
-  ba = vget_low_f32(x1);
+  dc = vget_high_f32(x1);
+  ba = vget_low_f32(x1);

   /* g g e e, h h f f */
   a1 = vtrnq_f32(x2, x2);