From 376cf31ee7bb531fa79cef9135dc37493781f4c1 Mon Sep 17 00:00:00 2001
From: Recep Aslantas
Date: Sat, 1 May 2021 02:46:14 +0300
Subject: [PATCH] arm neon: optimize affine with neon

---
 include/cglm/simd/neon/affine.h | 70 ++++++++++++++++++---------------
 1 file changed, 38 insertions(+), 32 deletions(-)

diff --git a/include/cglm/simd/neon/affine.h b/include/cglm/simd/neon/affine.h
index 3e0cccd..e1c23e0 100644
--- a/include/cglm/simd/neon/affine.h
+++ b/include/cglm/simd/neon/affine.h
@@ -17,29 +17,32 @@
 void
 glm_mul_neon(mat4 m1, mat4 m2, mat4 dest) {
   /* D = R * L (Column-Major) */
-  glmm_128 l0, l1, l2, l3, r0, r1, r2, r3, v0, v1, v2, v3;
+  glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3;
 
-  l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]);
-  l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]);
-  l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]);
-  l3 = glmm_load(m1[3]); r3 = glmm_load(m2[3]);
+  l  = glmm_load(m1[0]);
+  r0 = glmm_load(m2[0]);
+  r1 = glmm_load(m2[1]);
+  r2 = glmm_load(m2[2]);
+  r3 = glmm_load(m2[3]);
 
-  v0 = vmulq_f32(glmm_splat_x(r0), l0);
-  v1 = vmulq_f32(glmm_splat_x(r1), l0);
-  v2 = vmulq_f32(glmm_splat_x(r2), l0);
-  v3 = vmulq_f32(glmm_splat_x(r3), l0);
+  v0 = vmulq_f32(glmm_splat_x(r0), l);
+  v1 = vmulq_f32(glmm_splat_x(r1), l);
+  v2 = vmulq_f32(glmm_splat_x(r2), l);
+  v3 = vmulq_f32(glmm_splat_x(r3), l);
 
-  v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0);
-  v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1);
-  v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2);
-  v3 = glmm_fmadd(glmm_splat_y(r3), l1, v3);
+  l  = glmm_load(m1[1]);
+  v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
+  v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
+  v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
+  v3 = glmm_fmadd(glmm_splat_y(r3), l, v3);
 
-  v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0);
-  v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1);
-  v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2);
-  v3 = glmm_fmadd(glmm_splat_z(r3), l2, v3);
+  l  = glmm_load(m1[2]);
+  v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
+  v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
+  v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
+  v3 = glmm_fmadd(glmm_splat_z(r3), l, v3);
 
-  v3 = glmm_fmadd(glmm_splat_w(r3), l3, v3);
+  v3 = glmm_fmadd(glmm_splat_w(r3), glmm_load(m1[3]), v3);
 
   glmm_store(dest[0], v0);
   glmm_store(dest[1], v1);
@@ -52,23 +55,26 @@
 void
 glm_mul_rot_neon(mat4 m1, mat4 m2, mat4 dest) {
   /* D = R * L (Column-Major) */
-  glmm_128 l0, l1, l2, r0, r1, r2, v0, v1, v2;
+  glmm_128 l, r0, r1, r2, v0, v1, v2;
 
-  l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]);
-  l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]);
-  l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]);
+  l  = glmm_load(m1[0]);
+  r0 = glmm_load(m2[0]);
+  r1 = glmm_load(m2[1]);
+  r2 = glmm_load(m2[2]);
 
-  v0 = vmulq_f32(glmm_splat_x(r0), l0);
-  v1 = vmulq_f32(glmm_splat_x(r1), l0);
-  v2 = vmulq_f32(glmm_splat_x(r2), l0);
+  v0 = vmulq_f32(glmm_splat_x(r0), l);
+  v1 = vmulq_f32(glmm_splat_x(r1), l);
+  v2 = vmulq_f32(glmm_splat_x(r2), l);
 
-  v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0);
-  v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1);
-  v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2);
-
-  v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0);
-  v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1);
-  v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2);
+  l  = glmm_load(m1[1]);
+  v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
+  v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
+  v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
+
+  l  = glmm_load(m1[2]);
+  v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
+  v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
+  v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
 
   glmm_store(dest[0], v0);
   glmm_store(dest[1], v1);