mirror of https://github.com/recp/cglm.git
synced 2025-12-24 12:32:40 +00:00

CREDITS (4 changed lines)
@@ -74,3 +74,7 @@ Link to paper: http://webserver2.tecgraf.puc-rio.br/~mgattass/cg/trbRR/Fast%20Mi
+14. ARM NEON: Matrix Vector Multiplication
+    https://stackoverflow.com/a/57793352/2676533
+16. ARM NEON Div
+    http://github.com/microsoft/DirectXMath
@@ -26,6 +26,10 @@
 # include "simd/avx/affine.h"
 #endif

+#ifdef CGLM_NEON_FP
+# include "simd/neon/affine.h"
+#endif
+
 /*!
  * @brief this is similar to glm_mat4_mul but specialized to affine transform
  *
@@ -49,6 +53,8 @@ glm_mul(mat4 m1, mat4 m2, mat4 dest) {
   glm_mul_avx(m1, m2, dest);
 #elif defined( __SSE__ ) || defined( __SSE2__ )
   glm_mul_sse2(m1, m2, dest);
+#elif defined(CGLM_NEON_FP)
+  glm_mul_neon(m1, m2, dest);
 #else
   float a00 = m1[0][0], a01 = m1[0][1], a02 = m1[0][2], a03 = m1[0][3],
         a10 = m1[1][0], a11 = m1[1][1], a12 = m1[1][2], a13 = m1[1][3],
@@ -103,6 +109,8 @@ void
 glm_mul_rot(mat4 m1, mat4 m2, mat4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
   glm_mul_rot_sse2(m1, m2, dest);
+#elif defined(CGLM_NEON_FP)
+  glm_mul_rot_neon(m1, m2, dest);
 #else
   float a00 = m1[0][0], a01 = m1[0][1], a02 = m1[0][2], a03 = m1[0][3],
         a10 = m1[1][0], a11 = m1[1][1], a12 = m1[1][2], a13 = m1[1][3],
@@ -50,26 +50,22 @@
 CGLM_INLINE
 void
 glm_translate(mat4 m, vec3 v) {
-#if defined( __SSE__ ) || defined( __SSE2__ )
-  glmm_store(m[3],
-             _mm_add_ps(_mm_add_ps(_mm_mul_ps(glmm_load(m[0]),
-                                              _mm_set1_ps(v[0])),
-                                   _mm_mul_ps(glmm_load(m[1]),
-                                              _mm_set1_ps(v[1]))),
-                        _mm_add_ps(_mm_mul_ps(glmm_load(m[2]),
-                                              _mm_set1_ps(v[2])),
-                                   glmm_load(m[3]))))
-  ;
+#if defined(CGLM_SIMD)
+  glmm_128 m0, m1, m2, m3;
+
+  m0 = glmm_load(m[0]);
+  m1 = glmm_load(m[1]);
+  m2 = glmm_load(m[2]);
+  m3 = glmm_load(m[3]);
+
+  glmm_store(m[3],
+             glmm_fmadd(m0, glmm_set1(v[0]),
+                        glmm_fmadd(m1, glmm_set1(v[1]),
+                                   glmm_fmadd(m2, glmm_set1(v[2]), m3))));
 #else
-  vec4 v1, v2, v3;
-
-  glm_vec4_scale(m[0], v[0], v1);
-  glm_vec4_scale(m[1], v[1], v2);
-  glm_vec4_scale(m[2], v[2], v3);
-
-  glm_vec4_add(v1, m[3], m[3]);
-  glm_vec4_add(v2, m[3], m[3]);
-  glm_vec4_add(v3, m[3], m[3]);
+  glm_vec4_muladds(m[0], v[0], m[3]);
+  glm_vec4_muladds(m[1], v[1], m[3]);
+  glm_vec4_muladds(m[2], v[2], m[3]);
 #endif
 }
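Note: the per-ISA intrinsics above collapse into the shared glmm_* wrappers, so
one CGLM_SIMD branch now serves SSE, AVX, and NEON. As a rough scalar sketch of
what the fmadd chain computes (a hypothetical helper, not part of the patch):

/* m[3] += m[0]*v[0] + m[1]*v[1] + m[2]*v[2], per component (column-major) */
static void translate_scalar(float m[4][4], const float v[3]) {
  int i;
  for (i = 0; i < 4; i++)
    m[3][i] += m[0][i] * v[0] + m[1][i] * v[1] + m[2][i] * v[2];
}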
@@ -99,12 +95,8 @@ glm_translate_to(mat4 m, vec3 v, mat4 dest) {
 CGLM_INLINE
 void
 glm_translate_x(mat4 m, float x) {
-#if defined( __SSE__ ) || defined( __SSE2__ )
-  glmm_store(m[3],
-             _mm_add_ps(_mm_mul_ps(glmm_load(m[0]),
-                                   _mm_set1_ps(x)),
-                        glmm_load(m[3])))
-  ;
+#if defined(CGLM_SIMD)
+  glmm_store(m[3], glmm_fmadd(glmm_load(m[0]), glmm_set1(x), glmm_load(m[3])));
 #else
   vec4 v1;
   glm_vec4_scale(m[0], x, v1);
@@ -121,12 +113,8 @@ glm_translate_x(mat4 m, float x) {
 CGLM_INLINE
 void
 glm_translate_y(mat4 m, float y) {
-#if defined( __SSE__ ) || defined( __SSE2__ )
-  glmm_store(m[3],
-             _mm_add_ps(_mm_mul_ps(glmm_load(m[1]),
-                                   _mm_set1_ps(y)),
-                        glmm_load(m[3])))
-  ;
+#if defined(CGLM_SIMD)
+  glmm_store(m[3], glmm_fmadd(glmm_load(m[1]), glmm_set1(y), glmm_load(m[3])));
 #else
   vec4 v1;
   glm_vec4_scale(m[1], y, v1);
@@ -143,12 +131,8 @@ glm_translate_y(mat4 m, float y) {
 CGLM_INLINE
 void
 glm_translate_z(mat4 m, float z) {
-#if defined( __SSE__ ) || defined( __SSE2__ )
-  glmm_store(m[3],
-             _mm_add_ps(_mm_mul_ps(glmm_load(m[2]),
-                                   _mm_set1_ps(z)),
-                        glmm_load(m[3])))
-  ;
+#if defined(CGLM_SIMD)
+  glmm_store(m[3], glmm_fmadd(glmm_load(m[2]), glmm_set1(z), glmm_load(m[3])));
 #else
   vec4 v1;
   glm_vec4_scale(m[2], z, v1);
@@ -562,6 +562,8 @@ float
 glm_mat4_det(mat4 mat) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
   return glm_mat4_det_sse2(mat);
+#elif defined(CGLM_NEON_FP)
+  return glm_mat4_det_neon(mat);
 #else
   /* [square] det(A) = det(At) */
   float t[6];
@@ -595,6 +597,8 @@ void
 glm_mat4_inv(mat4 mat, mat4 dest) {
 #if defined( __SSE__ ) || defined( __SSE2__ )
   glm_mat4_inv_sse2(mat, dest);
+#elif defined(CGLM_NEON_FP)
+  glm_mat4_inv_neon(mat, dest);
 #else
   float t[6];
   float det;
@@ -10,19 +10,42 @@
 #include "intrin.h"
 #ifdef CGLM_SIMD_ARM

+#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || defined(_M_ARM64EC) || defined(__aarch64__)
+# define CGLM_ARM64 1
+#endif
+
 #define glmm_load(p)     vld1q_f32(p)
 #define glmm_store(p, a) vst1q_f32(p, a)

+#define glmm_set1(x) vdupq_n_f32(x)
+#define glmm_128     float32x4_t
+
+#define glmm_splat_x(x) vdupq_lane_f32(vget_low_f32(x),  0)
+#define glmm_splat_y(x) vdupq_lane_f32(vget_low_f32(x),  1)
+#define glmm_splat_z(x) vdupq_lane_f32(vget_high_f32(x), 0)
+#define glmm_splat_w(x) vdupq_lane_f32(vget_high_f32(x), 1)
+
+#define glmm_xor(a, b)                                                        \
+  vreinterpretq_f32_s32(veorq_s32(vreinterpretq_s32_f32(a),                   \
+                                  vreinterpretq_s32_f32(b)))
+
+static inline
+float32x4_t
+glmm_abs(float32x4_t v) {
+  return vabsq_f32(v);
+}
+
 static inline
 float32x4_t
 glmm_vhadd(float32x4_t v) {
   v = vaddq_f32(v, vrev64q_f32(v));
   return vaddq_f32(v, vcombine_f32(vget_high_f32(v), vget_low_f32(v)));
 }

 static inline
 float
 glmm_hadd(float32x4_t v) {
-#if defined(__aarch64__)
+#if CGLM_ARM64
   return vaddvq_f32(v);
 #else
   v = vaddq_f32(v, vrev64q_f32(v));
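Note: CGLM_ARM64 folds the MSVC spellings of 64-bit ARM (_M_ARM64, _M_ARM64EC,
_M_HYBRID_X86_ARM64) and the GCC/Clang one (__aarch64__) into a single switch,
so the AArch64-only fast paths test one macro. A minimal sketch of the pattern
(hypothetical helper mirroring glmm_hadd; assumes <arm_neon.h>):

static inline float hadd4(float32x4_t v) {
#if CGLM_ARM64
  return vaddvq_f32(v);                 /* AArch64 has a horizontal add */
#else
  /* ARMv7: fold the high half into the low half, then pairwise add */
  float32x2_t s = vadd_f32(vget_low_f32(v), vget_high_f32(v));
  return vget_lane_f32(vpadd_f32(s, s), 0);
#endif
}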
@@ -79,11 +102,28 @@ glmm_norm_inf(float32x4_t a) {
   return glmm_hmax(glmm_abs(a));
 }

+static inline
+float32x4_t
+glmm_div(float32x4_t a, float32x4_t b) {
+#if CGLM_ARM64
+  return vdivq_f32(a, b);
+#else
+  /* 2 iterations of Newton-Raphson refinement of reciprocal */
+  float32x4_t r0, r1;
+  r0 = vrecpeq_f32(b);
+  r1 = vrecpsq_f32(r0, b);
+  r0 = vmulq_f32(r1, r0);
+  r1 = vrecpsq_f32(r0, b);
+  r0 = vmulq_f32(r1, r0);
+  return vmulq_f32(a, r0);
+#endif
+}
+
 static inline
 float32x4_t
 glmm_fmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
-#if defined(__aarch64__)
-  return vfmaq_f32(c, a, b);
+#if CGLM_ARM64
+  return vfmaq_f32(c, a, b); /* why is vfmaq_f32 slower than vmlaq_f32 here? */
 #else
   return vmlaq_f32(c, a, b);
 #endif
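Note: 32-bit ARM NEON has no vector divide, so glmm_div refines the ~8-bit
vrecpeq_f32 reciprocal estimate with two Newton-Raphson steps; vrecpsq_f32(r, b)
computes (2 - b*r), the correction factor of one step, and each step roughly
doubles the number of accurate bits. Scalar sketch of the same math
(hypothetical helper, r0 standing in for the hardware estimate):

static float div_nr(float a, float b, float r0) {
  float r;
  r = r0 * (2.0f - b * r0); /* step 1: vrecpsq_f32 + vmulq_f32 */
  r = r  * (2.0f - b * r);  /* step 2: doubles the accurate bits again */
  return a * r;             /* a / b ~= a * (1/b) */
}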
@@ -92,7 +132,7 @@ glmm_fmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
 static inline
 float32x4_t
 glmm_fnmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
-#if defined(__aarch64__)
+#if CGLM_ARM64
   return vfmsq_f32(c, a, b);
 #else
   return vmlsq_f32(c, a, b);

@@ -102,7 +142,7 @@ glmm_fnmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
 static inline
 float32x4_t
 glmm_fmsub(float32x4_t a, float32x4_t b, float32x4_t c) {
-#if defined(__aarch64__)
+#if CGLM_ARM64
   return vfmsq_f32(c, a, b);
 #else
   return vmlsq_f32(c, a, b);
include/cglm/simd/neon/affine.h (new file, 80 lines)

@@ -0,0 +1,80 @@
+/*
+ * Copyright (c), Recep Aslantas.
+ *
+ * MIT License (MIT), http://opensource.org/licenses/MIT
+ * Full license can be found in the LICENSE file
+ */
+
+#ifndef cglm_affine_neon_h
+#define cglm_affine_neon_h
+#if defined(__ARM_NEON_FP)
+
+#include "../../common.h"
+#include "../intrin.h"
+
+CGLM_INLINE
+void
+glm_mul_neon(mat4 m1, mat4 m2, mat4 dest) {
+  /* D = R * L (Column-Major) */
+
+  glmm_128 l0, l1, l2, l3, r0, r1, r2, r3, v0, v1, v2, v3;
+
+  l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]);
+  l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]);
+  l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]);
+  l3 = glmm_load(m1[3]); r3 = glmm_load(m2[3]);
+
+  v0 = vmulq_f32(glmm_splat_x(r0), l0);
+  v1 = vmulq_f32(glmm_splat_x(r1), l0);
+  v2 = vmulq_f32(glmm_splat_x(r2), l0);
+  v3 = vmulq_f32(glmm_splat_x(r3), l0);
+
+  v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0);
+  v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1);
+  v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2);
+  v3 = glmm_fmadd(glmm_splat_y(r3), l1, v3);
+
+  v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0);
+  v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1);
+  v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2);
+  v3 = glmm_fmadd(glmm_splat_z(r3), l2, v3);
+
+  v3 = glmm_fmadd(glmm_splat_w(r3), l3, v3);
+
+  glmm_store(dest[0], v0);
+  glmm_store(dest[1], v1);
+  glmm_store(dest[2], v2);
+  glmm_store(dest[3], v3);
+}
+
+CGLM_INLINE
+void
+glm_mul_rot_neon(mat4 m1, mat4 m2, mat4 dest) {
+  /* D = R * L (Column-Major) */
+
+  glmm_128 l0, l1, l2, r0, r1, r2, v0, v1, v2;
+
+  l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]);
+  l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]);
+  l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]);
+
+  v0 = vmulq_f32(glmm_splat_x(r0), l0);
+  v1 = vmulq_f32(glmm_splat_x(r1), l0);
+  v2 = vmulq_f32(glmm_splat_x(r2), l0);
+
+  v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0);
+  v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1);
+  v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2);
+
+  v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0);
+  v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1);
+  v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2);
+
+  glmm_store(dest[0], v0);
+  glmm_store(dest[1], v1);
+  glmm_store(dest[2], v2);
+  glmm_store(dest[3], glmm_load(m1[3]));
+}
+
+#endif
+#endif /* cglm_affine_neon_h */
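Note: both functions implement the standard column-major product: column j of
dest is a linear combination of m1's columns weighted by the components of
m2's column j. A scalar reference of the full 4x4 case (hypothetical, for
comparison only):

static void mul4_scalar(float m1[4][4], float m2[4][4], float dest[4][4]) {
  int i, j;
  for (j = 0; j < 4; j++)     /* each output column */
    for (i = 0; i < 4; i++)   /* each component of that column */
      dest[j][i] = m1[0][i] * m2[j][0] + m1[1][i] * m2[j][1]
                 + m1[2][i] * m2[j][2] + m1[3][i] * m2[j][3];
}

glm_mul_neon can skip the m2[j][3] term for columns 0-2 because an affine
matrix's last row is (0, 0, 0, 1), so only column 3 needs the full four-term
sum; glm_mul_rot_neon additionally keeps m1's translation column untouched.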
@@ -42,41 +42,38 @@ CGLM_INLINE
 void
 glm_mat4_mul_neon(mat4 m1, mat4 m2, mat4 dest) {
   /* D = R * L (Column-Major) */
-  float32x4_t l0, l1, l2, l3, r, d0, d1, d2, d3;
-
-  l0 = vld1q_f32(m2[0]);
-  l1 = vld1q_f32(m2[1]);
-  l2 = vld1q_f32(m2[2]);
-  l3 = vld1q_f32(m2[3]);
+  glmm_128 l0, l1, l2, l3, r0, r1, r2, r3, v0, v1, v2, v3;

-  r  = vld1q_f32(m1[0]);
-  d0 = vmulq_lane_f32(r, vget_low_f32(l0), 0);
-  d1 = vmulq_lane_f32(r, vget_low_f32(l1), 0);
-  d2 = vmulq_lane_f32(r, vget_low_f32(l2), 0);
-  d3 = vmulq_lane_f32(r, vget_low_f32(l3), 0);
+  l0 = glmm_load(m1[0]); r0 = glmm_load(m2[0]);
+  l1 = glmm_load(m1[1]); r1 = glmm_load(m2[1]);
+  l2 = glmm_load(m1[2]); r2 = glmm_load(m2[2]);
+  l3 = glmm_load(m1[3]); r3 = glmm_load(m2[3]);

-  r  = vld1q_f32(m1[1]);
-  d0 = vmlaq_lane_f32(d0, r, vget_low_f32(l0), 1);
-  d1 = vmlaq_lane_f32(d1, r, vget_low_f32(l1), 1);
-  d2 = vmlaq_lane_f32(d2, r, vget_low_f32(l2), 1);
-  d3 = vmlaq_lane_f32(d3, r, vget_low_f32(l3), 1);
+  v0 = vmulq_f32(glmm_splat_x(r0), l0);
+  v1 = vmulq_f32(glmm_splat_x(r1), l0);
+  v2 = vmulq_f32(glmm_splat_x(r2), l0);
+  v3 = vmulq_f32(glmm_splat_x(r3), l0);

-  r  = vld1q_f32(m1[2]);
-  d0 = vmlaq_lane_f32(d0, r, vget_high_f32(l0), 0);
-  d1 = vmlaq_lane_f32(d1, r, vget_high_f32(l1), 0);
-  d2 = vmlaq_lane_f32(d2, r, vget_high_f32(l2), 0);
-  d3 = vmlaq_lane_f32(d3, r, vget_high_f32(l3), 0);
+  v0 = glmm_fmadd(glmm_splat_y(r0), l1, v0);
+  v1 = glmm_fmadd(glmm_splat_y(r1), l1, v1);
+  v2 = glmm_fmadd(glmm_splat_y(r2), l1, v2);
+  v3 = glmm_fmadd(glmm_splat_y(r3), l1, v3);

-  r  = vld1q_f32(m1[3]);
-  d0 = vmlaq_lane_f32(d0, r, vget_high_f32(l0), 1);
-  d1 = vmlaq_lane_f32(d1, r, vget_high_f32(l1), 1);
-  d2 = vmlaq_lane_f32(d2, r, vget_high_f32(l2), 1);
-  d3 = vmlaq_lane_f32(d3, r, vget_high_f32(l3), 1);
+  v0 = glmm_fmadd(glmm_splat_z(r0), l2, v0);
+  v1 = glmm_fmadd(glmm_splat_z(r1), l2, v1);
+  v2 = glmm_fmadd(glmm_splat_z(r2), l2, v2);
+  v3 = glmm_fmadd(glmm_splat_z(r3), l2, v3);

-  vst1q_f32(dest[0], d0);
-  vst1q_f32(dest[1], d1);
-  vst1q_f32(dest[2], d2);
-  vst1q_f32(dest[3], d3);
+  v0 = glmm_fmadd(glmm_splat_w(r0), l3, v0);
+  v1 = glmm_fmadd(glmm_splat_w(r1), l3, v1);
+  v2 = glmm_fmadd(glmm_splat_w(r2), l3, v2);
+  v3 = glmm_fmadd(glmm_splat_w(r3), l3, v3);
+
+  glmm_store(dest[0], v0);
+  glmm_store(dest[1], v1);
+  glmm_store(dest[2], v2);
+  glmm_store(dest[3], v3);
 }

 CGLM_INLINE
@@ -101,5 +98,216 @@ glm_mat4_mulv_neon(mat4 m, vec4 v, vec4 dest) {
   vst1q_f32(dest, l0);
 }

+CGLM_INLINE
+float
+glm_mat4_det_neon(mat4 mat) {
+  float32x4_t   r0, r1, r2, r3, x0, x1, x2;
+  float32x2_t   ij, op, mn, kl, nn, mm, jj, ii, gh, ef, t12, t34;
+  float32x4x2_t a1;
+  float32x4_t   x3 = { 0.f, -0.f, 0.f, -0.f };
+
+  /* 127 <- 0, [square] det(A) = det(At) */
+  r0 = glmm_load(mat[0]);              /* d c b a */
+  r1 = vrev64q_f32(glmm_load(mat[1])); /* g h e f */
+  r2 = vrev64q_f32(glmm_load(mat[2])); /* l k i j */
+  r3 = vrev64q_f32(glmm_load(mat[3])); /* o p m n */
+
+  gh = vget_high_f32(r1);
+  ef = vget_low_f32(r1);
+  kl = vget_high_f32(r2);
+  ij = vget_low_f32(r2);
+  op = vget_high_f32(r3);
+  mn = vget_low_f32(r3);
+  mm = vdup_lane_f32(mn, 1);
+  nn = vdup_lane_f32(mn, 0);
+  ii = vdup_lane_f32(ij, 1);
+  jj = vdup_lane_f32(ij, 0);
+
+  /*
+   t[1] = j * p - n * l;
+   t[2] = j * o - n * k;
+   t[3] = i * p - m * l;
+   t[4] = i * o - m * k;
+   */
+  x0 = glmm_fnmadd(vcombine_f32(kl, kl), vcombine_f32(nn, mm),
+                   vmulq_f32(vcombine_f32(op, op), vcombine_f32(jj, ii)));
+
+  t12 = vget_low_f32(x0);
+  t34 = vget_high_f32(x0);
+
+  /* 1 3 1 3 2 4 2 4 */
+  a1 = vuzpq_f32(x0, x0);
+
+  /*
+   t[0] = k * p - o * l;
+   t[0] = k * p - o * l;
+   t[5] = i * n - m * j;
+   t[5] = i * n - m * j;
+   */
+  x1 = glmm_fnmadd(vcombine_f32(vdup_lane_f32(kl, 0), jj),
+                   vcombine_f32(vdup_lane_f32(op, 1), mm),
+                   vmulq_f32(vcombine_f32(vdup_lane_f32(op, 0), nn),
+                             vcombine_f32(vdup_lane_f32(kl, 1), ii)));
+
+  /*
+     a * (f * t[0] - g * t[1] + h * t[2])
+   - b * (e * t[0] - g * t[3] + h * t[4])
+   + c * (e * t[1] - f * t[3] + h * t[5])
+   - d * (e * t[2] - f * t[4] + g * t[5])
+   */
+  x2 = glmm_fnmadd(vcombine_f32(vdup_lane_f32(gh, 1), vdup_lane_f32(ef, 0)),
+                   vcombine_f32(vget_low_f32(a1.val[0]), t34),
+                   vmulq_f32(vcombine_f32(ef, vdup_lane_f32(ef, 1)),
+                             vcombine_f32(vget_low_f32(x1), t12)));
+
+  x2 = glmm_fmadd(vcombine_f32(vdup_lane_f32(gh, 0), gh),
+                  vcombine_f32(vget_low_f32(a1.val[1]), vget_high_f32(x1)),
+                  x2);
+
+  x2 = glmm_xor(x2, x3);
+
+  return glmm_hadd(vmulq_f32(x2, r0));
+}
+
+CGLM_INLINE
+void
+glm_mat4_inv_neon(mat4 mat, mat4 dest) {
+  float32x4_t   r0, r1, r2, r3,
+                v0, v1, v2, v3,
+                t0, t1, t2, t3, t4, t5,
+                x0, x1, x2, x3, x4, x5, x6, x7, x8;
+  float32x4x2_t a1;
+  float32x2_t   lp, ko, hg, jn, im, fe, ae, bf, cg, dh;
+  float32x4_t   x9 = { -0.f, 0.f, -0.f, 0.f };
+
+  x8 = vrev64q_f32(x9);
+
+  /* 127 <- 0 */
+  r0 = glmm_load(mat[0]); /* d c b a */
+  r1 = glmm_load(mat[1]); /* h g f e */
+  r2 = glmm_load(mat[2]); /* l k j i */
+  r3 = glmm_load(mat[3]); /* p o n m */
+
+  /* l p k o, j n i m */
+  a1 = vzipq_f32(r3, r2);
+
+  jn = vget_high_f32(a1.val[0]);
+  im = vget_low_f32(a1.val[0]);
+  lp = vget_high_f32(a1.val[1]);
+  ko = vget_low_f32(a1.val[1]);
+  hg = vget_high_f32(r1);
+
+  x1 = vcombine_f32(vdup_lane_f32(lp, 0), lp);                   /* l p p p */
+  x2 = vcombine_f32(vdup_lane_f32(ko, 0), ko);                   /* k o o o */
+  x0 = vcombine_f32(vdup_lane_f32(lp, 1), vdup_lane_f32(hg, 1)); /* h h l l */
+  x3 = vcombine_f32(vdup_lane_f32(ko, 1), vdup_lane_f32(hg, 0)); /* g g k k */
+
+  /* t1[0] = k * p - o * l;
+     t1[0] = k * p - o * l;
+     t2[0] = g * p - o * h;
+     t3[0] = g * l - k * h; */
+  t0 = glmm_fnmadd(x2, x0, vmulq_f32(x3, x1));
+
+  fe = vget_low_f32(r1);
+  x4 = vcombine_f32(vdup_lane_f32(jn, 0), jn);                   /* j n n n */
+  x5 = vcombine_f32(vdup_lane_f32(jn, 1), vdup_lane_f32(fe, 1)); /* f f j j */
+
+  /* t1[1] = j * p - n * l;
+     t1[1] = j * p - n * l;
+     t2[1] = f * p - n * h;
+     t3[1] = f * l - j * h; */
+  t1 = glmm_fnmadd(x4, x0, vmulq_f32(x5, x1));
+
+  /* t1[2] = j * o - n * k;
+     t1[2] = j * o - n * k;
+     t2[2] = f * o - n * g;
+     t3[2] = f * k - j * g; */
+  t2 = glmm_fnmadd(x4, x3, vmulq_f32(x5, x2));
+
+  x6 = vcombine_f32(vdup_lane_f32(im, 1), vdup_lane_f32(fe, 0)); /* e e i i */
+  x7 = vcombine_f32(vdup_lane_f32(im, 0), im);                   /* i m m m */
+
+  /* t1[3] = i * p - m * l;
+     t1[3] = i * p - m * l;
+     t2[3] = e * p - m * h;
+     t3[3] = e * l - i * h; */
+  t3 = glmm_fnmadd(x7, x0, vmulq_f32(x6, x1));
+
+  /* t1[4] = i * o - m * k;
+     t1[4] = i * o - m * k;
+     t2[4] = e * o - m * g;
+     t3[4] = e * k - i * g; */
+  t4 = glmm_fnmadd(x7, x3, vmulq_f32(x6, x2));
+
+  /* t1[5] = i * n - m * j;
+     t1[5] = i * n - m * j;
+     t2[5] = e * n - m * f;
+     t3[5] = e * j - i * f; */
+  t5 = glmm_fnmadd(x7, x5, vmulq_f32(x6, x4));
+
+  /* h d f b, g c e a */
+  a1 = vtrnq_f32(r0, r1);
+
+  x4 = vrev64q_f32(a1.val[0]); /* c g a e */
+  x5 = vrev64q_f32(a1.val[1]); /* d h b f */
+
+  ae = vget_low_f32(x4);
+  cg = vget_high_f32(x4);
+  bf = vget_low_f32(x5);
+  dh = vget_high_f32(x5);
+
+  x0 = vcombine_f32(ae, vdup_lane_f32(ae, 1)); /* a a a e */
+  x1 = vcombine_f32(bf, vdup_lane_f32(bf, 1)); /* b b b f */
+  x2 = vcombine_f32(cg, vdup_lane_f32(cg, 1)); /* c c c g */
+  x3 = vcombine_f32(dh, vdup_lane_f32(dh, 1)); /* d d d h */
+
+  /*
+   dest[0][0] =  f * t1[0] - g * t1[1] + h * t1[2];
+   dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]);
+   dest[0][2] =  b * t2[0] - c * t2[1] + d * t2[2];
+   dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */
+  v0 = glmm_xor(glmm_fmadd(x3, t2, glmm_fnmadd(x2, t1, vmulq_f32(x1, t0))), x8);
+
+  /*
+   dest[2][0] =  e * t1[1] - f * t1[3] + h * t1[5];
+   dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
+   dest[2][2] =  a * t2[1] - b * t2[3] + d * t2[5];
+   dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]); */
+  v2 = glmm_xor(glmm_fmadd(x3, t5, glmm_fnmadd(x1, t3, vmulq_f32(x0, t1))), x8);
+
+  /*
+   dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
+   dest[1][1] =  a * t1[0] - c * t1[3] + d * t1[4];
+   dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
+   dest[1][3] =  a * t3[0] - c * t3[3] + d * t3[4]; */
+  v1 = glmm_xor(glmm_fmadd(x3, t4, glmm_fnmadd(x2, t3, vmulq_f32(x0, t0))), x9);
+
+  /*
+   dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]);
+   dest[3][1] =  a * t1[2] - b * t1[4] + c * t1[5];
+   dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]);
+   dest[3][3] =  a * t3[2] - b * t3[4] + c * t3[5]; */
+  v3 = glmm_xor(glmm_fmadd(x2, t5, glmm_fnmadd(x1, t4, vmulq_f32(x0, t2))), x9);
+
+  /* determinant */
+  x0 = vcombine_f32(vget_low_f32(vzipq_f32(v0, v1).val[0]),
+                    vget_low_f32(vzipq_f32(v2, v3).val[0]));
+
+  /*
+  x0 = glmm_div(glmm_set1(1.0f), glmm_vhadd(vmulq_f32(x0, r0)));
+
+  glmm_store(dest[0], vmulq_f32(v0, x0));
+  glmm_store(dest[1], vmulq_f32(v1, x0));
+  glmm_store(dest[2], vmulq_f32(v2, x0));
+  glmm_store(dest[3], vmulq_f32(v3, x0));
+  */
+
+  x0 = glmm_vhadd(vmulq_f32(x0, r0));
+
+  glmm_store(dest[0], glmm_div(v0, x0));
+  glmm_store(dest[1], glmm_div(v1, x0));
+  glmm_store(dest[2], glmm_div(v2, x0));
+  glmm_store(dest[3], glmm_div(v3, x0));
+}
+
 #endif
 #endif /* cglm_mat4_neon_h */
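Note: the NEON determinant mirrors the scalar first-row cofactor expansion over
the six shared 2x2 minors, and the inverse follows the same adjugate-over-
determinant route. Scalar reference for the determinant (letters follow the
register comments; the m/n/o/p column is renamed n/o/p/q here only to avoid
clashing with the parameter):

static float det4_scalar(float m[4][4]) {
  float a = m[0][0], b = m[0][1], c = m[0][2], d = m[0][3],
        e = m[1][0], f = m[1][1], g = m[1][2], h = m[1][3],
        i = m[2][0], j = m[2][1], k = m[2][2], l = m[2][3],
        n = m[3][0], o = m[3][1], p = m[3][2], q = m[3][3];
  /* six 2x2 minors shared by the four cofactors */
  float t0 = k * q - p * l, t1 = j * q - o * l, t2 = j * p - o * k,
        t3 = i * q - n * l, t4 = i * p - n * k, t5 = i * o - n * j;
  return a * (f * t0 - g * t1 + h * t2)
       - b * (e * t0 - g * t3 + h * t4)
       + c * (e * t1 - f * t3 + h * t5)
       - d * (e * t2 - f * t4 + g * t5);
}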
@@ -25,29 +25,28 @@ glm_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {

   r = glmm_load(m2[0]);
   glmm_store(dest[0],
-             glmm_fmadd(glmm_shuff1x(r, 0), l0,
-                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
-                                   _mm_mul_ps(glmm_shuff1x(r, 2), l2))));
+             glmm_fmadd(glmm_splat(r, 0), l0,
+                        glmm_fmadd(glmm_splat(r, 1), l1,
+                                   _mm_mul_ps(glmm_splat(r, 2), l2))));

   r = glmm_load(m2[1]);
   glmm_store(dest[1],
-             glmm_fmadd(glmm_shuff1x(r, 0), l0,
-                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
-                                   _mm_mul_ps(glmm_shuff1x(r, 2), l2))));
+             glmm_fmadd(glmm_splat(r, 0), l0,
+                        glmm_fmadd(glmm_splat(r, 1), l1,
+                                   _mm_mul_ps(glmm_splat(r, 2), l2))));

   r = glmm_load(m2[2]);
   glmm_store(dest[2],
-             glmm_fmadd(glmm_shuff1x(r, 0), l0,
-                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
-                                   _mm_mul_ps(glmm_shuff1x(r, 2), l2))));
+             glmm_fmadd(glmm_splat(r, 0), l0,
+                        glmm_fmadd(glmm_splat(r, 1), l1,
+                                   _mm_mul_ps(glmm_splat(r, 2), l2))));

   r = glmm_load(m2[3]);
   glmm_store(dest[3],
-             glmm_fmadd(glmm_shuff1x(r, 0), l0,
-                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
-                                   glmm_fmadd(glmm_shuff1x(r, 2), l2,
-                                              _mm_mul_ps(glmm_shuff1x(r, 3),
-                                                         l3)))));
+             glmm_fmadd(glmm_splat(r, 0), l0,
+                        glmm_fmadd(glmm_splat(r, 1), l1,
+                                   glmm_fmadd(glmm_splat(r, 2), l2,
+                                              _mm_mul_ps(glmm_splat(r, 3), l3)))));
 }

 CGLM_INLINE

@@ -63,22 +62,22 @@ glm_mul_rot_sse2(mat4 m1, mat4 m2, mat4 dest) {

   r = glmm_load(m2[0]);
   glmm_store(dest[0],
-             glmm_fmadd(glmm_shuff1x(r, 0), l0,
-                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
-                                   _mm_mul_ps(glmm_shuff1x(r, 2), l2))));
+             glmm_fmadd(glmm_splat(r, 0), l0,
+                        glmm_fmadd(glmm_splat(r, 1), l1,
+                                   _mm_mul_ps(glmm_splat(r, 2), l2))));

   r = glmm_load(m2[1]);
   glmm_store(dest[1],
-             glmm_fmadd(glmm_shuff1x(r, 0), l0,
-                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
-                                   _mm_mul_ps(glmm_shuff1x(r, 2), l2))));
+             glmm_fmadd(glmm_splat(r, 0), l0,
+                        glmm_fmadd(glmm_splat(r, 1), l1,
+                                   _mm_mul_ps(glmm_splat(r, 2), l2))));

   r = glmm_load(m2[2]);
   glmm_store(dest[2],
-             glmm_fmadd(glmm_shuff1x(r, 0), l0,
-                        glmm_fmadd(glmm_shuff1x(r, 1), l1,
-                                   _mm_mul_ps(glmm_shuff1x(r, 2), l2))));
+             glmm_fmadd(glmm_splat(r, 0), l0,
+                        glmm_fmadd(glmm_splat(r, 1), l1,
+                                   _mm_mul_ps(glmm_splat(r, 2), l2))));

   glmm_store(dest[3], l3);
 }
@@ -60,11 +60,10 @@ glm_mat4_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
                                                                               \
     r = glmm_load(m2[C]);                                                     \
     glmm_store(dest[C],                                                       \
-               glmm_fmadd(glmm_shuff1x(r, 0), l0,                             \
-                          glmm_fmadd(glmm_shuff1x(r, 1), l1,                  \
-                                     glmm_fmadd(glmm_shuff1x(r, 2), l2,       \
-                                                _mm_mul_ps(glmm_shuff1x(r, 3),\
-                                                           l3)))));
+               glmm_fmadd(glmm_splat(r, 0), l0,                               \
+                          glmm_fmadd(glmm_splat(r, 1), l1,                    \
+                                     glmm_fmadd(glmm_splat(r, 2), l2,         \
+                                                _mm_mul_ps(glmm_splat(r, 3), l3)))));

   XX(0);
   XX(1);
@@ -80,11 +79,11 @@ glm_mat4_mulv_sse2(mat4 m, vec4 v, vec4 dest) {
   __m128 x0, x1;

   x0 = glmm_load(v);
-  x1 = glmm_fmadd(glmm_load(m[0]), glmm_shuff1x(x0, 0),
-                  glmm_fmadd(glmm_load(m[1]), glmm_shuff1x(x0, 1),
-                             glmm_fmadd(glmm_load(m[2]), glmm_shuff1x(x0, 2),
+  x1 = glmm_fmadd(glmm_load(m[0]), glmm_splat(x0, 0),
+                  glmm_fmadd(glmm_load(m[1]), glmm_splat(x0, 1),
+                             glmm_fmadd(glmm_load(m[2]), glmm_splat(x0, 2),
                                         _mm_mul_ps(glmm_load(m[3]),
-                                                   glmm_shuff1x(x0, 3)))));
+                                                   glmm_splat(x0, 3)))));

   glmm_store(dest, x1);
 }
@@ -27,15 +27,15 @@ glm_quat_mul_sse2(versor p, versor q, versor dest) {
   xp = glmm_load(p); /* 3 2 1 0 */
   xq = glmm_load(q);

-  r = _mm_mul_ps(glmm_shuff1x(xp, 3), xq);
+  r = _mm_mul_ps(glmm_splat(xp, 3), xq);

-  x0 = _mm_xor_ps(glmm_shuff1x(xp, 0), _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
+  x0 = _mm_xor_ps(glmm_splat(xp, 0), _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
   r  = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 0, 1, 2, 3)));

-  x0 = _mm_xor_ps(glmm_shuff1x(xp, 1), _mm_set_ps(-0.f, -0.f, 0.f, 0.f));
+  x0 = _mm_xor_ps(glmm_splat(xp, 1), _mm_set_ps(-0.f, -0.f, 0.f, 0.f));
   r  = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 1, 0, 3, 2)));

-  x0 = _mm_xor_ps(glmm_shuff1x(xp, 2), _mm_set_ps(-0.f, 0.f, 0.f, -0.f));
+  x0 = _mm_xor_ps(glmm_splat(xp, 2), _mm_set_ps(-0.f, 0.f, 0.f, -0.f));
   r  = _mm_add_ps(r, _mm_mul_ps(x0, glmm_shuff1(xq, 2, 3, 0, 1)));

   glmm_store(dest, r);
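Note: only the splat spelling changes here; the _mm_set_ps sign masks encode
the Hamilton product's alternating signs. Scalar form of the same product, in
cglm's x y z w component order (reference only):

static void quat_mul_scalar(const float p[4], const float q[4], float d[4]) {
  d[0] = p[3] * q[0] + p[0] * q[3] + p[1] * q[2] - p[2] * q[1];
  d[1] = p[3] * q[1] - p[0] * q[2] + p[1] * q[3] + p[2] * q[0];
  d[2] = p[3] * q[2] + p[0] * q[1] - p[1] * q[0] + p[2] * q[3];
  d[3] = p[3] * q[3] - p[0] * q[0] - p[1] * q[1] - p[2] * q[2];
}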
@@ -18,6 +18,9 @@
 # define glmm_store(p, a) _mm_store_ps(p, a)
 #endif

+#define glmm_set1(x) _mm_set1_ps(x)
+#define glmm_128     __m128
+
 #ifdef CGLM_USE_INT_DOMAIN
 # define glmm_shuff1(xmm, z, y, x, w)                                         \
     _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(xmm),                 \

@@ -27,7 +30,16 @@
      _mm_shuffle_ps(xmm, xmm, _MM_SHUFFLE(z, y, x, w))
 #endif

+#define glmm_splat(x, lane) glmm_shuff1(x, lane, lane, lane, lane)
+
+#define glmm_splat_x(x) glmm_splat(x, 0)
+#define glmm_splat_y(x) glmm_splat(x, 1)
+#define glmm_splat_z(x) glmm_splat(x, 2)
+#define glmm_splat_w(x) glmm_splat(x, 3)
+
+/* glmm_shuff1x() is DEPRECATED! use glmm_splat() instead */
 #define glmm_shuff1x(xmm, x) glmm_shuff1(xmm, x, x, x, x)

 #define glmm_shuff2(a, b, z0, y0, x0, w0, z1, y1, x1, w1)                     \
   glmm_shuff1(_mm_shuffle_ps(a, b, _MM_SHUFFLE(z0, y0, x0, w0)),              \
               z1, y1, x1, w1)
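Note: glmm_splat names the intent (broadcast one lane to all four) instead of
the shuffle mechanics, and matches the NEON glmm_splat_* macros so shared code
can use one spelling on both ISAs. A usage sketch (hypothetical demo; assumes
<xmmintrin.h> via the surrounding header):

static inline __m128 splat_z_demo(void) {
  __m128 v = _mm_set_ps(4.f, 3.f, 2.f, 1.f); /* lanes: w=4 z=3 y=2 x=1 */
  return glmm_splat(v, 2);                   /* lane 2 broadcast: (3, 3, 3, 3) */
}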
@@ -89,7 +101,7 @@ glmm_vhmin(__m128 v) {
   __m128 x0, x1, x2;
   x0 = _mm_movehl_ps(v, v);    /* [2, 3, 2, 3] */
   x1 = _mm_min_ps(x0, v);      /* [0|2, 1|3, 2|2, 3|3] */
-  x2 = glmm_shuff1x(x1, 1);    /* [1|3, 1|3, 1|3, 1|3] */
+  x2 = glmm_splat(x1, 1);      /* [1|3, 1|3, 1|3, 1|3] */
   return _mm_min_ss(x1, x2);
 }

@@ -105,7 +117,7 @@ glmm_vhmax(__m128 v) {
   __m128 x0, x1, x2;
   x0 = _mm_movehl_ps(v, v);    /* [2, 3, 2, 3] */
   x1 = _mm_max_ps(x0, v);      /* [0|2, 1|3, 2|2, 3|3] */
-  x2 = glmm_shuff1x(x1, 1);    /* [1|3, 1|3, 1|3, 1|3] */
+  x2 = glmm_splat(x1, 1);      /* [1|3, 1|3, 1|3, 1|3] */
   return _mm_max_ss(x1, x2);
 }
@@ -197,6 +209,12 @@ glmm_store3(float v[3], __m128 vx) {
   _mm_store_ss(&v[2], glmm_shuff1(vx, 2, 2, 2, 2));
 }

+static inline
+__m128
+glmm_div(__m128 a, __m128 b) {
+  return _mm_div_ps(a, b);
+}
+
 /* enable FMA macro for MSVC? */
 #if defined(_MSC_VER) && !defined(__FMA__) && defined(__AVX2__)
 # define __FMA__ 1
@@ -224,10 +224,10 @@ glm_vec4_sign(vec4 v, vec4 dest) {

   x0 = glmm_load(v);
   x1 = _mm_set_ps(0.0f, 0.0f, 1.0f, -1.0f);
-  x2 = glmm_shuff1x(x1, 2);
+  x2 = glmm_splat(x1, 2);

-  x3 = _mm_and_ps(_mm_cmpgt_ps(x0, x2), glmm_shuff1x(x1, 1));
-  x4 = _mm_and_ps(_mm_cmplt_ps(x0, x2), glmm_shuff1x(x1, 0));
+  x3 = _mm_and_ps(_mm_cmpgt_ps(x0, x2), glmm_splat(x1, 1));
+  x4 = _mm_and_ps(_mm_cmplt_ps(x0, x2), glmm_splat(x1, 0));

   glmm_store(dest, _mm_or_ps(x3, x4));
 #else
@@ -473,8 +473,8 @@ glm_vec4_scale_as(vec4 v, float s, vec4 dest) {
 CGLM_INLINE
 void
 glm_vec4_div(vec4 a, vec4 b, vec4 dest) {
-#if defined( __SSE__ ) || defined( __SSE2__ )
-  glmm_store(dest, _mm_div_ps(glmm_load(a), glmm_load(b)));
+#if defined(CGLM_SIMD)
+  glmm_store(dest, glmm_div(glmm_load(a), glmm_load(b)));
 #else
   dest[0] = a[0] / b[0];
   dest[1] = a[1] / b[1];
@@ -590,10 +590,8 @@ glm_vec4_muladd(vec4 a, vec4 b, vec4 dest) {
 CGLM_INLINE
 void
 glm_vec4_muladds(vec4 a, float s, vec4 dest) {
-#if defined( __SSE__ ) || defined( __SSE2__ )
-  glmm_store(dest, glmm_fmadd(glmm_load(a), _mm_set1_ps(s), glmm_load(dest)));
-#elif defined(CGLM_NEON_FP)
-  glmm_store(dest, glmm_fmadd(glmm_load(a), vdupq_n_f32(s), glmm_load(dest)));
+#if defined(CGLM_SIMD)
+  glmm_store(dest, glmm_fmadd(glmm_load(a), glmm_set1(s), glmm_load(dest)));
 #else
   dest[0] += a[0] * s;
   dest[1] += a[1] * s;
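Note: with glmm_load/store/set1/div/fmadd now defined for both x86 and NEON,
the vec4 ops collapse to a single CGLM_SIMD branch. New ops can follow the
same pattern; a hypothetical example (not part of this patch):

CGLM_INLINE
void
glm_vec4_nmuladds(vec4 a, float s, vec4 dest) {
  /* dest -= a * s, written once for every SIMD ISA */
#if defined(CGLM_SIMD)
  glmm_store(dest, glmm_fnmadd(glmm_load(a), glmm_set1(s), glmm_load(dest)));
#else
  dest[0] -= a[0] * s;
  dest[1] -= a[1] * s;
  dest[2] -= a[2] * s;
  dest[3] -= a[3] * s;
#endif
}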