simd128: enable in headers

myfreeer
2023-03-06 17:07:45 +08:00
parent 6b7a63953c
commit 71c585b159
4 changed files with 76 additions and 1 deletion
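Every hunk below follows the same compile-time dispatch ladder: an SSE path, a new WebAssembly SIMD128 path guarded by __wasm__ and __wasm_simd128__ (clang and Emscripten define the latter only when -msimd128 is passed), optionally a NEON path, and a scalar fallback. A minimal sketch of the pattern, using a hypothetical my_vec4_add that is not part of cglm:

#if defined( __SSE__ ) || defined( __SSE2__ )
#  include <xmmintrin.h>
#elif defined(__wasm__) && defined(__wasm_simd128__)
#  include <wasm_simd128.h>
#endif

typedef float vec4[4];

/* hypothetical example of the fallback ladder used throughout this commit */
static inline void
my_vec4_add(vec4 a, vec4 b, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
  _mm_storeu_ps(dest, _mm_add_ps(_mm_loadu_ps(a), _mm_loadu_ps(b)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
  wasm_v128_store(dest, wasm_f32x4_add(wasm_v128_load(a), wasm_v128_load(b)));
#else
  dest[0] = a[0] + b[0];
  dest[1] = a[1] + b[1];
  dest[2] = a[2] + b[2];
  dest[3] = a[3] + b[3];
#endif
}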

include/cglm/mat2.h

@@ -168,6 +168,8 @@ void
glm_mat2_transpose_to(mat2 m, mat2 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glm_mat2_transp_sse2(m, dest);
#elif defined(__wasm__) && defined(__wasm_simd128__)
glm_mat2_transp_wasm(m, dest);
#else
dest[0][0] = m[0][0];
dest[0][1] = m[1][0];
@@ -230,6 +232,8 @@ void
glm_mat2_scale(mat2 m, float s) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(m[0], _mm_mul_ps(_mm_loadu_ps(m[0]), _mm_set1_ps(s)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(m[0], wasm_f32x4_mul(wasm_v128_load(m[0]), wasm_f32x4_splat(s)));
#elif defined(CGLM_NEON_FP)
vst1q_f32(m[0], vmulq_f32(vld1q_f32(m[0]), vdupq_n_f32(s)));
#else

include/cglm/simd/wasm.h

@@ -6,7 +6,7 @@
#include <wasm_simd128.h>
#define glmm_load(p) wasm_v128_load(p)
-#define glmm_store(p, a) wasm_v128_store(p, a)
+#define glmm_store(p, a) wasm_v128_store(p, (a))
#define glmm_set1(x) wasm_f32x4_splat(x)
#define glmm_128 v128_t

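The only change in this file is macro hygiene: the store macro's second argument is now expanded as (a) instead of a. Since wasm_v128_store is a function here, the parentheses are purely defensive, but the rule they follow is the usual one for macro parameters; a hypothetical example (not from cglm) of what unparenthesized expansion can do:

#define scale2_bad(x)  2 * x      /* scale2_bad(a + b) expands to 2 * a + b */
#define scale2_ok(x)   (2 * (x))  /* parentheses keep the argument grouped  */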
include/cglm/vec4-ext.h

@@ -47,6 +47,8 @@ void
glm_vec4_broadcast(float val, vec4 d) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(d, _mm_set1_ps(val));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(d, wasm_f32x4_splat(val));
#else
d[0] = d[1] = d[2] = d[3] = val;
#endif
@@ -63,6 +65,8 @@ void
glm_vec4_fill(vec4 v, float val) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(v, _mm_set1_ps(val));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(v, wasm_f32x4_splat(val));
#else
v[0] = v[1] = v[2] = v[3] = val;
#endif
@@ -249,6 +253,8 @@ void
glm_vec4_abs(vec4 v, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, glmm_abs(glmm_load(v)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, glmm_abs(glmm_load(v)));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vabsq_f32(vld1q_f32(v)));
#else
@@ -286,6 +292,8 @@ float
glm_vec4_hadd(vec4 v) {
#if defined( __SSE__ ) || defined( __SSE2__ )
return glmm_hadd(glmm_load(v));
#elif defined(__wasm__) && defined(__wasm_simd128__)
return glmm_hadd(glmm_load(v));
#else
return v[0] + v[1] + v[2] + v[3];
#endif
@@ -302,6 +310,8 @@ void
glm_vec4_sqrt(vec4 v, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, _mm_sqrt_ps(glmm_load(v)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_sqrt(glmm_load(v)));
#else
dest[0] = sqrtf(v[0]);
dest[1] = sqrtf(v[1]);

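Note that the SIMD128 branches in glm_vec4_abs and glm_vec4_hadd are textually identical to the SSE branches: both only go through the glmm_* wrappers, which the wasm header above maps to SIMD128 types and intrinsics. The wasm definitions of glmm_abs and glmm_hadd are not shown in this diff; a plausible sketch of what they could look like (an assumption, not taken from the commit):

#define glmm_abs(x) wasm_f32x4_abs(x)

/* horizontal add: sum of the four float lanes */
static inline float
glmm_hadd(glmm_128 v) {
  return wasm_f32x4_extract_lane(v, 0) + wasm_f32x4_extract_lane(v, 1)
       + wasm_f32x4_extract_lane(v, 2) + wasm_f32x4_extract_lane(v, 3);
}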
include/cglm/vec4.h

@@ -139,6 +139,8 @@ void
glm_vec4_copy(vec4 v, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, glmm_load(v));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, glmm_load(v));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vld1q_f32(v));
#else
@@ -176,6 +178,8 @@ void
glm_vec4_zero(vec4 v) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(v, _mm_setzero_ps());
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(v, wasm_f32x4_const(0.f, 0.f, 0.f, 0.f));
#elif defined(CGLM_NEON_FP)
vst1q_f32(v, vdupq_n_f32(0.0f));
#else
@@ -196,6 +200,8 @@ void
glm_vec4_one(vec4 v) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(v, _mm_set1_ps(1.0f));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(v, wasm_f32x4_splat(1.0f));
#elif defined(CGLM_NEON_FP)
vst1q_f32(v, vdupq_n_f32(1.0f));
#else
@@ -322,6 +328,8 @@ void
glm_vec4_add(vec4 a, vec4 b, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, _mm_add_ps(glmm_load(a), glmm_load(b)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_add(glmm_load(a), glmm_load(b)));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vaddq_f32(vld1q_f32(a), vld1q_f32(b)));
#else
@@ -344,6 +352,8 @@ void
glm_vec4_adds(vec4 v, float s, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, _mm_add_ps(glmm_load(v), _mm_set1_ps(s)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_add(glmm_load(v), wasm_f32x4_splat(s)));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vaddq_f32(vld1q_f32(v), vdupq_n_f32(s)));
#else
@@ -366,6 +376,8 @@ void
glm_vec4_sub(vec4 a, vec4 b, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, _mm_sub_ps(glmm_load(a), glmm_load(b)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_sub(glmm_load(a), glmm_load(b)));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vsubq_f32(vld1q_f32(a), vld1q_f32(b)));
#else
@@ -388,6 +400,8 @@ void
glm_vec4_subs(vec4 v, float s, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, _mm_sub_ps(glmm_load(v), _mm_set1_ps(s)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_sub(glmm_load(v), wasm_f32x4_splat(s)));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vsubq_f32(vld1q_f32(v), vdupq_n_f32(s)));
#else
@@ -432,6 +446,8 @@ void
glm_vec4_scale(vec4 v, float s, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, _mm_mul_ps(glmm_load(v), _mm_set1_ps(s)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_mul(glmm_load(v), wasm_f32x4_splat(s)));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vmulq_f32(vld1q_f32(v), vdupq_n_f32(s)));
#else
@@ -495,6 +511,8 @@ void
glm_vec4_divs(vec4 v, float s, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, _mm_div_ps(glmm_load(v), _mm_set1_ps(s)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_div(glmm_load(v), wasm_f32x4_splat(s)));
#else
glm_vec4_scale(v, 1.0f / s, dest);
#endif
@@ -516,6 +534,10 @@ glm_vec4_addadd(vec4 a, vec4 b, vec4 dest) {
glmm_store(dest, _mm_add_ps(glmm_load(dest),
_mm_add_ps(glmm_load(a),
glmm_load(b))));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_add(glmm_load(dest),
wasm_f32x4_add(glmm_load(a),
glmm_load(b))));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vaddq_f32(vld1q_f32(dest),
vaddq_f32(vld1q_f32(a),
@@ -544,6 +566,10 @@ glm_vec4_subadd(vec4 a, vec4 b, vec4 dest) {
glmm_store(dest, _mm_add_ps(glmm_load(dest),
_mm_sub_ps(glmm_load(a),
glmm_load(b))));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_add(glmm_load(dest),
wasm_f32x4_sub(glmm_load(a),
glmm_load(b))));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vaddq_f32(vld1q_f32(dest),
vsubq_f32(vld1q_f32(a),
@@ -616,6 +642,10 @@ glm_vec4_maxadd(vec4 a, vec4 b, vec4 dest) {
glmm_store(dest, _mm_add_ps(glmm_load(dest),
_mm_max_ps(glmm_load(a),
glmm_load(b))));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_add(glmm_load(dest),
wasm_f32x4_max(glmm_load(a),
glmm_load(b))));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vaddq_f32(vld1q_f32(dest),
vmaxq_f32(vld1q_f32(a),
@@ -644,6 +674,10 @@ glm_vec4_minadd(vec4 a, vec4 b, vec4 dest) {
glmm_store(dest, _mm_add_ps(glmm_load(dest),
_mm_min_ps(glmm_load(a),
glmm_load(b))));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_add(glmm_load(dest),
wasm_f32x4_min(glmm_load(a),
glmm_load(b))));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vaddq_f32(vld1q_f32(dest),
vminq_f32(vld1q_f32(a),
@@ -667,6 +701,8 @@ void
glm_vec4_negate_to(vec4 v, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, _mm_xor_ps(glmm_load(v), _mm_set1_ps(-0.0f)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_v128_xor(glmm_load(v), wasm_f32x4_splat(-0.0f)));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vnegq_f32(vld1q_f32(v)));
#else
@@ -711,6 +747,20 @@ glm_vec4_normalize_to(vec4 v, vec4 dest) {
}
glmm_store(dest, _mm_div_ps(x0, _mm_sqrt_ps(xdot)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_128 xdot, x0;
float dot;
x0 = glmm_load(v);
xdot = glmm_vdot(x0, x0);
dot = wasm_f32x4_extract_lane(xdot, 0);
if (dot == 0.0f) {
glmm_store(dest, wasm_f32x4_const(0.f, 0.f, 0.f, 0.f));
return;
}
glmm_store(dest, wasm_f32x4_div(x0, wasm_f32x4_sqrt(xdot)));
#else
float norm;
@@ -748,6 +798,8 @@ float
glm_vec4_distance(vec4 a, vec4 b) {
#if defined( __SSE__ ) || defined( __SSE2__ )
return glmm_norm(_mm_sub_ps(glmm_load(a), glmm_load(b)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
return glmm_norm(wasm_f32x4_sub(glmm_load(a), glmm_load(b)));
#elif defined(CGLM_NEON_FP)
return glmm_norm(vsubq_f32(glmm_load(a), glmm_load(b)));
#else
@@ -770,6 +822,8 @@ float
glm_vec4_distance2(vec4 a, vec4 b) {
#if defined( __SSE__ ) || defined( __SSE2__ )
return glmm_norm2(_mm_sub_ps(glmm_load(a), glmm_load(b)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
return glmm_norm2(wasm_f32x4_sub(glmm_load(a), glmm_load(b)));
#elif defined(CGLM_NEON_FP)
return glmm_norm2(vsubq_f32(glmm_load(a), glmm_load(b)));
#else
@@ -792,6 +846,8 @@ void
glm_vec4_maxv(vec4 a, vec4 b, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, _mm_max_ps(glmm_load(a), glmm_load(b)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_max(glmm_load(a), glmm_load(b)));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vmaxq_f32(vld1q_f32(a), vld1q_f32(b)));
#else
@@ -814,6 +870,8 @@ void
glm_vec4_minv(vec4 a, vec4 b, vec4 dest) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(dest, _mm_min_ps(glmm_load(a), glmm_load(b)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(dest, wasm_f32x4_min(glmm_load(a), glmm_load(b)));
#elif defined(CGLM_NEON_FP)
vst1q_f32(dest, vminq_f32(vld1q_f32(a), vld1q_f32(b)));
#else
@@ -837,6 +895,9 @@ glm_vec4_clamp(vec4 v, float minVal, float maxVal) {
#if defined( __SSE__ ) || defined( __SSE2__ )
glmm_store(v, _mm_min_ps(_mm_max_ps(glmm_load(v), _mm_set1_ps(minVal)),
_mm_set1_ps(maxVal)));
#elif defined(__wasm__) && defined(__wasm_simd128__)
glmm_store(v, wasm_f32x4_min(wasm_f32x4_max(glmm_load(v), wasm_f32x4_splat(minVal)),
wasm_f32x4_splat(maxVal)));
#elif defined(CGLM_NEON_FP)
vst1q_f32(v, vminq_f32(vmaxq_f32(vld1q_f32(v), vdupq_n_f32(minVal)),
vdupq_n_f32(maxVal)));
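
A quick way to exercise the new paths (the file name and build lines are illustrative, not part of the commit; -msimd128 is the flag that makes clang and Emscripten define __wasm_simd128__):

/* vec4_smoke.c - hypothetical smoke test, not part of this commit.
   native: cc   -O2 -msse2    vec4_smoke.c -Iinclude
   wasm:   emcc -O2 -msimd128 vec4_smoke.c -Iinclude */
#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  vec4 a = {1.0f, 2.0f, 3.0f, 4.0f};
  vec4 b = {4.0f, 3.0f, 2.0f, 1.0f};
  vec4 r;
  glm_vec4_add(a, b, r);         /* SIMD128 path: wasm_f32x4_add     */
  glm_vec4_clamp(r, 0.0f, 4.5f); /* SIMD128 path: wasm_f32x4_min/max */
  printf("%.1f %.1f %.1f %.1f\n", r[0], r[1], r[2], r[3]);
  return 0;
}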