From 71c585b159e56cbafaf28504321e46f278b97d71 Mon Sep 17 00:00:00 2001 From: myfreeer Date: Mon, 6 Mar 2023 17:07:45 +0800 Subject: [PATCH] simd128: enable in headers --- include/cglm/mat2.h | 4 +++ include/cglm/simd/wasm.h | 2 +- include/cglm/vec4-ext.h | 10 +++++++ include/cglm/vec4.h | 61 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 76 insertions(+), 1 deletion(-) diff --git a/include/cglm/mat2.h b/include/cglm/mat2.h index 9f430fb..dc71185 100644 --- a/include/cglm/mat2.h +++ b/include/cglm/mat2.h @@ -168,6 +168,8 @@ void glm_mat2_transpose_to(mat2 m, mat2 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glm_mat2_transp_sse2(m, dest); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glm_mat2_transp_wasm(m, dest); #else dest[0][0] = m[0][0]; dest[0][1] = m[1][0]; @@ -230,6 +232,8 @@ void glm_mat2_scale(mat2 m, float s) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(m[0], _mm_mul_ps(_mm_loadu_ps(m[0]), _mm_set1_ps(s))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(m[0], wasm_f32x4_mul(wasm_v128_load(m[0]), wasm_f32x4_splat(s))); #elif defined(CGLM_NEON_FP) vst1q_f32(m[0], vmulq_f32(vld1q_f32(m[0]), vdupq_n_f32(s))); #else diff --git a/include/cglm/simd/wasm.h b/include/cglm/simd/wasm.h index 5015ec9..f6403d1 100644 --- a/include/cglm/simd/wasm.h +++ b/include/cglm/simd/wasm.h @@ -6,7 +6,7 @@ #include <wasm_simd128.h> #define glmm_load(p) wasm_v128_load(p) -#define glmm_store(p, a) wasm_v128_store(p, a) +#define glmm_store(p, a) wasm_v128_store(p, (a)) #define glmm_set1(x) wasm_f32x4_splat(x) #define glmm_128 v128_t diff --git a/include/cglm/vec4-ext.h b/include/cglm/vec4-ext.h index e4e20cb..06b6210 100644 --- a/include/cglm/vec4-ext.h +++ b/include/cglm/vec4-ext.h @@ -47,6 +47,8 @@ void glm_vec4_broadcast(float val, vec4 d) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(d, _mm_set1_ps(val)); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(d, wasm_f32x4_splat(val)); #else d[0] = d[1] = d[2] 
= d[3] = val; #endif @@ -63,6 +65,8 @@ void glm_vec4_fill(vec4 v, float val) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(v, _mm_set1_ps(val)); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(v, wasm_f32x4_splat(val)); #else v[0] = v[1] = v[2] = v[3] = val; #endif @@ -249,6 +253,8 @@ void glm_vec4_abs(vec4 v, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, glmm_abs(glmm_load(v))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, glmm_abs(glmm_load(v))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vabsq_f32(vld1q_f32(v))); #else @@ -286,6 +292,8 @@ float glm_vec4_hadd(vec4 v) { #if defined( __SSE__ ) || defined( __SSE2__ ) return glmm_hadd(glmm_load(v)); +#elif defined(__wasm__) && defined(__wasm_simd128__) + return glmm_hadd(glmm_load(v)); #else return v[0] + v[1] + v[2] + v[3]; #endif @@ -302,6 +310,8 @@ void glm_vec4_sqrt(vec4 v, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, _mm_sqrt_ps(glmm_load(v))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, wasm_f32x4_sqrt(glmm_load(v))); #else dest[0] = sqrtf(v[0]); dest[1] = sqrtf(v[1]); diff --git a/include/cglm/vec4.h b/include/cglm/vec4.h index 8e95ec5..587597e 100644 --- a/include/cglm/vec4.h +++ b/include/cglm/vec4.h @@ -139,6 +139,8 @@ void glm_vec4_copy(vec4 v, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, glmm_load(v)); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, glmm_load(v)); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vld1q_f32(v)); #else @@ -176,6 +178,8 @@ void glm_vec4_zero(vec4 v) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(v, _mm_setzero_ps()); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(v, wasm_f32x4_const(0.f, 0.f, 0.f, 0.f)); #elif defined(CGLM_NEON_FP) vst1q_f32(v, vdupq_n_f32(0.0f)); #else @@ -196,6 +200,8 @@ void glm_vec4_one(vec4 v) { #if defined( __SSE__ ) || 
defined( __SSE2__ ) glmm_store(v, _mm_set1_ps(1.0f)); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(v, wasm_f32x4_splat(1.0f)); #elif defined(CGLM_NEON_FP) vst1q_f32(v, vdupq_n_f32(1.0f)); #else @@ -322,6 +328,8 @@ void glm_vec4_add(vec4 a, vec4 b, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, _mm_add_ps(glmm_load(a), glmm_load(b))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, wasm_f32x4_add(glmm_load(a), glmm_load(b))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vaddq_f32(vld1q_f32(a), vld1q_f32(b))); #else @@ -344,6 +352,8 @@ void glm_vec4_adds(vec4 v, float s, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, _mm_add_ps(glmm_load(v), _mm_set1_ps(s))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, wasm_f32x4_add(glmm_load(v), wasm_f32x4_splat(s))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vaddq_f32(vld1q_f32(v), vdupq_n_f32(s))); #else @@ -366,6 +376,8 @@ void glm_vec4_sub(vec4 a, vec4 b, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, _mm_sub_ps(glmm_load(a), glmm_load(b))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, wasm_f32x4_sub(glmm_load(a), glmm_load(b))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vsubq_f32(vld1q_f32(a), vld1q_f32(b))); #else @@ -388,6 +400,8 @@ void glm_vec4_subs(vec4 v, float s, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, _mm_sub_ps(glmm_load(v), _mm_set1_ps(s))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, wasm_f32x4_sub(glmm_load(v), wasm_f32x4_splat(s))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vsubq_f32(vld1q_f32(v), vdupq_n_f32(s))); #else @@ -432,6 +446,8 @@ void glm_vec4_scale(vec4 v, float s, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, _mm_mul_ps(glmm_load(v), _mm_set1_ps(s))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + 
glmm_store(dest, wasm_f32x4_mul(glmm_load(v), wasm_f32x4_splat(s))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vmulq_f32(vld1q_f32(v), vdupq_n_f32(s))); #else @@ -495,6 +511,8 @@ void glm_vec4_divs(vec4 v, float s, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, _mm_div_ps(glmm_load(v), _mm_set1_ps(s))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, wasm_f32x4_div(glmm_load(v), wasm_f32x4_splat(s))); #else glm_vec4_scale(v, 1.0f / s, dest); #endif @@ -516,6 +534,10 @@ glm_vec4_addadd(vec4 a, vec4 b, vec4 dest) { glmm_store(dest, _mm_add_ps(glmm_load(dest), _mm_add_ps(glmm_load(a), glmm_load(b)))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, wasm_f32x4_add(glmm_load(dest), + wasm_f32x4_add(glmm_load(a), + glmm_load(b)))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vaddq_f32(vld1q_f32(dest), vaddq_f32(vld1q_f32(a), @@ -544,6 +566,10 @@ glm_vec4_subadd(vec4 a, vec4 b, vec4 dest) { glmm_store(dest, _mm_add_ps(glmm_load(dest), _mm_sub_ps(glmm_load(a), glmm_load(b)))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, wasm_f32x4_add(glmm_load(dest), + wasm_f32x4_sub(glmm_load(a), + glmm_load(b)))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vaddq_f32(vld1q_f32(dest), vsubq_f32(vld1q_f32(a), @@ -616,6 +642,10 @@ glm_vec4_maxadd(vec4 a, vec4 b, vec4 dest) { glmm_store(dest, _mm_add_ps(glmm_load(dest), _mm_max_ps(glmm_load(a), glmm_load(b)))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, wasm_f32x4_add(glmm_load(dest), + wasm_f32x4_max(glmm_load(a), + glmm_load(b)))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vaddq_f32(vld1q_f32(dest), vmaxq_f32(vld1q_f32(a), @@ -644,6 +674,10 @@ glm_vec4_minadd(vec4 a, vec4 b, vec4 dest) { glmm_store(dest, _mm_add_ps(glmm_load(dest), _mm_min_ps(glmm_load(a), glmm_load(b)))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, wasm_f32x4_add(glmm_load(dest), + 
wasm_f32x4_min(glmm_load(a), + glmm_load(b)))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vaddq_f32(vld1q_f32(dest), vminq_f32(vld1q_f32(a), @@ -667,6 +701,8 @@ void glm_vec4_negate_to(vec4 v, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, _mm_xor_ps(glmm_load(v), _mm_set1_ps(-0.0f))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, wasm_v128_xor(glmm_load(v), wasm_f32x4_splat(-0.0f))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vnegq_f32(vld1q_f32(v))); #else @@ -711,6 +747,20 @@ glm_vec4_normalize_to(vec4 v, vec4 dest) { } glmm_store(dest, _mm_div_ps(x0, _mm_sqrt_ps(xdot))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_128 xdot, x0; + float dot; + + x0 = glmm_load(v); + xdot = glmm_vdot(x0, x0); + dot = wasm_f32x4_extract_lane(xdot, 0); + + if (dot == 0.0f) { + glmm_store(dest, wasm_f32x4_const(0.f, 0.f, 0.f, 0.f)); + return; + } + + glmm_store(dest, wasm_f32x4_div(x0, wasm_f32x4_sqrt(xdot))); #else float norm; @@ -748,6 +798,8 @@ float glm_vec4_distance(vec4 a, vec4 b) { #if defined( __SSE__ ) || defined( __SSE2__ ) return glmm_norm(_mm_sub_ps(glmm_load(a), glmm_load(b))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + return glmm_norm(wasm_f32x4_sub(glmm_load(a), glmm_load(b))); #elif defined(CGLM_NEON_FP) return glmm_norm(vsubq_f32(glmm_load(a), glmm_load(b))); #else @@ -770,6 +822,8 @@ float glm_vec4_distance2(vec4 a, vec4 b) { #if defined( __SSE__ ) || defined( __SSE2__ ) return glmm_norm2(_mm_sub_ps(glmm_load(a), glmm_load(b))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + return glmm_norm2(wasm_f32x4_sub(glmm_load(a), glmm_load(b))); #elif defined(CGLM_NEON_FP) return glmm_norm2(vsubq_f32(glmm_load(a), glmm_load(b))); #else @@ -792,6 +846,8 @@ void glm_vec4_maxv(vec4 a, vec4 b, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, _mm_max_ps(glmm_load(a), glmm_load(b))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, 
wasm_f32x4_max(glmm_load(a), glmm_load(b))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vmaxq_f32(vld1q_f32(a), vld1q_f32(b))); #else @@ -814,6 +870,8 @@ void glm_vec4_minv(vec4 a, vec4 b, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, _mm_min_ps(glmm_load(a), glmm_load(b))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(dest, wasm_f32x4_min(glmm_load(a), glmm_load(b))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vminq_f32(vld1q_f32(a), vld1q_f32(b))); #else @@ -837,6 +895,9 @@ glm_vec4_clamp(vec4 v, float minVal, float maxVal) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(v, _mm_min_ps(_mm_max_ps(glmm_load(v), _mm_set1_ps(minVal)), _mm_set1_ps(maxVal))); +#elif defined(__wasm__) && defined(__wasm_simd128__) + glmm_store(v, wasm_f32x4_min(wasm_f32x4_max(glmm_load(v), wasm_f32x4_splat(minVal)), + wasm_f32x4_splat(maxVal))); #elif defined(CGLM_NEON_FP) vst1q_f32(v, vminq_f32(vmaxq_f32(vld1q_f32(v), vdupq_n_f32(minVal)), vdupq_n_f32(maxVal)));