diff --git a/include/cglm/simd/wasm.h b/include/cglm/simd/wasm.h
index 48cda4c..648e1bb 100644
--- a/include/cglm/simd/wasm.h
+++ b/include/cglm/simd/wasm.h
@@ -181,8 +181,7 @@ glmm_fmsub(glmm_128 a, glmm_128 b, glmm_128 c) {
 static inline
 glmm_128
 glmm_fnmsub(glmm_128 a, glmm_128 b, glmm_128 c) {
-  return wasm_v128_xor(wasm_f32x4_add(wasm_f32x4_mul(a, b), c),
-                       wasm_f32x4_const_splat(-0.0f));
+  return wasm_f32x4_neg(wasm_f32x4_add(wasm_f32x4_mul(a, b), c));
 }
 
 #endif
diff --git a/include/cglm/simd/wasm/affine.h b/include/cglm/simd/wasm/affine.h
index 59b36ac..80b98fb 100644
--- a/include/cglm/simd/wasm/affine.h
+++ b/include/cglm/simd/wasm/affine.h
@@ -110,11 +110,10 @@ glm_inv_tr_wasm(mat4 mat) {
   x2 = glmm_shuff1(r3, 0, 0, 0, 0);
   x3 = glmm_shuff1(r3, 1, 1, 1, 1);
   x4 = glmm_shuff1(r3, 2, 2, 2, 2);
-  x5 = wasm_f32x4_const_splat(-0.f);
 
   x0 = glmm_fmadd(r0, x2, glmm_fmadd(r1, x3, wasm_f32x4_mul(r2, x4)));
-  x0 = wasm_v128_xor(x0, x5);
+  x0 = wasm_f32x4_neg(x0);
 
   x0 = wasm_f32x4_add(x0, x1);
diff --git a/include/cglm/vec4.h b/include/cglm/vec4.h
index 1d18625..de29941 100644
--- a/include/cglm/vec4.h
+++ b/include/cglm/vec4.h
@@ -707,8 +707,7 @@ CGLM_INLINE
 void
 glm_vec4_negate_to(vec4 v, vec4 dest) {
 #if defined(__wasm__) && defined(__wasm_simd128__)
-  glmm_store(dest, wasm_v128_xor(glmm_load(v),
-                                 wasm_f32x4_const_splat(-0.0f)));
+  glmm_store(dest, wasm_f32x4_neg(glmm_load(v)));
 #elif defined( __SSE__ ) || defined( __SSE2__ )
   glmm_store(dest, _mm_xor_ps(glmm_load(v), _mm_set1_ps(-0.0f)));
 #elif defined(CGLM_NEON_FP)