diff --git a/include/cglm/mat2.h b/include/cglm/mat2.h index 7fba348..1da0cd4 100644 --- a/include/cglm/mat2.h +++ b/include/cglm/mat2.h @@ -235,7 +235,7 @@ glm_mat2_scale(mat2 m, float s) { glmm_store(m[0], wasm_f32x4_mul(wasm_v128_load(m[0]), wasm_f32x4_splat(s))); #elif defined( __SSE__ ) || defined( __SSE2__ ) - glmm_store(m[0], _mm_mul_ps(_mm_loadu_ps(m[0]), _mm_set1_ps(s))); + glmm_store(m[0], _mm_mul_ps(_mm_loadu_ps(m[0]), glmm_set1(s))); #elif defined(CGLM_NEON_FP) vst1q_f32(m[0], vmulq_f32(vld1q_f32(m[0]), vdupq_n_f32(s))); #else diff --git a/include/cglm/simd/avx/mat4.h b/include/cglm/simd/avx/mat4.h index e8c36c8..f5812de 100644 --- a/include/cglm/simd/avx/mat4.h +++ b/include/cglm/simd/avx/mat4.h @@ -18,8 +18,8 @@ CGLM_INLINE void glm_mat4_scale_avx(mat4 m, float s) { __m256 y0; - y0 = _mm256_set1_ps(s); - + y0 = _mm256_broadcast_ss(&s); + glmm_store256(m[0], _mm256_mul_ps(y0, glmm_load256(m[0]))); glmm_store256(m[2], _mm256_mul_ps(y0, glmm_load256(m[2]))); } diff --git a/include/cglm/simd/sse2/mat4.h b/include/cglm/simd/sse2/mat4.h index fb6d2f2..5df7254 100644 --- a/include/cglm/simd/sse2/mat4.h +++ b/include/cglm/simd/sse2/mat4.h @@ -18,7 +18,7 @@ CGLM_INLINE void glm_mat4_scale_sse2(mat4 m, float s) { __m128 x0; - x0 = _mm_set1_ps(s); + x0 = glmm_set1(s); glmm_store(m[0], _mm_mul_ps(glmm_load(m[0]), x0)); glmm_store(m[1], _mm_mul_ps(glmm_load(m[1]), x0)); @@ -426,7 +426,7 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) { x1 = _mm_shuffle_ps(v2, v3, _MM_SHUFFLE(0, 0, 0, 0)); x0 = _mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 0, 2, 0)); - x0 = _mm_div_ps(_mm_set1_ps(1.0f), glmm_vhadd(_mm_mul_ps(x0, r0))); + x0 = _mm_div_ps(glmm_set1(1.0f), glmm_vhadd(_mm_mul_ps(x0, r0))); glmm_store(dest[0], _mm_mul_ps(v0, x0)); glmm_store(dest[1], _mm_mul_ps(v1, x0)); @@ -494,7 +494,7 @@ glm_mat4_inv_sse2(mat4 mat, mat4 dest) { /* v0: c3 * c10 + c4 * c9 + c1 * c8 + c2 * c7 */ /* v1: c5 * c12 + c6 * c11 */ - v5 = _mm_set1_ps(1.0f); + v5 = glmm_set1(1.0f); v0 = glmm_shuff1(t2, 2, 3, 
0, 1); v1 = glmm_shuff1(t1, 0, 1, 2, 3); v0 = _mm_mul_ps(t0, v0); diff --git a/include/cglm/simd/x86.h b/include/cglm/simd/x86.h index 81081dc..657d9ba 100644 --- a/include/cglm/simd/x86.h +++ b/include/cglm/simd/x86.h @@ -18,9 +18,16 @@ # define glmm_store(p, a) _mm_store_ps(p, a) #endif -#define glmm_set1(x) _mm_set1_ps(x) #define glmm_128 __m128 +#ifdef __AVX__ +# define glmm_set1(x) _mm_broadcast_ss(&(float){x}) +# define glmm_set1_ptr(x) _mm_broadcast_ss(x) +#else +# define glmm_set1(x) _mm_set1_ps(x) +# define glmm_set1_ptr(x) _mm_set1_ps(*x) +#endif + #if defined(CGLM_USE_INT_DOMAIN) && defined(__SSE2__) # define glmm_shuff1(xmm, z, y, x, w) \ _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(xmm), \ @@ -86,7 +93,7 @@ #if defined(__SSE2__) # define glmm_float32x4_SIGNMASK_NEG _mm_castsi128_ps(_mm_set1_epi32(GLMM_NEGZEROf)) /* _mm_set1_ps(-0.0f) */ #else -# define glmm_float32x4_SIGNMASK_NEG _mm_set1_ps(GLMM_NEGZEROf) +# define glmm_float32x4_SIGNMASK_NEG glmm_set1(GLMM_NEGZEROf) #endif #define glmm_float32x8_SIGNMASK_NEG _mm256_castsi256_ps(_mm256_set1_epi32(GLMM_NEGZEROf)) diff --git a/include/cglm/vec4-ext.h b/include/cglm/vec4-ext.h index cc09ee1..b3850a0 100644 --- a/include/cglm/vec4-ext.h +++ b/include/cglm/vec4-ext.h @@ -48,7 +48,7 @@ glm_vec4_broadcast(float val, vec4 d) { #if defined(__wasm__) && defined(__wasm_simd128__) glmm_store(d, wasm_f32x4_splat(val)); #elif defined( __SSE__ ) || defined( __SSE2__ ) - glmm_store(d, _mm_set1_ps(val)); + glmm_store(d, glmm_set1(val)); #else d[0] = d[1] = d[2] = d[3] = val; #endif @@ -66,7 +66,7 @@ glm_vec4_fill(vec4 v, float val) { #if defined(__wasm__) && defined(__wasm_simd128__) glmm_store(v, wasm_f32x4_splat(val)); #elif defined( __SSE__ ) || defined( __SSE2__ ) - glmm_store(v, _mm_set1_ps(val)); + glmm_store(v, glmm_set1(val)); #else v[0] = v[1] = v[2] = v[3] = val; #endif diff --git a/include/cglm/vec4.h b/include/cglm/vec4.h index e24675f..a85419b 100644 @@
-215,7 +215,7 @@ glm_vec4_one(vec4 v) { #if defined(__wasm__) && defined(__wasm_simd128__) glmm_store(v, wasm_f32x4_const_splat(1.0f)); #elif defined( __SSE__ ) || defined( __SSE2__ ) - glmm_store(v, _mm_set1_ps(1.0f)); + glmm_store(v, glmm_set1(1.0f)); #elif defined(CGLM_NEON_FP) vst1q_f32(v, vdupq_n_f32(1.0f)); #else @@ -367,7 +367,7 @@ glm_vec4_adds(vec4 v, float s, vec4 dest) { #if defined(__wasm__) && defined(__wasm_simd128__) glmm_store(dest, wasm_f32x4_add(glmm_load(v), wasm_f32x4_splat(s))); #elif defined( __SSE__ ) || defined( __SSE2__ ) - glmm_store(dest, _mm_add_ps(glmm_load(v), _mm_set1_ps(s))); + glmm_store(dest, _mm_add_ps(glmm_load(v), glmm_set1(s))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vaddq_f32(vld1q_f32(v), vdupq_n_f32(s))); #else @@ -415,7 +415,7 @@ glm_vec4_subs(vec4 v, float s, vec4 dest) { #if defined(__wasm__) && defined(__wasm_simd128__) glmm_store(dest, wasm_f32x4_sub(glmm_load(v), wasm_f32x4_splat(s))); #elif defined( __SSE__ ) || defined( __SSE2__ ) - glmm_store(dest, _mm_sub_ps(glmm_load(v), _mm_set1_ps(s))); + glmm_store(dest, _mm_sub_ps(glmm_load(v), glmm_set1(s))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vsubq_f32(vld1q_f32(v), vdupq_n_f32(s))); #else @@ -463,7 +463,7 @@ glm_vec4_scale(vec4 v, float s, vec4 dest) { #if defined(__wasm__) && defined(__wasm_simd128__) glmm_store(dest, wasm_f32x4_mul(glmm_load(v), wasm_f32x4_splat(s))); #elif defined( __SSE__ ) || defined( __SSE2__ ) - glmm_store(dest, _mm_mul_ps(glmm_load(v), _mm_set1_ps(s))); + glmm_store(dest, _mm_mul_ps(glmm_load(v), glmm_set1(s))); #elif defined(CGLM_NEON_FP) vst1q_f32(dest, vmulq_f32(vld1q_f32(v), vdupq_n_f32(s))); #else @@ -528,7 +528,7 @@ glm_vec4_divs(vec4 v, float s, vec4 dest) { #if defined(__wasm__) && defined(__wasm_simd128__) glmm_store(dest, wasm_f32x4_div(glmm_load(v), wasm_f32x4_splat(s))); #elif defined( __SSE__ ) || defined( __SSE2__ ) - glmm_store(dest, _mm_div_ps(glmm_load(v), _mm_set1_ps(s))); + glmm_store(dest, _mm_div_ps(glmm_load(v), 
glmm_set1(s))); #else glm_vec4_scale(v, 1.0f / s, dest); #endif @@ -1065,8 +1065,8 @@ glm_vec4_clamp(vec4 v, float minVal, float maxVal) { glmm_store(v, glmm_min(glmm_max(glmm_load(v), wasm_f32x4_splat(minVal)), wasm_f32x4_splat(maxVal))); #elif defined( __SSE__ ) || defined( __SSE2__ ) - glmm_store(v, glmm_min(glmm_max(glmm_load(v), _mm_set1_ps(minVal)), - _mm_set1_ps(maxVal))); + glmm_store(v, glmm_min(glmm_max(glmm_load(v), glmm_set1(minVal)), + glmm_set1(maxVal))); #elif defined(CGLM_NEON_FP) glmm_store(v, glmm_min(glmm_max(vld1q_f32(v), vdupq_n_f32(minVal)), vdupq_n_f32(maxVal)));