avx: optimize avx mat4 scale and mat4 mul

Author: Recep Aslantas
Date:   2024-04-13 00:33:57 +03:00
parent  d75467f93f
commit  44cd0ae4fd

@@ -15,11 +15,18 @@
 CGLM_INLINE
 void
 glm_mat4_scale_avx(mat4 m, float s) {
-  __m256 y0;
-  y0 = _mm256_broadcast_ss(&s);
-
-  glmm_store256(m[0], _mm256_mul_ps(y0, glmm_load256(m[0])));
-  glmm_store256(m[2], _mm256_mul_ps(y0, glmm_load256(m[2])));
+  __m256 y0, y1, y2, y3, y4;
+
+  y0 = glmm_load256(m[0]);      /* h g f e d c b a */
+  y1 = glmm_load256(m[2]);      /* p o n m l k j i */
+  y2 = _mm256_broadcast_ss(&s);
+
+  y3 = _mm256_mul_ps(y0, y2);
+  y4 = _mm256_mul_ps(y1, y2);
+
+  glmm_store256(m[0], y3);
+  glmm_store256(m[2], y4);
 }
 
 /* TODO: this must be tested and compared to SSE version, may be slower!!! */
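
For reference, the old and the new body of glm_mat4_scale_avx compute the same result: every element of the column-major 4x4 matrix is multiplied by s, eight floats per 256-bit operation. A minimal scalar sketch of that contract (illustrative only; mat4_scale_ref is not part of cglm or of this commit):

  /* scalar reference for what glm_mat4_scale_avx computes (illustrative only) */
  static void
  mat4_scale_ref(float m[4][4], float s) {
    int c, r;
    for (c = 0; c < 4; c++)     /* column */
      for (r = 0; r < 4; r++)   /* row    */
        m[c][r] *= s;           /* AVX path: 16 multiplies in two 8-wide _mm256_mul_ps */
  }

The rewrite mostly reorders the work: both 256-bit halves of the matrix and the broadcast scalar are loaded into registers first, the two multiplies are issued back to back, and only then are the results stored, instead of folding each load into its multiply and storing immediately.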
@@ -52,7 +59,8 @@ void
 glm_mat4_mul_avx(mat4 m1, mat4 m2, mat4 dest) {
   /* D = R * L (Column-Major) */
 
-  __m256 y0, y1, y2, y3, y4, y5, y6, y7, y8, y9;
+  __m256  y0, y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11, y12, y13;
+  __m256i yi0, yi1, yi2, yi3;
 
   y0 = glmm_load256(m2[0]); /* h g f e d c b a */
   y1 = glmm_load256(m2[2]); /* p o n m l k j i */
@@ -64,35 +72,43 @@ glm_mat4_mul_avx(mat4 m1, mat4 m2, mat4 dest) {
   y4 = _mm256_permute2f128_ps(y2, y2, 0x03); /* d c b a h g f e */
   y5 = _mm256_permute2f128_ps(y3, y3, 0x03); /* l k j i p o n m */
 
+  yi0 = _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0);
+  yi1 = _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2);
+  yi2 = _mm256_set_epi32(0, 0, 0, 0, 1, 1, 1, 1);
+  yi3 = _mm256_set_epi32(2, 2, 2, 2, 3, 3, 3, 3);
+
   /* f f f f a a a a */
   /* h h h h c c c c */
   /* e e e e b b b b */
   /* g g g g d d d d */
-  y6 = _mm256_permutevar_ps(y0, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
-  y7 = _mm256_permutevar_ps(y0, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
-  y8 = _mm256_permutevar_ps(y0, _mm256_set_epi32(0, 0, 0, 0, 1, 1, 1, 1));
-  y9 = _mm256_permutevar_ps(y0, _mm256_set_epi32(2, 2, 2, 2, 3, 3, 3, 3));
-
-  glmm_store256(dest[0],
-                _mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
-                                            _mm256_mul_ps(y3, y7)),
-                              _mm256_add_ps(_mm256_mul_ps(y4, y8),
-                                            _mm256_mul_ps(y5, y9))));
+  y6 = _mm256_permutevar_ps(y0, yi0);
+  y7 = _mm256_permutevar_ps(y0, yi1);
+  y8 = _mm256_permutevar_ps(y0, yi2);
+  y9 = _mm256_permutevar_ps(y0, yi3);
 
   /* n n n n i i i i */
   /* p p p p k k k k */
   /* m m m m j j j j */
   /* o o o o l l l l */
-  y6 = _mm256_permutevar_ps(y1, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
-  y7 = _mm256_permutevar_ps(y1, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
-  y8 = _mm256_permutevar_ps(y1, _mm256_set_epi32(0, 0, 0, 0, 1, 1, 1, 1));
-  y9 = _mm256_permutevar_ps(y1, _mm256_set_epi32(2, 2, 2, 2, 3, 3, 3, 3));
+  y10 = _mm256_permutevar_ps(y1, yi0);
+  y11 = _mm256_permutevar_ps(y1, yi1);
+  y12 = _mm256_permutevar_ps(y1, yi2);
+  y13 = _mm256_permutevar_ps(y1, yi3);
 
-  glmm_store256(dest[2],
-                _mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
-                                            _mm256_mul_ps(y3, y7)),
-                              _mm256_add_ps(_mm256_mul_ps(y4, y8),
-                                            _mm256_mul_ps(y5, y9))));
+  y0 = _mm256_mul_ps(y2, y6);
+  y1 = _mm256_mul_ps(y2, y10);
+
+  y0 = glmm256_fmadd(y3, y7,  y0);
+  y1 = glmm256_fmadd(y3, y11, y1);
+
+  y0 = glmm256_fmadd(y4, y8,  y0);
+  y1 = glmm256_fmadd(y4, y12, y1);
+
+  y0 = glmm256_fmadd(y5, y9,  y0);
+  y1 = glmm256_fmadd(y5, y13, y1);
+
+  glmm_store256(dest[0], y0);
+  glmm_store256(dest[2], y1);
 }
 
 #endif
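
For the multiply path, the permute index vectors yi0..yi3 are now built once and reused for both input halves, and the nested add/mul trees are replaced by a chain of glmm256_fmadd calls that accumulate into y0/y1 before a single store per half; glmm256_fmadd is cglm's fused multiply-add helper (presumably _mm256_fmadd_ps when FMA is available, with a mul+add fallback). What the function computes is an ordinary column-major product dest = m1 * m2. A minimal scalar sketch of that contract (illustrative only; mat4_mul_ref is not part of cglm or of this commit):

  /* scalar reference for what glm_mat4_mul_avx computes (illustrative only):
     column-major dest = m1 * m2, each dest column is m1 times the matching m2 column */
  static void
  mat4_mul_ref(float m1[4][4], float m2[4][4], float dest[4][4]) {
    int c, r, k;
    for (c = 0; c < 4; c++)        /* column of dest (and of m2) */
      for (r = 0; r < 4; r++) {    /* row of dest (and of m1)    */
        float sum = 0.0f;
        for (k = 0; k < 4; k++)
          sum += m1[k][r] * m2[c][k];
        dest[c][r] = sum;
      }
  }

The register-layout comments in the hunk read against that: with y0 = "h g f e d c b a" (columns 0 and 1 of m2), yi0 = (1, 1, 1, 1, 0, 0, 0, 0) replicates element 0 within the low 128-bit lane and element 1 within the high lane, giving "f f f f a a a a", i.e. the per-column scalars that multiply the m1 columns held in y2..y5.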