tests: add some tests for vec4

This commit is contained in:
Recep Aslantas
2019-09-24 16:33:42 +03:00
parent f2073b2277
commit ce09e543ef
4 changed files with 829 additions and 224 deletions

View File

@@ -315,10 +315,10 @@ TEST_IMPL(GLM_PREFIX, vec3_norm_inf) {
n1 = GLM(vec3_norm_inf)(a);
n2 = fabsf(a[0]);
if (fabsf(a[0]) < fabsf(a[1]))
if (n2 < fabsf(a[1]))
n2 = fabsf(a[1]);
if (fabsf(a[1]) < fabsf(a[2]))
if (n2 < fabsf(a[2]))
n2 = fabsf(a[2]);
ASSERT(test_eq(n1, n2))

View File

@@ -5,228 +5,22 @@
* Full license can be found in the LICENSE file
*/
#include "test_common.h"
/* test inline vec4 */
CGLM_INLINE
float
test_vec4_dot(vec4 a, vec4 b) {
  /* reference 4D dot product, accumulated component by component
     (left-to-right, same order as the expanded expression) */
  float sum;
  int   i;

  sum = 0.0f;
  for (i = 0; i < 4; i++)
    sum += a[i] * b[i];

  return sum;
}
#define GLM_PREFIX glm_
#define GLM(X) (glm_ ## X)
CGLM_INLINE
void
test_vec4_normalize_to(vec4 vec, vec4 dest) {
float norm;
#include "test_vec4.h"
norm = glm_vec4_norm(vec);
#undef GLM
#undef GLM_PREFIX
if (norm == 0.0f) {
dest[0] = dest[1] = dest[2] = dest[3] = 0.0f;
return;
}
/* test pre-compiled vec4 */
glm_vec4_scale(vec, 1.0f / norm, dest);
}
#define GLM_PREFIX glmc_
#define GLM(X) (glmc_ ## X)
float
test_vec4_norm2(vec4 vec) {
  /* squared Euclidean length, expanded component-wise */
  return vec[0] * vec[0] + vec[1] * vec[1]
       + vec[2] * vec[2] + vec[3] * vec[3];
}
#include "test_vec4.h"
float
test_vec4_norm(vec4 vec) {
  /* Euclidean (L2) length: sqrt of the sum of squared components */
  return sqrtf(vec[0] * vec[0] + vec[1] * vec[1]
             + vec[2] * vec[2] + vec[3] * vec[3]);
}
void
test_vec4_maxv(vec4 v1, vec4 v2, vec4 dest) {
  /* component-wise maximum of v1 and v2 into dest */
  int i;

  for (i = 0; i < 4; i++)
    dest[i] = glm_max(v1[i], v2[i]);
}
void
test_vec4_minv(vec4 v1, vec4 v2, vec4 dest) {
  /* component-wise minimum of v1 and v2 into dest */
  int i;

  for (i = 0; i < 4; i++)
    dest[i] = glm_min(v1[i], v2[i]);
}
void
test_vec4_clamp(vec4 v, float minVal, float maxVal) {
  /* clamp each component of v into [minVal, maxVal], in place */
  int i;

  for (i = 0; i < 4; i++)
    v[i] = glm_clamp(v[i], minVal, maxVal);
}
/* vec4 smoke test: randomized SIMD-vs-scalar cross-checks, then
   fixed-value checks of the arithmetic, min/max/clamp, swizzle and
   struct (vec4s) APIs */
TEST_IMPL(vec4) {
vec4 v, v1, v2, v3, v4, one, zero;
vec4s vs1, vs2, vs3, vs4;
int i;
float d1, d2;
glm_vec4_one(one);
glm_vec4_zero(zero);
for (i = 0; i < 1000; i++) {
/* 1. test SSE/SIMD dot product */
test_rand_vec4(v);
d1 = glm_vec4_dot(v, v);
d2 = test_vec4_dot(v, v);
ASSERT(fabsf(d1 - d2) <= 0.000009)
/* 2. test SIMD normalize */
test_vec4_normalize_to(v, v1);
glm_vec4_normalize_to(v, v2);
glm_vec4_normalize(v);
/* all must be same */
test_assert_vec4_eq(v1, v2);
test_assert_vec4_eq(v, v2);
/* 3. test SIMD norm */
test_rand_vec4(v);
test_assert_eqf(test_vec4_norm(v), glm_vec4_norm(v));
/* 4. test SIMD norm2 */
test_rand_vec4(v);
test_assert_eqf(test_vec4_norm2(v), glm_vec4_norm2(v));
/* 5. test SSE/SIMD distance */
test_rand_vec4(v1);
test_rand_vec4(v2);
d1 = glm_vec4_distance(v1, v2);
d2 = sqrtf(powf(v1[0] - v2[0], 2.0f)
+ powf(v1[1] - v2[1], 2.0f)
+ powf(v1[2] - v2[2], 2.0f)
+ powf(v1[3] - v2[3], 2.0f));
ASSERT(fabsf(d1 - d2) <= 0.000009)
}
/* test zero */
glm_vec4_zero(v);
ASSERTIFY(test_assert_vec4_eq(zero, v))
/* test one */
glm_vec4_one(v);
ASSERTIFY(test_assert_vec4_eq(one, v))
/* adds, subs, div, divs, mul: chained ops on v, each step verified
   against the expected broadcast scalar via the pre-compiled eq_eps */
glm_vec4_add(v, one, v);
ASSERT(glmc_vec4_eq_eps(v, 2))
glm_vec4_adds(v, 10, v);
ASSERT(glmc_vec4_eq_eps(v, 12))
glm_vec4_sub(v, one, v);
ASSERT(glmc_vec4_eq_eps(v, 11))
glm_vec4_subs(v, 1, v);
ASSERT(glmc_vec4_eq_eps(v, 10))
glm_vec4_broadcast(2, v1);
glm_vec4_div(v, v1, v);
ASSERT(glmc_vec4_eq_eps(v, 5))
glm_vec4_divs(v, 0.5f, v);
ASSERT(glmc_vec4_eq_eps(v, 10))
glm_vec4_mul(v, v1, v);
ASSERT(glmc_vec4_eq_eps(v, 20))
glm_vec4_scale(v, 0.5f, v);
ASSERT(glmc_vec4_eq_eps(v, 10))
/* scale_as(v, s) must equal normalize(v) scaled by s */
glm_vec4_normalize_to(v, v1);
glm_vec4_scale(v1, 0.8f, v1);
glm_vec4_scale_as(v, 0.8f, v);
ASSERTIFY(test_assert_vec4_eq(v1, v))
/* addadd, subadd, muladd */
glm_vec4_one(v);
glm_vec4_addadd(one, one, v);
ASSERT(glmc_vec4_eq_eps(v, 3))
glm_vec4_subadd(one, zero, v);
ASSERT(glmc_vec4_eq_eps(v, 4))
glm_vec4_broadcast(2, v1);
glm_vec4_broadcast(3, v2);
glm_vec4_muladd(v1, v2, v);
ASSERT(glmc_vec4_eq_eps(v, 10))
/* min, max */
test_rand_vec4(v1);
test_rand_vec4(v2);
glm_vec4_maxv(v1, v2, v3);
test_vec4_maxv(v1, v2, v4);
ASSERTIFY(test_assert_vec4_eq(v3, v4))
glm_vec4_minv(v1, v2, v3);
test_vec4_minv(v1, v2, v4);
ASSERTIFY(test_assert_vec4_eq(v3, v4))
/* clamp */
glm_vec4_clamp(v3, 0.1f, 0.8f);
test_vec4_clamp(v4, 0.1f, 0.8f);
ASSERTIFY(test_assert_vec4_eq(v3, v4))
ASSERT(v3[0] >= 0.0999 && v3[0] <= 0.80001) /* rounding errors */
ASSERT(v3[1] >= 0.0999 && v3[1] <= 0.80001)
ASSERT(v3[2] >= 0.0999 && v3[2] <= 0.80001)
ASSERT(v3[3] >= 0.0999 && v3[3] <= 0.80001)
/* swizzle */
/* WZYX: reverse components */
v1[0] = 1;
v1[1] = 2;
v1[2] = 3;
v1[3] = 4;
glm_vec4_swizzle(v1, GLM_WZYX, v1);
ASSERTIFY(test_assert_vec4_eq(v1, (vec4){4, 3, 2, 1}))
/* v1 is now {4,3,2,1}, so broadcasting x yields all 4s */
glm_vec4_swizzle(v1, GLM_XXXX, v1);
ASSERTIFY(test_assert_vec4_eq(v1, (vec4){4, 4, 4, 4}))
v1[0] = 1;
v1[1] = 2;
v1[2] = 3;
v1[3] = 4;
glm_vec4_swizzle(v1, GLM_YYYY, v1);
ASSERTIFY(test_assert_vec4_eq(v1, (vec4){2, 2, 2, 2}))
v1[0] = 1;
v1[1] = 2;
v1[2] = 3;
v1[3] = 4;
glm_vec4_swizzle(v1, GLM_ZZZZ, v1);
ASSERTIFY(test_assert_vec4_eq(v1, (vec4){3, 3, 3, 3}))
v1[0] = 1;
v1[1] = 2;
v1[2] = 3;
v1[3] = 4;
glm_vec4_swizzle(v1, GLM_WWWW, v1);
ASSERTIFY(test_assert_vec4_eq(v1, (vec4){4, 4, 4, 4}))
/* structs */
vs1 = test_rand_vec4s();
vs2 = test_rand_vec4s();
vs3 = glms_vec4_add(vs1, vs2);
vs4 = glms_vec4_maxv(vs1, vs3);
/* vs3 = vs1 + vs2; maxv(vs1, vs3) == vs3 only holds when vs2 >= 0
   component-wise -- presumably test_rand_vec4s() is non-negative;
   TODO(review): confirm the random range */
ASSERTIFY(test_assert_vec4s_eq(vs3, vs4))
TEST_SUCCESS
}
#undef GLM
#undef GLM_PREFIX

660
test/src/test_vec4.h Normal file
View File

@@ -0,0 +1,660 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#include "test_common.h"
#define TEST_GLM_SHUFFLE4(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2)|(w))
#ifndef CGLM_TEST_VEC4_ONCE
#define CGLM_TEST_VEC4_ONCE
/* Macros */
TEST_IMPL(MACRO_GLM_VEC4_ONE_INIT) {
vec4 v = GLM_VEC4_ONE_INIT;
ASSERT(test_eq(v[0], 1.0f))
ASSERT(test_eq(v[1], 1.0f))
ASSERT(test_eq(v[2], 1.0f))
ASSERT(test_eq(v[3], 1.0f))
TEST_SUCCESS
}
TEST_IMPL(MACRO_GLM_VEC4_ZERO_INIT) {
vec4 v = GLM_VEC4_ZERO_INIT;
ASSERT(test_eq(v[0], 0.0f))
ASSERT(test_eq(v[1], 0.0f))
ASSERT(test_eq(v[2], 0.0f))
ASSERT(test_eq(v[3], 0.0f))
TEST_SUCCESS
}
TEST_IMPL(MACRO_GLM_VEC4_ONE) {
ASSERT(test_eq(GLM_VEC4_ONE[0], 1.0f))
ASSERT(test_eq(GLM_VEC4_ONE[1], 1.0f))
ASSERT(test_eq(GLM_VEC4_ONE[2], 1.0f))
ASSERT(test_eq(GLM_VEC4_ONE[3], 1.0f))
TEST_SUCCESS
}
TEST_IMPL(MACRO_GLM_VEC4_ZERO) {
ASSERT(test_eq(GLM_VEC4_ZERO[0], 0.0f))
ASSERT(test_eq(GLM_VEC4_ZERO[1], 0.0f))
ASSERT(test_eq(GLM_VEC4_ZERO[2], 0.0f))
ASSERT(test_eq(GLM_VEC4_ZERO[3], 0.0f))
TEST_SUCCESS
}
TEST_IMPL(MACRO_GLM_XXXX) {
/* GLM_XXXX must encode the shuffle that broadcasts component 0 (x) */
ASSERT(TEST_GLM_SHUFFLE4(0, 0, 0, 0) == GLM_XXXX)
TEST_SUCCESS
}
TEST_IMPL(MACRO_GLM_YYYY) {
/* GLM_YYYY must encode the shuffle that broadcasts component 1 (y) */
ASSERT(TEST_GLM_SHUFFLE4(1, 1, 1, 1) == GLM_YYYY)
TEST_SUCCESS
}
TEST_IMPL(MACRO_GLM_ZZZZ) {
/* GLM_ZZZZ must encode the shuffle that broadcasts component 2 (z) */
ASSERT(TEST_GLM_SHUFFLE4(2, 2, 2, 2) == GLM_ZZZZ)
TEST_SUCCESS
}
TEST_IMPL(MACRO_GLM_WZYX) {
/* GLM_WZYX must encode the shuffle that reverses component order */
ASSERT(TEST_GLM_SHUFFLE4(0, 1, 2, 3) == GLM_WZYX)
TEST_SUCCESS
}
/* Deprecated */
TEST_IMPL(MACRO_glm_vec4_dup) {
/* deprecated API: dup must still copy all four components into v2 */
vec4 v1 = {13.0f, 12.0f, 11.0f, 56.0f}, v2;
glm_vec4_dup(v1, v2);
ASSERTIFY(test_assert_vec4_eq(v1, v2))
TEST_SUCCESS
}
TEST_IMPL(MACRO_glm_vec4_flipsign) {
/* deprecated API: both the inline and the pre-compiled flipsign must
   negate every component in place (v3 is the expected result) */
vec4 v1 = {13.0f, -12.0f, 11.0f, 56.0f},
v2 = {13.0f, -12.0f, 11.0f, 56.0f},
v3 = {-13.0f, 12.0f, -11.0f, -56.0f};
glm_vec4_flipsign(v1);
glmc_vec4_flipsign(v2);
ASSERTIFY(test_assert_vec4_eq(v1, v3))
ASSERTIFY(test_assert_vec4_eq(v2, v3))
TEST_SUCCESS
}
TEST_IMPL(MACRO_glm_vec4_flipsign_to) {
/* deprecated API: flipsign_to must write the negation into the
   destination (v2 is the expected result), leaving v1 untouched */
vec4 v1 = {13.0f, -12.0f, 11.0f, 56.0f},
v2 = {-13.0f, 12.0f, -11.0f, -56.0f},
v3, v4;
glm_vec4_flipsign_to(v1, v3);
glmc_vec4_flipsign_to(v1, v4);
ASSERTIFY(test_assert_vec4_eq(v2, v3))
ASSERTIFY(test_assert_vec4_eq(v2, v4))
TEST_SUCCESS
}
TEST_IMPL(MACRO_glm_vec4_inv) {
/* deprecated API: inv must negate every component in place
   (v3 is the expected result) */
vec4 v1 = {13.0f, -12.0f, 11.0f, 56.0f},
v2 = {13.0f, -12.0f, 11.0f, 56.0f},
v3 = {-13.0f, 12.0f, -11.0f, -56.0f};
glm_vec4_inv(v1);
glmc_vec4_inv(v2);
ASSERTIFY(test_assert_vec4_eq(v1, v3))
ASSERTIFY(test_assert_vec4_eq(v2, v3))
TEST_SUCCESS
}
TEST_IMPL(MACRO_glm_vec4_inv_to) {
/* deprecated API: inv_to must write the negation into the destination;
   inline and pre-compiled versions must agree with each other (v3 == v4)
   and with the expected result v2 */
vec4 v1 = {13.0f, -12.0f, 11.0f, 56.0f},
v2 = {-13.0f, 12.0f, -11.0f, -56.0f},
v3, v4;
glm_vec4_inv_to(v1, v3);
glmc_vec4_inv_to(v1, v4);
ASSERTIFY(test_assert_vec4_eq(v3, v4))
ASSERTIFY(test_assert_vec4_eq(v2, v3))
TEST_SUCCESS
}
TEST_IMPL(MACRO_glm_vec4_mulv) {
/* deprecated API: mulv must multiply component-wise; inline and
   pre-compiled versions must agree and match the scalar products */
vec4 v1 = {2.0f, -3.0f, 4.0f, 56.0f},
v2 = {-3.0f, 4.0f, -5.0f, 56.0f},
v3, v4;
glm_vec4_mulv(v1, v2, v3);
glmc_vec4_mulv(v1, v2, v4);
ASSERTIFY(test_assert_vec4_eq(v3, v4))
ASSERT(test_eq(v1[0] * v2[0], v3[0]))
ASSERT(test_eq(v1[1] * v2[1], v3[1]))
ASSERT(test_eq(v1[2] * v2[2], v3[2]))
ASSERT(test_eq(v1[3] * v2[3], v3[3]))
TEST_SUCCESS
}
#endif /* CGLM_TEST_VEC4_ONCE */
/* --- */
TEST_IMPL(GLM_PREFIX, vec4) {
/* vec4(v3, w, dest): build a vec4 from a vec3 plus a scalar w */
vec4 v1 = {10.0f, 9.0f, 8.0f}; /* only xyz used; read as a vec3 below */
vec4 v2 = {10.0f, 9.0f, 8.0f, 7.0f};
vec4 v3;
GLM(vec4)(v1, 7.0f, v3);
ASSERTIFY(test_assert_vec4_eq(v2, v3))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_copy3) {
/* copy3 must extract the xyz part of a vec4 into a vec3 */
vec4 v4 = {10.0f, 9.0f, 8.0f, 7.0f};
vec3 v3;
GLM(vec4_copy3)(v4, v3);
ASSERTIFY(test_assert_vec3_eq(v3, v4))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_copy) {
/* copy must overwrite all four components of the destination */
vec4 v1 = {10.0f, 9.0f, 8.0f, 78.0f};
vec4 v2 = {1.0f, 2.0f, 3.0f, 4.0f};
GLM(vec4_copy)(v1, v2);
ASSERTIFY(test_assert_vec4_eq(v1, v2))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_ucopy) {
/* ucopy (unaligned-safe copy) must behave exactly like copy */
vec4 v1 = {10.0f, 9.0f, 8.0f, 78.0f};
vec4 v2 = {1.0f, 2.0f, 3.0f, 4.0f};
GLM(vec4_ucopy)(v1, v2);
ASSERTIFY(test_assert_vec4_eq(v1, v2))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_zero) {
  /* zeroing must clear every component of both vectors */
  vec4 v1 = {10.0f, 9.0f, 8.0f, 78.0f};
  vec4 v2 = {1.0f, 2.0f, 3.0f, 4.0f};

  GLM(vec4_zero)(v1);
  GLM(vec4_zero)(v2);

  ASSERTIFY(test_assert_vec4_eq(v1, GLM_VEC4_ZERO))
  /* fix: the second check previously re-tested v1, leaving v2 unverified */
  ASSERTIFY(test_assert_vec4_eq(v2, GLM_VEC4_ZERO))

  TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_one) {
  /* vec4_one must set every component of both vectors to 1.0f */
  vec4 v1 = {10.0f, 9.0f, 8.0f, 78.0f};
  vec4 v2 = {1.0f, 2.0f, 3.0f, 4.0f};

  GLM(vec4_one)(v1);
  GLM(vec4_one)(v2);

  /* fix: use the vec4 comparator (vec3_eq ignored the w component) and
     verify v2 as well instead of re-checking v1 */
  ASSERTIFY(test_assert_vec4_eq(v1, GLM_VEC4_ONE))
  ASSERTIFY(test_assert_vec4_eq(v2, GLM_VEC4_ONE))

  TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_dot) {
  /* dot product must match a scalar reference accumulation */
  vec4  a = {10.0f, 9.0f, 8.0f, 78.0f};
  vec4  b = {1.0f, 2.0f, 3.0f, 4.0f};
  float dot1, dot2;
  int   i;

  dot1 = GLM(vec4_dot)(a, b);

  dot2 = 0.0f;
  for (i = 0; i < 4; i++)
    dot2 += a[i] * b[i];

  ASSERT(test_eq(dot1, dot2))
  TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_norm2) {
  /* squared norm must equal the sum of squared components */
  vec4  a = {10.0f, 9.0f, 8.0f, 78.0f};
  float n1, n2;
  int   i;

  n1 = GLM(vec4_norm2)(a);

  n2 = 0.0f;
  for (i = 0; i < 4; i++)
    n2 += a[i] * a[i];

  ASSERT(test_eq(n1, n2))
  TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_norm) {
vec3 a = {10.0f, 9.0f, 8.0f};
float n1, n2;
n1 = GLM(vec3_norm)(a);
n2 = sqrtf(a[0] * a[0] + a[1] * a[1] + a[2] * a[2]);
ASSERT(test_eq(n1, n2))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_norm_one) {
  /* L1 norm must equal the sum of absolute component values */
  vec4  a = {-10.0f, 9.0f, -8.0f, 78.0f};
  float n1, n2;
  int   i;

  n1 = GLM(vec4_norm_one)(a);

  n2 = 0.0f;
  for (i = 0; i < 4; i++)
    n2 += fabsf(a[i]);

  ASSERT(test_eq(n1, n2))
  TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_norm_inf) {
  /* infinity norm must equal the largest absolute component */
  vec4  a = {-10.0f, 9.0f, -8.0f, 78.0f};
  float n1, n2;
  int   i;

  n1 = GLM(vec4_norm_inf)(a);

  n2 = fabsf(a[0]);
  for (i = 1; i < 4; i++) {
    if (n2 < fabsf(a[i]))
      n2 = fabsf(a[i]);
  }

  ASSERT(test_eq(n1, n2))
  TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_add) {
  /* vector add must match the component-wise scalar sum */
  vec4 a = {-10.0f, 9.0f, -8.0f, 56.0f};
  vec4 b = {12.0f, 19.0f, -18.0f, 1.0f};
  vec4 c, d;
  int  i;

  for (i = 0; i < 4; i++)
    c[i] = a[i] + b[i];

  GLM(vec4_add)(a, b, d);
  ASSERTIFY(test_assert_vec4_eq(c, d))

  TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_adds) {
  /* scalar add must add s to every component */
  vec4  a = {-10.0f, 9.0f, -8.0f, 56.0f};
  vec4  c, d;
  float s = 7.0f;
  int   i;

  for (i = 0; i < 4; i++)
    c[i] = a[i] + s;

  GLM(vec4_adds)(a, s, d);
  ASSERTIFY(test_assert_vec4_eq(c, d))

  TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_sub) {
  /* vector subtract must match the component-wise scalar difference */
  vec4 a = {-10.0f, 9.0f, -8.0f, 56.0f};
  vec4 b = {12.0f, 19.0f, -18.0f, 1.0f};
  vec4 c, d;
  int  i;

  for (i = 0; i < 4; i++)
    c[i] = a[i] - b[i];

  GLM(vec4_sub)(a, b, d);
  ASSERTIFY(test_assert_vec4_eq(c, d))

  TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_subs) {
  /* scalar subtract must subtract s from every component */
  vec4  a = {-10.0f, 9.0f, -8.0f, 74.0f};
  vec4  c, d;
  float s = 7.0f;
  int   i;

  for (i = 0; i < 4; i++)
    c[i] = a[i] - s;

  GLM(vec4_subs)(a, s, d);
  ASSERTIFY(test_assert_vec4_eq(c, d))

  TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_mul) {
vec4 v1 = {2.0f, -3.0f, 4.0f, 56.0f},
v2 = {-3.0f, 4.0f, -5.0f, 46.0f},
v3;
GLM(vec4_mul)(v1, v2, v3);
ASSERT(test_eq(v1[0] * v2[0], v3[0]))
ASSERT(test_eq(v1[1] * v2[1], v3[1]))
ASSERT(test_eq(v1[2] * v2[2], v3[2]))
ASSERT(test_eq(v1[3] * v2[3], v3[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_scale) {
vec4 v1 = {2.0f, -3.0f, 4.0f, 5.0f}, v2;
float s = 7.0f;
GLM(vec4_scale)(v1, s, v2);
ASSERT(test_eq(v1[0] * s, v2[0]))
ASSERT(test_eq(v1[1] * s, v2[1]))
ASSERT(test_eq(v1[2] * s, v2[2]))
ASSERT(test_eq(v1[3] * s, v2[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_scale_as) {
/* scale_as must scale v1 to have length s, i.e. v2 = v1 * (s / |v1|) */
vec4 v1 = {2.0f, -3.0f, 4.0f, 5.0f}, v2;
float s = 7.0f;
float norm;
GLM(vec4_scale_as)(v1, s, v2);
norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2] + v1[3] * v1[3]);
/* defensive zero-length branch; never taken for this fixed input */
if (norm == 0.0f) {
ASSERT(test_eq(v1[0], 0.0f))
ASSERT(test_eq(v1[1], 0.0f))
ASSERT(test_eq(v1[2], 0.0f))
ASSERT(test_eq(v1[3], 0.0f))
TEST_SUCCESS
}
norm = s / norm;
ASSERT(test_eq(v1[0] * norm, v2[0]))
ASSERT(test_eq(v1[1] * norm, v2[1]))
ASSERT(test_eq(v1[2] * norm, v2[2]))
ASSERT(test_eq(v1[3] * norm, v2[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_div) {
vec4 v1 = {2.0f, -3.0f, 4.0f, 40.0f},
v2 = {-3.0f, 4.0f, -5.0f, 2.0f},
v3;
GLM(vec4_div)(v1, v2, v3);
ASSERT(test_eq(v1[0] / v2[0], v3[0]))
ASSERT(test_eq(v1[1] / v2[1], v3[1]))
ASSERT(test_eq(v1[2] / v2[2], v3[2]))
ASSERT(test_eq(v1[3] / v2[3], v3[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_divs) {
vec4 v1 = {2.0f, -3.0f, 4.0f, 40.0f}, v2;
float s = 7.0f;
GLM(vec4_divs)(v1, s, v2);
ASSERT(test_eq(v1[0] / s, v2[0]))
ASSERT(test_eq(v1[1] / s, v2[1]))
ASSERT(test_eq(v1[2] / s, v2[2]))
ASSERT(test_eq(v1[3] / s, v2[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_addadd) {
vec4 v1 = {2.0f, -3.0f, 4.0f, 4.0f},
v2 = {-3.0f, 4.0f, -5.0f, 20.0f},
v3 = {1.0f, 2.0f, 3.0f, 130.0f},
v4 = {1.0f, 2.0f, 3.0f, 130.0f};
GLM(vec4_addadd)(v1, v2, v4);
ASSERT(test_eq(v3[0] + v1[0] + v2[0], v4[0]))
ASSERT(test_eq(v3[1] + v1[1] + v2[1], v4[1]))
ASSERT(test_eq(v3[2] + v1[2] + v2[2], v4[2]))
ASSERT(test_eq(v3[3] + v1[3] + v2[3], v4[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_subadd) {
vec4 v1 = {2.0f, -3.0f, 4.0f, 4.0f},
v2 = {-3.0f, 4.0f, -5.0f, 20.0f},
v3 = {1.0f, 2.0f, 3.0f, 130.0f},
v4 = {1.0f, 2.0f, 3.0f, 130.0f};
GLM(vec4_subadd)(v1, v2, v4);
ASSERT(test_eq(v3[0] + v1[0] - v2[0], v4[0]))
ASSERT(test_eq(v3[1] + v1[1] - v2[1], v4[1]))
ASSERT(test_eq(v3[2] + v1[2] - v2[2], v4[2]))
ASSERT(test_eq(v3[3] + v1[3] - v2[3], v4[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_muladd) {
vec4 v1 = {2.0f, -3.0f, 4.0f, 4.0f},
v2 = {-3.0f, 4.0f, -5.0f, 20.0f},
v3 = {1.0f, 2.0f, 3.0f, 130.0f},
v4 = {1.0f, 2.0f, 3.0f, 130.0f};
GLM(vec4_muladd)(v1, v2, v4);
ASSERT(test_eq(v3[0] + v1[0] * v2[0], v4[0]))
ASSERT(test_eq(v3[1] + v1[1] * v2[1], v4[1]))
ASSERT(test_eq(v3[2] + v1[2] * v2[2], v4[2]))
ASSERT(test_eq(v3[3] + v1[3] * v2[3], v4[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_muladds) {
vec4 v1 = {2.0f, -3.0f, 4.0f, 4.0f},
v2 = {-3.0f, 4.0f, -5.0f, 20.0f},
v3 = {-3.0f, 4.0f, -5.0f, 20.0f};
float s = 9.0f;
GLM(vec4_muladds)(v1, s, v3);
ASSERT(test_eq(v2[0] + v1[0] * s, v3[0]))
ASSERT(test_eq(v2[1] + v1[1] * s, v3[1]))
ASSERT(test_eq(v2[2] + v1[2] * s, v3[2]))
ASSERT(test_eq(v2[3] + v1[3] * s, v3[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_maxadd) {
vec4 v1 = {2.0f, -3.0f, 4.0f, 4.0f},
v2 = {-3.0f, 4.0f, -5.0f, 20.0f},
v3 = {1.0f, 2.0f, 3.0f, 130.0f},
v4 = {1.0f, 2.0f, 3.0f, 130.0f};
GLM(vec4_maxadd)(v1, v2, v4);
ASSERT(test_eq(v3[0] + glm_max(v1[0], v2[0]), v4[0]))
ASSERT(test_eq(v3[1] + glm_max(v1[1], v2[1]), v4[1]))
ASSERT(test_eq(v3[2] + glm_max(v1[2], v2[2]), v4[2]))
ASSERT(test_eq(v3[3] + glm_max(v1[3], v2[3]), v4[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_minadd) {
vec4 v1 = {2.0f, -3.0f, 4.0f, 4.0f},
v2 = {-3.0f, 4.0f, -5.0f, 20.0f},
v3 = {1.0f, 2.0f, 3.0f, 130.0f},
v4 = {1.0f, 2.0f, 3.0f, 130.0f};
GLM(vec4_minadd)(v1, v2, v4);
ASSERT(test_eq(v3[0] + glm_min(v1[0], v2[0]), v4[0]))
ASSERT(test_eq(v3[1] + glm_min(v1[1], v2[1]), v4[1]))
ASSERT(test_eq(v3[2] + glm_min(v1[2], v2[2]), v4[2]))
ASSERT(test_eq(v3[3] + glm_min(v1[3], v2[3]), v4[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_negate_to) {
vec4 v1 = {2.0f, -3.0f, 4.0f, 60.0f},
v2 = {-3.0f, 4.0f, -5.0f, 34.0f},
v3, v4;
GLM(vec4_negate_to)(v1, v3);
GLM(vec4_negate_to)(v2, v4);
ASSERT(test_eq(-v1[0], v3[0]))
ASSERT(test_eq(-v1[1], v3[1]))
ASSERT(test_eq(-v1[2], v3[2]))
ASSERT(test_eq(-v1[3], v3[3]))
ASSERT(test_eq(-v2[0], v4[0]))
ASSERT(test_eq(-v2[1], v4[1]))
ASSERT(test_eq(-v2[2], v4[2]))
ASSERT(test_eq(-v2[3], v4[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_negate) {
vec4 v1 = {2.0f, -3.0f, 4.0f, 60.0f},
v2 = {-3.0f, 4.0f, -5.0f, 34.0f},
v3 = {2.0f, -3.0f, 4.0f, 60.0f},
v4 = {-3.0f, 4.0f, -5.0f, 34.0f};
GLM(vec4_negate)(v1);
GLM(vec4_negate)(v2);
ASSERT(test_eq(-v1[0], v3[0]))
ASSERT(test_eq(-v1[1], v3[1]))
ASSERT(test_eq(-v1[2], v3[2]))
ASSERT(test_eq(-v1[3], v3[3]))
ASSERT(test_eq(-v2[0], v4[0]))
ASSERT(test_eq(-v2[1], v4[1]))
ASSERT(test_eq(-v2[2], v4[2]))
ASSERT(test_eq(-v2[3], v4[3]))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_normalize) {
/* in-place normalize must scale the vector to unit length; the zero
   vector must stay zero instead of producing NaNs */
vec4 v1 = {2.0f, -3.0f, 4.0f, 5.0f}, v2 = {2.0f, -3.0f, 4.0f, 5.0f};
float s = 1.0f;
float norm;
GLM(vec4_normalize)(v2);
norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2] + v1[3] * v1[3]);
/* defensive zero-length branch; never taken for this fixed input */
if (norm == 0.0f) {
ASSERT(test_eq(v1[0], 0.0f))
ASSERT(test_eq(v1[1], 0.0f))
ASSERT(test_eq(v1[2], 0.0f))
ASSERT(test_eq(v1[3], 0.0f))
TEST_SUCCESS
}
norm = s / norm;
ASSERT(test_eq(v1[0] * norm, v2[0]))
ASSERT(test_eq(v1[1] * norm, v2[1]))
ASSERT(test_eq(v1[2] * norm, v2[2]))
ASSERT(test_eq(v1[3] * norm, v2[3]))
/* normalizing the zero vector must yield the zero vector */
glm_vec4_zero(v1);
GLM(vec4_normalize)(v1);
ASSERTIFY(test_assert_vec4_eq(v1, GLM_VEC4_ZERO))
TEST_SUCCESS
}
TEST_IMPL(GLM_PREFIX, vec4_normalize_to) {
/* normalize_to must write the unit-length vector into the destination;
   a zero-length source must produce a zero destination */
vec4 v1 = {2.0f, -3.0f, 4.0f, 5.0f}, v2;
float s = 1.0f;
float norm;
GLM(vec4_normalize_to)(v1, v2);
norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2] + v1[3] * v1[3]);
/* defensive zero-length branch; never taken for this fixed input */
if (norm == 0.0f) {
ASSERT(test_eq(v1[0], 0.0f))
ASSERT(test_eq(v1[1], 0.0f))
ASSERT(test_eq(v1[2], 0.0f))
ASSERT(test_eq(v1[3], 0.0f))
TEST_SUCCESS
}
norm = s / norm;
ASSERT(test_eq(v1[0] * norm, v2[0]))
ASSERT(test_eq(v1[1] * norm, v2[1]))
ASSERT(test_eq(v1[2] * norm, v2[2]))
ASSERT(test_eq(v1[3] * norm, v2[3]))
/* normalizing the zero vector must yield the zero vector */
glm_vec4_zero(v1);
GLM(vec4_normalize_to)(v1, v2);
ASSERTIFY(test_assert_vec4_eq(v2, GLM_VEC4_ZERO))
TEST_SUCCESS
}

View File

@@ -277,7 +277,6 @@ TEST_DECLARE(glmc_vec3_smoothstep)
TEST_DECLARE(glmc_vec3_smoothinterp)
TEST_DECLARE(glmc_vec3_smoothinterpc)
TEST_DECLARE(glmc_vec3_swizzle)
TEST_DECLARE(glmc_vec3_broadcast)
TEST_DECLARE(glmc_vec3_fill)
TEST_DECLARE(glmc_vec3_eq)
@@ -297,7 +296,84 @@ TEST_DECLARE(glmc_vec3_hadd)
TEST_DECLARE(glmc_vec3_sqrt)
/* vec4 */
TEST_DECLARE(vec4)
TEST_DECLARE(MACRO_GLM_VEC4_ONE_INIT)
TEST_DECLARE(MACRO_GLM_VEC4_ZERO_INIT)
TEST_DECLARE(MACRO_GLM_VEC4_ONE)
TEST_DECLARE(MACRO_GLM_VEC4_ZERO)
TEST_DECLARE(MACRO_GLM_XXXX)
TEST_DECLARE(MACRO_GLM_YYYY)
TEST_DECLARE(MACRO_GLM_ZZZZ)
TEST_DECLARE(MACRO_GLM_WZYX)
TEST_DECLARE(MACRO_glm_vec4_dup)
TEST_DECLARE(MACRO_glm_vec4_flipsign)
TEST_DECLARE(MACRO_glm_vec4_flipsign_to)
TEST_DECLARE(MACRO_glm_vec4_inv)
TEST_DECLARE(MACRO_glm_vec4_inv_to)
TEST_DECLARE(MACRO_glm_vec4_mulv)
TEST_DECLARE(glm_vec4)
TEST_DECLARE(glm_vec4_copy3)
TEST_DECLARE(glm_vec4_copy)
TEST_DECLARE(glm_vec4_ucopy)
TEST_DECLARE(glm_vec4_zero)
TEST_DECLARE(glm_vec4_one)
TEST_DECLARE(glm_vec4_dot)
TEST_DECLARE(glm_vec4_norm2)
TEST_DECLARE(glm_vec4_norm)
TEST_DECLARE(glm_vec4_norm_one)
TEST_DECLARE(glm_vec4_norm_inf)
TEST_DECLARE(glm_vec4_add)
TEST_DECLARE(glm_vec4_adds)
TEST_DECLARE(glm_vec4_sub)
TEST_DECLARE(glm_vec4_subs)
TEST_DECLARE(glm_vec4_mul)
TEST_DECLARE(glm_vec4_scale)
TEST_DECLARE(glm_vec4_scale_as)
TEST_DECLARE(glm_vec4_div)
TEST_DECLARE(glm_vec4_divs)
TEST_DECLARE(glm_vec4_addadd)
TEST_DECLARE(glm_vec4_subadd)
TEST_DECLARE(glm_vec4_muladd)
TEST_DECLARE(glm_vec4_muladds)
TEST_DECLARE(glm_vec4_maxadd)
TEST_DECLARE(glm_vec4_minadd)
TEST_DECLARE(glm_vec4_negate_to)
TEST_DECLARE(glm_vec4_negate)
TEST_DECLARE(glm_vec4_normalize)
TEST_DECLARE(glm_vec4_normalize_to)
TEST_DECLARE(glmc_vec4)
TEST_DECLARE(glmc_vec4_copy3)
TEST_DECLARE(glmc_vec4_copy)
TEST_DECLARE(glmc_vec4_ucopy)
TEST_DECLARE(glmc_vec4_zero)
TEST_DECLARE(glmc_vec4_one)
TEST_DECLARE(glmc_vec4_dot)
TEST_DECLARE(glmc_vec4_norm2)
TEST_DECLARE(glmc_vec4_norm)
TEST_DECLARE(glmc_vec4_norm_one)
TEST_DECLARE(glmc_vec4_norm_inf)
TEST_DECLARE(glmc_vec4_add)
TEST_DECLARE(glmc_vec4_adds)
TEST_DECLARE(glmc_vec4_sub)
TEST_DECLARE(glmc_vec4_subs)
TEST_DECLARE(glmc_vec4_mul)
TEST_DECLARE(glmc_vec4_scale)
TEST_DECLARE(glmc_vec4_scale_as)
TEST_DECLARE(glmc_vec4_div)
TEST_DECLARE(glmc_vec4_divs)
TEST_DECLARE(glmc_vec4_addadd)
TEST_DECLARE(glmc_vec4_subadd)
TEST_DECLARE(glmc_vec4_muladd)
TEST_DECLARE(glmc_vec4_muladds)
TEST_DECLARE(glmc_vec4_maxadd)
TEST_DECLARE(glmc_vec4_minadd)
TEST_DECLARE(glmc_vec4_negate_to)
TEST_DECLARE(glmc_vec4_negate)
TEST_DECLARE(glmc_vec4_normalize)
TEST_DECLARE(glmc_vec4_normalize_to)
/*****************************************************************************/
@@ -434,7 +510,6 @@ TEST_LIST {
TEST_ENTRY(MACRO_GLM_YYY)
TEST_ENTRY(MACRO_GLM_ZZZ)
TEST_ENTRY(MACRO_GLM_ZYX)
TEST_ENTRY(MACRO_glm_vec3_dup)
TEST_ENTRY(MACRO_glm_vec3_flipsign)
TEST_ENTRY(MACRO_glm_vec3_flipsign_to)
@@ -585,7 +660,83 @@ TEST_LIST {
TEST_ENTRY(glmc_vec3_sqrt)
/* vec4 */
TEST_ENTRY(vec4)
TEST_ENTRY(MACRO_GLM_VEC4_ONE_INIT)
TEST_ENTRY(MACRO_GLM_VEC4_ZERO_INIT)
TEST_ENTRY(MACRO_GLM_VEC4_ONE)
TEST_ENTRY(MACRO_GLM_VEC4_ZERO)
TEST_ENTRY(MACRO_GLM_XXXX)
TEST_ENTRY(MACRO_GLM_YYYY)
TEST_ENTRY(MACRO_GLM_ZZZZ)
TEST_ENTRY(MACRO_GLM_WZYX)
TEST_ENTRY(MACRO_glm_vec4_dup)
TEST_ENTRY(MACRO_glm_vec4_flipsign)
TEST_ENTRY(MACRO_glm_vec4_flipsign_to)
TEST_ENTRY(MACRO_glm_vec4_inv)
TEST_ENTRY(MACRO_glm_vec4_inv_to)
TEST_ENTRY(MACRO_glm_vec4_mulv)
TEST_ENTRY(glm_vec4)
TEST_ENTRY(glm_vec4_copy3)
TEST_ENTRY(glm_vec4_copy)
TEST_ENTRY(glm_vec4_ucopy)
TEST_ENTRY(glm_vec4_zero)
TEST_ENTRY(glm_vec4_one)
TEST_ENTRY(glm_vec4_dot)
TEST_ENTRY(glm_vec4_norm2)
TEST_ENTRY(glm_vec4_norm)
TEST_ENTRY(glm_vec4_norm_one)
TEST_ENTRY(glm_vec4_norm_inf)
TEST_ENTRY(glm_vec4_add)
TEST_ENTRY(glm_vec4_adds)
TEST_ENTRY(glm_vec4_sub)
TEST_ENTRY(glm_vec4_subs)
TEST_ENTRY(glm_vec4_mul)
TEST_ENTRY(glm_vec4_scale)
TEST_ENTRY(glm_vec4_scale_as)
TEST_ENTRY(glm_vec4_div)
TEST_ENTRY(glm_vec4_divs)
TEST_ENTRY(glm_vec4_addadd)
TEST_ENTRY(glm_vec4_subadd)
TEST_ENTRY(glm_vec4_muladd)
TEST_ENTRY(glm_vec4_muladds)
TEST_ENTRY(glm_vec4_maxadd)
TEST_ENTRY(glm_vec4_minadd)
TEST_ENTRY(glm_vec4_negate_to)
TEST_ENTRY(glm_vec4_negate)
TEST_ENTRY(glm_vec4_normalize)
TEST_ENTRY(glm_vec4_normalize_to)
TEST_ENTRY(glmc_vec4)
TEST_ENTRY(glmc_vec4_copy3)
TEST_ENTRY(glmc_vec4_copy)
TEST_ENTRY(glmc_vec4_ucopy)
TEST_ENTRY(glmc_vec4_zero)
TEST_ENTRY(glmc_vec4_one)
TEST_ENTRY(glmc_vec4_dot)
TEST_ENTRY(glmc_vec4_norm2)
TEST_ENTRY(glmc_vec4_norm)
TEST_ENTRY(glmc_vec4_norm_one)
TEST_ENTRY(glmc_vec4_norm_inf)
TEST_ENTRY(glmc_vec4_add)
TEST_ENTRY(glmc_vec4_adds)
TEST_ENTRY(glmc_vec4_sub)
TEST_ENTRY(glmc_vec4_subs)
TEST_ENTRY(glmc_vec4_mul)
TEST_ENTRY(glmc_vec4_scale)
TEST_ENTRY(glmc_vec4_scale_as)
TEST_ENTRY(glmc_vec4_div)
TEST_ENTRY(glmc_vec4_divs)
TEST_ENTRY(glmc_vec4_addadd)
TEST_ENTRY(glmc_vec4_subadd)
TEST_ENTRY(glmc_vec4_muladd)
TEST_ENTRY(glmc_vec4_muladds)
TEST_ENTRY(glmc_vec4_maxadd)
TEST_ENTRY(glmc_vec4_minadd)
TEST_ENTRY(glmc_vec4_negate_to)
TEST_ENTRY(glmc_vec4_negate)
TEST_ENTRY(glmc_vec4_normalize)
TEST_ENTRY(glmc_vec4_normalize_to)
};
#endif /* tests_h */