diff --git a/.github/workflows/ccpp.yml b/.github/workflows/ccpp.yml deleted file mode 100644 index 3621c64..0000000 --- a/.github/workflows/ccpp.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: C/C++ CI - -on: [push] - -jobs: - build: - - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, macOS-latest] - - steps: - - uses: actions/checkout@v1 - - name: dependencies - run: sh ./build-deps.sh - - name: autogen - run: sh autogen.sh - - name: configure - run: ./configure - - name: make - run: make - - name: make check - run: make check diff --git a/.gitignore b/.gitignore index 0655cc2..8f9fd0c 100644 --- a/.gitignore +++ b/.gitignore @@ -51,7 +51,6 @@ cscope.* test/*.trs test/test_* *.log -test-* test/.libs/* test/tests cglm_arm/* @@ -71,3 +70,5 @@ win/x85 win/Debug cglm-test-ios* /cglm.pc +test-driver +Default-568h@2x.png diff --git a/.gitmodules b/.gitmodules index 4a3bbdc..e69de29 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "test/lib/cmocka"] - path = test/lib/cmocka - url = git://git.cryptomilk.org/projects/cmocka.git diff --git a/.travis.yml b/.travis.yml index 9f2a546..31b5574 100644 --- a/.travis.yml +++ b/.travis.yml @@ -37,10 +37,9 @@ branches: - master script: - - sh ./build-deps.sh - sh ./autogen.sh - if [[ "$CC" == "gcc" && "$CODE_COVERAGE" == "ON" ]]; then - ./configure CFLAGS="-ftest-coverage -fprofile-arcs"; + ./configure CFLAGS="-ftest-coverage -fprofile-arcs -coverage"; else ./configure; fi @@ -49,14 +48,15 @@ script: after_success: - if [[ "$CC" == "gcc" && "$CODE_COVERAGE" == "ON" ]]; then - pip install --user cpp-coveralls && + pip install --user cpp-coveralls && coveralls --build-root . --exclude lib --exclude test --gcov-options '\-lp' - --verbose; + --verbose && + bash <(curl -s https://codecov.io/bash); fi -after_failure: - - cat ./test-suite.log +# after_failure: +# - cat ./test-suite.log diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 567642d..f25a6ab 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,11 +1,11 @@ # CONTRIBUTING -Any contributions (code, documentation, ...) are welcome. This project uses [cmocka](http://cmocka.org) for testing, you may need to check their documentation +Any contributions (code, documentation, ...) are welcome. # New Features - This library may not accept all new features, it is better to create an issue and get approval before coding - You must add test for every new feature -- The feature must be compiled in both UNIX/POSIX systems (e.g. macos, linux...) and Windows +- The feature must be compiled on both UNIX/POSIX systems (e.g. macos, linux...) 
and Windows # Code Style This library is written with C99, don't try to add C++ files (yes it can compiled into lib), diff --git a/Makefile.am b/Makefile.am index 922aefe..f755b65 100644 --- a/Makefile.am +++ b/Makefile.am @@ -7,23 +7,21 @@ #****************************************************************************** ACLOCAL_AMFLAGS = -I m4 - AM_CFLAGS = -Wall \ - -std=gnu99 \ + -std=gnu11 \ -O3 \ -Wstrict-aliasing=2 \ -fstrict-aliasing \ - -pedantic + -pedantic \ + -Werror=strict-prototypes lib_LTLIBRARIES = libcglm.la libcglm_la_LDFLAGS = -no-undefined -version-info 0:1:0 checkLDFLAGS = -L./.libs \ - -L./test/lib/cmocka/build/src \ - -lcmocka \ -lm \ -lcglm -checkCFLAGS = -I./test/lib/cmocka/include \ +checkCFLAGS = $(AM_CFLAGS) \ -I./include check_PROGRAMS = test/tests @@ -34,14 +32,16 @@ test_tests_CFLAGS = $(checkCFLAGS) cglmdir=$(includedir)/cglm cglm_HEADERS = include/cglm/version.h \ + include/cglm/common.h \ + include/cglm/types.h \ + include/cglm/types-struct.h \ include/cglm/cglm.h \ include/cglm/call.h \ + include/cglm/struct.h \ include/cglm/cam.h \ include/cglm/io.h \ include/cglm/mat4.h \ include/cglm/mat3.h \ - include/cglm/types.h \ - include/cglm/common.h \ include/cglm/affine.h \ include/cglm/vec3.h \ include/cglm/vec3-ext.h \ @@ -60,9 +60,8 @@ cglm_HEADERS = include/cglm/version.h \ include/cglm/ease.h \ include/cglm/curve.h \ include/cglm/bezier.h \ - include/cglm/types-struct.h \ - include/cglm/struct.h - + include/cglm/applesimd.h + cglm_calldir=$(includedir)/cglm/call cglm_call_HEADERS = include/cglm/call/mat4.h \ include/cglm/call/mat3.h \ @@ -140,19 +139,14 @@ libcglm_la_SOURCES=\ src/bezier.c test_tests_SOURCES=\ + test/runner.c \ test/src/test_common.c \ - test/src/test_main.c \ - test/src/test_mat4.c \ + test/src/tests.c \ test/src/test_cam.c \ - test/src/test_project.c \ test/src/test_clamp.c \ test/src/test_euler.c \ - test/src/test_quat.c \ - test/src/test_vec4.c \ - test/src/test_vec3.c \ - test/src/test_mat3.c \ - test/src/test_affine.c \ - test/src/test_bezier.c + test/src/test_bezier.c \ + test/src/test_struct.c pkgconfig_DATA=cglm.pc @@ -160,7 +154,7 @@ pkgconfig_DATA=cglm.pc # the source directory that post-build.sh is in. When not # using a prefix, $VPATH will be unset, so we need to fall # back to using . to run the script. 
-export VPATH +#export VPATH -all-local: - sh $${VPATH:-.}/post-build.sh +# all-local: +# sh $${VPATH:-.}/post-build.sh diff --git a/README.md b/README.md index 65049dd..5243140 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ [![Build status](https://ci.appveyor.com/api/projects/status/av7l3gc0yhfex8y4/branch/master?svg=true)](https://ci.appveyor.com/project/recp/cglm/branch/master) [![Documentation Status](https://readthedocs.org/projects/cglm/badge/?version=latest)](http://cglm.readthedocs.io/en/latest/?badge=latest) [![Coverage Status](https://coveralls.io/repos/github/recp/cglm/badge.svg?branch=master)](https://coveralls.io/github/recp/cglm?branch=master) +[![codecov](https://codecov.io/gh/recp/cglm/branch/master/graph/badge.svg)](https://codecov.io/gh/recp/cglm) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/6a62b37d5f214f178ebef269dc4a6bf1)](https://www.codacy.com/app/recp/cglm?utm_source=github.com&utm_medium=referral&utm_content=recp/cglm&utm_campaign=Badge_Grade) [![Backers on Open Collective](https://opencollective.com/cglm/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/cglm/sponsors/badge.svg)](#sponsors) @@ -32,7 +33,7 @@ https://github.com/g-truc/glm #### Note for new comers (Important): - `vec4` and `mat4` variables must be aligned. (There will be unaligned versions later) - **in** and **[in, out]** parameters must be initialized (please). But **[out]** parameters not, initializing out param is also redundant -- All functions are inline if you don't want to use pre-compiled versions with glmc_ prefix, you can ignore build process. Just incliude headers. +- All functions are inline if you don't want to use pre-compiled versions with glmc_ prefix, you can ignore build process. Just include headers. - if your debugger takes you to cglm headers then make sure you are not trying to copy vec4 to vec3 or alig issues... - Welcome! @@ -87,6 +88,7 @@ Currently *cglm* uses default clip space configuration (-1, 1) for camera functi - easing functions - curves - curve interpolation helpers (S*M*C, deCasteljau...) +- helpers to convert cglm types to Apple's simd library to pass cglm types to Metal GL without packing them on both sides - and others...
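For the Apple simd helpers listed in the features above (added as `include/cglm/applesimd.h` later in this patch), a minimal usage sketch could look like the following. It assumes an Apple toolchain where `<simd/simd.h>` is available; the `MyUniforms` struct and `fill_uniforms` function are hypothetical names used only for illustration:

```C
#include <simd/simd.h>       /* Apple's simd types; applesimd.h only activates when these are present */
#include <cglm/cglm.h>
#include <cglm/applesimd.h>

/* hypothetical uniform block shared with a Metal shader */
typedef struct MyUniforms {
  simd_float4x4 mvp;
} MyUniforms;

void
fill_uniforms(mat4 mvp, MyUniforms *out) {
  /* convert the column-major cglm matrix to Apple's simd type, no manual packing */
  out->mvp = glm_mat4_applesimd(mvp);
}
```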
@@ -129,20 +131,31 @@ glm_mul(T, R, modelMat);
 glm_inv_tr(modelMat);
 ```
 
+### Struct API
+
+The struct API works as follows, note the `s` suffix on types, the `glms_` prefix on functions and the `GLMS_` prefix on constants:
+
+```C
+#include <cglm/struct.h>
+
+mat4s mat = GLMS_MAT4_IDENTITY_INIT;
+mat4s inv = glms_mat4_inv(mat);
+```
+
+Struct functions generally take their parameters as *values* and *return* their results, rather than taking pointers and writing to out parameters. That means your parameters can usually be `const`, if you're into that.
+
+The types used are actually unions that allow access to the same data in multiple ways. One of those ways involves anonymous structures, available since C11. MSVC also supports them for earlier C versions out of the box, and GCC/Clang do if you enable `-fms-extensions`. To explicitly enable these anonymous structures, `#define CGLM_USE_ANONYMOUS_STRUCT` to `1`; to disable them, define it to `0`. For backward compatibility, you can also `#define CGLM_NO_ANONYMOUS_STRUCT` (the value is irrelevant) to disable them. If you don't specify explicitly, cglm will make a best guess based on your compiler and the C version you're using.
+
 ## Build
 
 ### Unix (Autotools)
 
 ```bash
-$ sh ./build-deps.sh # run only once (dependencies) [Optional].
-$ # You can pass this step if you don't want to run `make check` for tests.
-$ # cglm uses cmocka for tests and it may reqiure cmake for building it
-$
 $ sh autogen.sh
 $ ./configure
 $ make
-$ make check # [Optional] (if you run `sh ./build-deps.sh`)
-$ [sudo] make install
+$ make check # [Optional]
+$ [sudo] make install # [Optional]
 ```
 
 This will also install pkg-config files so you can use
@@ -170,6 +183,10 @@ if `msbuild` won't work (because of multi version VS) then try to build with `de
 $ devenv cglm.sln /Build Release
 ```
 
+#### Running Tests on Windows
+
+You can find the test project in the same Visual Studio solution file. Running that project runs the tests.
+
 ### Building Docs
 First you need install Sphinx: http://www.sphinx-doc.org/en/master/usage/installation.html
 then:
diff --git a/build-deps.sh b/build-deps.sh
deleted file mode 100644
index 20365ba..0000000
--- a/build-deps.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#! /bin/sh
-#
-# Copyright (c), Recep Aslantas.
-#
-# MIT License (MIT), http://opensource.org/licenses/MIT
-# Full license can be found in the LICENSE file
-#
-
-# check if deps are pulled
-git submodule update --init --recursive
-
-cd $(dirname "$0")
-
-# general deps: gcc make autoconf automake libtool cmake
-
-# test - cmocka
-cd ./test/lib/cmocka
-rm -rf build
-mkdir -p build
-cd build
-cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Debug ..
-make -j8
-cd ../../../../
diff --git a/cglm.podspec b/cglm.podspec
index f49c294..bb6fe23 100644
--- a/cglm.podspec
+++ b/cglm.podspec
@@ -2,7 +2,7 @@ Pod::Spec.new do |s|
 
   # Description
   s.name        = "cglm"
-  s.version     = "0.5.1"
+  s.version     = "0.6.1"
   s.summary     = "📽 Optimized OpenGL/Graphics Math (glm) for C"
   s.description = <<-DESC
 cglm is math library for graphics programming for C. It is similar to original glm but it is written for C instead of C++ (you can use here too). See the documentation or README for all features.
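To complement the Struct API section above, here is a slightly fuller sketch. It only uses functions and macros that appear elsewhere in this patch (`glms_mat4_identity`, `glms_rotate_x`, `glms_scale_uni`, `GLMS_VEC3_ONE`) and assumes anonymous structs are enabled (C11, MSVC, or `CGLM_USE_ANONYMOUS_STRUCT` defined to `1`) so that named members like `.x` and `.m00` are available:

```C
#include <cglm/struct.h>

int
main(void) {
  /* value-in / value-out style: no out parameters to pass */
  mat4s m = glms_mat4_identity();
  m = glms_rotate_x(m, GLM_PI_4f);
  m = glms_scale_uni(m, 2.0f);

  /* the union types expose the same storage as .raw (plain cglm arrays),
     as .col[] columns and, with anonymous structs, as named members */
  vec3s v = GLMS_VEC3_ONE;
  return (v.raw[0] == v.x && m.col[0].x == m.m00) ? 0 : 1;
}
```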
diff --git a/configure.ac b/configure.ac index 071ac47..520e92a 100644 --- a/configure.ac +++ b/configure.ac @@ -7,8 +7,11 @@ #***************************************************************************** AC_PREREQ([2.69]) -AC_INIT([cglm], [0.6.0], [info@recp.me]) -AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects]) +AC_INIT([cglm], [0.6.3], [info@recp.me]) +AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects serial-tests]) + +# Don't use the default cflags (-O2 -g), we set ours manually in Makefile.am. +: ${CFLAGS=""} AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_SRCDIR([src/]) diff --git a/docs/source/build.rst b/docs/source/build.rst index c725ab4..c62fe0c 100644 --- a/docs/source/build.rst +++ b/docs/source/build.rst @@ -1,10 +1,7 @@ Build cglm ================================ -| **cglm** does not have external dependencies except for unit testing. When you pulled **cglm** repo with submodules all dependencies will be pulled too. `build-deps.sh` will pull all dependencies/submodules and build for you. - -External dependencies: - * cmocka - for unit testing +| **cglm** does not have any external dependencies. **NOTE:** If you only need to inline versions, you don't need to build **cglm**, you don't need to link it to your program. @@ -16,8 +13,6 @@ Unix (Autotools): .. code-block:: bash :linenos: - $ sh ./build-deps.sh # run this only once (dependencies) - $ sh autogen.sh $ ./configure $ make @@ -65,4 +60,3 @@ Example build: $ cd cglm/docs $ sphinx-build source build - diff --git a/docs/source/conf.py b/docs/source/conf.py index 4cd1de2..6fe943d 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -62,9 +62,9 @@ author = u'Recep Aslantas' # built documents. # # The short X.Y version. -version = u'0.6.0' +version = u'0.6.3' # The full version, including alpha/beta/rc tags. -release = u'0.6.0' +release = u'0.6.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/source/quat.rst b/docs/source/quat.rst index caac1bf..cc991da 100644 --- a/docs/source/quat.rst +++ b/docs/source/quat.rst @@ -324,26 +324,24 @@ Functions documentation | *[in]* **ori** orientation in world space as quaternion | *[out]* **dest** result matrix -.. c:function:: void glm_quat_for(vec3 dir, vec3 fwd, vec3 up, versor dest) +.. c:function:: void glm_quat_for(vec3 dir, vec3 up, versor dest) | creates look rotation quaternion Parameters: | *[in]* **dir** direction to look - | *[in]* **fwd** forward vector | *[in]* **up** up vector | *[out]* **dest** result matrix -.. c:function:: void glm_quat_forp(vec3 from, vec3 to, vec3 fwd, vec3 up, versor dest) +.. c:function:: void glm_quat_forp(vec3 from, vec3 to, vec3 up, versor dest) - | creates look rotation quaternion using source and destination positions p suffix stands for position + | creates look rotation quaternion using source and destination positions p suffix stands for position | this is similar to glm_quat_for except this computes direction for glm_quat_for for you. Parameters: | *[in]* **from** source point | *[in]* **to** destination point - | *[in]* **fwd** forward vector | *[in]* **up** up vector | *[out]* **dest** result matrix diff --git a/docs/source/troubleshooting.rst b/docs/source/troubleshooting.rst index e4cea9f..ad6a009 100644 --- a/docs/source/troubleshooting.rst +++ b/docs/source/troubleshooting.rst @@ -57,6 +57,13 @@ For instance you may called **glm_vec4_** functions for **vec3** data type. 
It will try to write 32 byte but since **vec3** is 24 byte it should throw memory access error or exit the app without saying anything. +**UPDATE - IMPORTANT:** + + | On MSVC or some other compilers, if alignment is enabled (default) then double check alignment requirements if you got a crash. + + | If you send GLM_VEC4_ONE or similar macros directly to a function, it may be crashed. + | Because compiler may not apply alignment as defined on **typedef** to that macro while passing it (on stack) to a function. + Wrong Results: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/include/cglm/applesimd.h b/include/cglm/applesimd.h new file mode 100644 index 0000000..3608bb3 --- /dev/null +++ b/include/cglm/applesimd.h @@ -0,0 +1,95 @@ +/* + * Copyright (c), Recep Aslantas. + * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#ifndef cglm_applesimd_h +#define cglm_applesimd_h +#if defined(__APPLE__) \ + && defined(SIMD_COMPILER_HAS_REQUIRED_FEATURES) \ + && defined(SIMD_BASE) \ + && defined(SIMD_TYPES) \ + && defined(SIMD_VECTOR_TYPES) + +#include "common.h" + +/*! +* @brief converts mat4 to Apple's simd type simd_float4x4 +* @return simd_float4x4 +*/ +CGLM_INLINE +simd_float4x4 +glm_mat4_applesimd(mat4 m) { + simd_float4x4 t; + + t.columns[0][0] = m[0][0]; + t.columns[0][1] = m[0][1]; + t.columns[0][2] = m[0][2]; + t.columns[0][3] = m[0][3]; + + t.columns[1][0] = m[1][0]; + t.columns[1][1] = m[1][1]; + t.columns[1][2] = m[1][2]; + t.columns[1][3] = m[1][3]; + + t.columns[2][0] = m[2][0]; + t.columns[2][1] = m[2][1]; + t.columns[2][2] = m[2][2]; + t.columns[2][3] = m[2][3]; + + t.columns[3][0] = m[3][0]; + t.columns[3][1] = m[3][1]; + t.columns[3][2] = m[3][2]; + t.columns[3][3] = m[3][3]; + + return t; +} + +/*! +* @brief converts mat3 to Apple's simd type simd_float3x3 +* @return simd_float3x3 +*/ +CGLM_INLINE +simd_float3x3 +glm_mat3_applesimd(mat3 m) { + simd_float3x3 t; + + t.columns[0][0] = m[0][0]; + t.columns[0][1] = m[0][1]; + t.columns[0][2] = m[0][2]; + + t.columns[1][0] = m[1][0]; + t.columns[1][1] = m[1][1]; + t.columns[1][2] = m[1][2]; + + t.columns[2][0] = m[2][0]; + t.columns[2][1] = m[2][1]; + t.columns[2][2] = m[2][2]; + + return t; +} + +/*! +* @brief converts vec4 to Apple's simd type simd_float4 +* @return simd_float4 +*/ +CGLM_INLINE +simd_float4 +glm_vec4_applesimd(vec4 v) { + return (simd_float4){v[0], v[1], v[2], v[3]}; +} + +/*! +* @brief converts vec3 to Apple's simd type simd_float3 +* @return v +*/ +CGLM_INLINE +simd_float3 +glm_vec3_applesimd(vec3 v) { + return (simd_float3){v[0], v[1], v[2]}; +} + +#endif +#endif /* cglm_applesimd_h */ diff --git a/include/cglm/bezier.h b/include/cglm/bezier.h index 5dac859..2bbe09f 100644 --- a/include/cglm/bezier.h +++ b/include/cglm/bezier.h @@ -22,9 +22,9 @@ #define GLM_BEZIER_MAT ((mat4)GLM_BEZIER_MAT_INIT) #define GLM_HERMITE_MAT ((mat4)GLM_HERMITE_MAT_INIT) -#define CGLM_DECASTEL_EPS 1e-9 -#define CGLM_DECASTEL_MAX 1000 -#define CGLM_DECASTEL_SMALL 1e-20 +#define CGLM_DECASTEL_EPS 1e-9f +#define CGLM_DECASTEL_MAX 1000.0f +#define CGLM_DECASTEL_SMALL 1e-20f /*! 
* @brief cubic bezier interpolation diff --git a/include/cglm/call/mat3.h b/include/cglm/call/mat3.h index fbd8270..36dcb27 100644 --- a/include/cglm/call/mat3.h +++ b/include/cglm/call/mat3.h @@ -24,6 +24,10 @@ CGLM_EXPORT void glmc_mat3_identity(mat3 mat); +CGLM_EXPORT +void +glmc_mat3_zero(mat3 mat); + CGLM_EXPORT void glmc_mat3_identity_array(mat3 * __restrict mat, size_t count); diff --git a/include/cglm/call/mat4.h b/include/cglm/call/mat4.h index 54fbcbe..1c71da1 100644 --- a/include/cglm/call/mat4.h +++ b/include/cglm/call/mat4.h @@ -33,6 +33,10 @@ CGLM_EXPORT void glmc_mat4_identity_array(mat4 * __restrict mat, size_t count); +CGLM_EXPORT +void +glmc_mat4_zero(mat4 mat); + CGLM_EXPORT void glmc_mat4_pick3(mat4 mat, mat3 dest); diff --git a/include/cglm/call/quat.h b/include/cglm/call/quat.h index ad5f82d..32e503c 100644 --- a/include/cglm/call/quat.h +++ b/include/cglm/call/quat.h @@ -131,11 +131,11 @@ glmc_quat_look(vec3 eye, versor ori, mat4 dest); CGLM_EXPORT void -glmc_quat_for(vec3 dir, vec3 fwd, vec3 up, versor dest); +glmc_quat_for(vec3 dir, vec3 up, versor dest); CGLM_EXPORT void -glmc_quat_forp(vec3 from, vec3 to, vec3 fwd, vec3 up, versor dest); +glmc_quat_forp(vec3 from, vec3 to, vec3 up, versor dest); CGLM_EXPORT void diff --git a/include/cglm/cam.h b/include/cglm/cam.h index ad971fe..bbe7fec 100644 --- a/include/cglm/cam.h +++ b/include/cglm/cam.h @@ -319,10 +319,7 @@ glm_perspective_resize(float aspect, mat4 proj) { */ CGLM_INLINE void -glm_lookat(vec3 eye, - vec3 center, - vec3 up, - mat4 dest) { +glm_lookat(vec3 eye, vec3 center, vec3 up, mat4 dest) { CGLM_ALIGN(8) vec3 f, u, s; glm_vec3_sub(center, eye, f); diff --git a/include/cglm/mat4.h b/include/cglm/mat4.h index e6b916b..7a72f97 100644 --- a/include/cglm/mat4.h +++ b/include/cglm/mat4.h @@ -446,6 +446,9 @@ glm_mat4_quat(mat4 m, versor dest) { /*! * @brief multiply vector with mat4 * + * actually the result is vec4, after multiplication the last component + * is trimmed. if you need it don't use this func. 
+ * * @param[in] m mat4(affine transform) * @param[in] v vec3 * @param[in] last 4th item to make it vec4 diff --git a/include/cglm/plane.h b/include/cglm/plane.h index 7a5291d..15ae580 100644 --- a/include/cglm/plane.h +++ b/include/cglm/plane.h @@ -30,7 +30,14 @@ CGLM_INLINE void glm_plane_normalize(vec4 plane) { - glm_vec4_scale(plane, 1.0f / glm_vec3_norm(plane), plane); + float norm; + + if ((norm = glm_vec3_norm(plane)) == 0.0f) { + glm_vec4_zero(plane); + return; + } + + glm_vec4_scale(plane, 1.0f / norm, plane); } #endif /* cglm_plane_h */ diff --git a/include/cglm/quat.h b/include/cglm/quat.h index 573932f..b2e32e4 100644 --- a/include/cglm/quat.h +++ b/include/cglm/quat.h @@ -693,32 +693,23 @@ glm_quat_look(vec3 eye, versor ori, mat4 dest) { * @brief creates look rotation quaternion * * @param[in] dir direction to look - * @param[in] fwd forward vector * @param[in] up up vector * @param[out] dest destination quaternion */ CGLM_INLINE void -glm_quat_for(vec3 dir, vec3 fwd, vec3 up, versor dest) { - CGLM_ALIGN(8) vec3 axis; - float dot, angle; +glm_quat_for(vec3 dir, vec3 up, versor dest) { + CGLM_ALIGN_MAT mat3 m; - dot = glm_vec3_dot(dir, fwd); - if (fabsf(dot + 1.0f) < 0.000001f) { - glm_quat_init(dest, up[0], up[1], up[2], GLM_PIf); - return; - } + glm_vec3_normalize_to(dir, m[2]); - if (fabsf(dot - 1.0f) < 0.000001f) { - glm_quat_identity(dest); - return; - } + /* No need to negate in LH, but we use RH here */ + glm_vec3_negate(m[2]); + + glm_vec3_crossn(up, m[2], m[0]); + glm_vec3_cross(m[2], m[0], m[1]); - angle = acosf(dot); - glm_cross(fwd, dir, axis); - glm_normalize(axis); - - glm_quatv(dest, angle, axis); + glm_mat3_quat(m, dest); } /*! @@ -727,16 +718,15 @@ glm_quat_for(vec3 dir, vec3 fwd, vec3 up, versor dest) { * * @param[in] from source point * @param[in] to destination point - * @param[in] fwd forward vector * @param[in] up up vector * @param[out] dest destination quaternion */ CGLM_INLINE void -glm_quat_forp(vec3 from, vec3 to, vec3 fwd, vec3 up, versor dest) { +glm_quat_forp(vec3 from, vec3 to, vec3 up, versor dest) { CGLM_ALIGN(8) vec3 dir; glm_vec3_sub(to, from, dir); - glm_quat_for(dir, fwd, up, dest); + glm_quat_for(dir, up, dest); } /*! 
diff --git a/include/cglm/simd/arm.h b/include/cglm/simd/arm.h index d0e5072..64b2dad 100644 --- a/include/cglm/simd/arm.h +++ b/include/cglm/simd/arm.h @@ -34,17 +34,19 @@ glmm_hadd(float32x4_t v) { static inline float glmm_hmin(float32x4_t v) { - v = vpmin_f32(vget_low_f32(v), vget_high_f32(v)); - v = vpmin_f32(v, v); - return vget_lane_f32(v, 0); + float32x2_t t; + t = vpmin_f32(vget_low_f32(v), vget_high_f32(v)); + t = vpmin_f32(t, t); + return vget_lane_f32(t, 0); } static inline float glmm_hmax(float32x4_t v) { - v = vpmax_f32(vget_low_f32(v), vget_high_f32(v)); - v = vpmax_f32(v, v); - return vget_lane_f32(v, 0); + float32x2_t t; + t = vpmax_f32(vget_low_f32(v), vget_high_f32(v)); + t = vpmax_f32(t, t); + return vget_lane_f32(t, 0); } static inline diff --git a/include/cglm/struct/affine.h b/include/cglm/struct/affine.h index 36dbe0f..b8c6f6d 100644 --- a/include/cglm/struct/affine.h +++ b/include/cglm/struct/affine.h @@ -16,7 +16,7 @@ CGLM_INLINE mat4s glms_scale_make(vec3s v); CGLM_INLINE mat4s glms_scale(mat4s m, vec3s v); CGLM_INLINE mat4s glms_scale_uni(mat4s m, float s); - CGLM_INLINE mat4s glmx_rotate_x(mat4s m, float angle); + CGLM_INLINE mat4s glms_rotate_x(mat4s m, float angle); CGLM_INLINE mat4s glms_rotate_y(mat4s m, float angle); CGLM_INLINE mat4s glms_rotate_z(mat4s m, float angle); CGLM_INLINE mat4s glms_rotate_make(float angle, vec3s axis); @@ -169,7 +169,7 @@ glms_scale_uni(mat4s m, float s) { */ CGLM_INLINE mat4s -glmx_rotate_x(mat4s m, float angle) { +glms_rotate_x(mat4s m, float angle) { mat4s r; glm_rotate_x(m.raw, angle, r.raw); return r; diff --git a/include/cglm/struct/mat3.h b/include/cglm/struct/mat3.h index 5c474e6..53a7273 100644 --- a/include/cglm/struct/mat3.h +++ b/include/cglm/struct/mat3.h @@ -14,9 +14,9 @@ Functions: CGLM_INLINE mat3s glms_mat3_copy(mat3s mat); - CGLM_INLINE mat3s glms_mat3_identity(); + CGLM_INLINE mat3s glms_mat3_identity(void); CGLM_INLINE void glms_mat3_identity_array(mat3s * __restrict mat, size_t count); - CGLM_INLINE mat3s glms_mat3_zero(); + CGLM_INLINE mat3s glms_mat3_zero(void); CGLM_INLINE mat3s glms_mat3_mul(mat3s m1, mat3s m2); CGLM_INLINE ma3s glms_mat3_transpose(mat3s m); CGLM_INLINE vec3s glms_mat3_mulv(mat3s m, vec3s v); @@ -38,12 +38,8 @@ #include "../mat3.h" #include "vec3.h" -#define GLMS_MAT3_IDENTITY_INIT {1.0f, 0.0f, 0.0f, \ - 0.0f, 1.0f, 0.0f, \ - 0.0f, 0.0f, 1.0f} -#define GLMS_MAT3_ZERO_INIT {0.0f, 0.0f, 0.0f, \ - 0.0f, 0.0f, 0.0f, \ - 0.0f, 0.0f, 0.0f} +#define GLMS_MAT3_IDENTITY_INIT {GLM_MAT3_IDENTITY_INIT} +#define GLMS_MAT3_ZERO_INIT {GLM_MAT3_ZERO_INIT} /* for C only */ #define GLMS_MAT3_IDENTITY ((mat3s)GLMS_MAT3_IDENTITY_INIT) @@ -79,7 +75,7 @@ glms_mat3_copy(mat3s mat) { */ CGLM_INLINE mat3s -glms_mat3_identity() { +glms_mat3_identity(void) { mat3s r; glm_mat3_identity(r.raw); return r; @@ -111,7 +107,7 @@ glms_mat3_identity_array(mat3s * __restrict mat, size_t count) { */ CGLM_INLINE mat3s -glms_mat3_zero() { +glms_mat3_zero(void) { mat3s r; glm_mat3_zero(r.raw); return r; diff --git a/include/cglm/struct/mat4.h b/include/cglm/struct/mat4.h index ef72e31..28f80a3 100644 --- a/include/cglm/struct/mat4.h +++ b/include/cglm/struct/mat4.h @@ -20,9 +20,9 @@ Functions: CGLM_INLINE mat4s glms_mat4_ucopy(mat4s mat); CGLM_INLINE mat4s glms_mat4_copy(mat4s mat); - CGLM_INLINE mat4s glms_mat4_identity(); + CGLM_INLINE mat4s glms_mat4_identity(void); CGLM_INLINE void glms_mat4_identity_array(mat4s * __restrict mat, size_t count); - CGLM_INLINE mat4s glms_mat4_zero(); + CGLM_INLINE mat4s glms_mat4_zero(void); 
CGLM_INLINE mat3s glms_mat4_pick3(mat4s mat); CGLM_INLINE mat3s glms_mat4_pick3t(mat4s mat); CGLM_INLINE mat4s glms_mat4_ins3(mat3s mat); @@ -53,15 +53,8 @@ #include "vec4.h" #include "vec3.h" -#define GLMS_MAT4_IDENTITY_INIT {1.0f, 0.0f, 0.0f, 0.0f, \ - 0.0f, 1.0f, 0.0f, 0.0f, \ - 0.0f, 0.0f, 1.0f, 0.0f, \ - 0.0f, 0.0f, 0.0f, 1.0f} - -#define GLMS_MAT4_ZERO_INIT {0.0f, 0.0f, 0.0f, 0.0f, \ - 0.0f, 0.0f, 0.0f, 0.0f, \ - 0.0f, 0.0f, 0.0f, 0.0f, \ - 0.0f, 0.0f, 0.0f, 0.0f} +#define GLMS_MAT4_IDENTITY_INIT {GLM_MAT4_IDENTITY_INIT} +#define GLMS_MAT4_ZERO_INIT {GLM_MAT4_ZERO_INIT} /* for C only */ #define GLMS_MAT4_IDENTITY ((mat4s)GLMS_MAT4_IDENTITY_INIT) @@ -114,7 +107,7 @@ glms_mat4_copy(mat4s mat) { */ CGLM_INLINE mat4s -glms_mat4_identity() { +glms_mat4_identity(void) { mat4s r; glm_mat4_identity(r.raw); return r; @@ -146,7 +139,7 @@ glms_mat4_identity_array(mat4s * __restrict mat, size_t count) { */ CGLM_INLINE mat4s -glms_mat4_zero() { +glms_mat4_zero(void) { mat4s r; glm_mat4_zero(r.raw); return r; diff --git a/include/cglm/struct/quat.h b/include/cglm/struct/quat.h index 3b0c8ee..8b3774a 100644 --- a/include/cglm/struct/quat.h +++ b/include/cglm/struct/quat.h @@ -11,7 +11,7 @@ GLMS_QUAT_IDENTITY Functions: - CGLM_INLINE versors glms_quat_identity() + CGLM_INLINE versors glms_quat_identity(void) CGLM_INLINE void glms_quat_identity_array(versor *q, size_t count) CGLM_INLINE versors glms_quat_init(float x, float y, float z, float w) CGLM_INLINE versors glms_quatv(float angle, vec3s axis) @@ -62,7 +62,7 @@ * ---------------------------------------------------------------------------- */ -#define GLMS_QUAT_IDENTITY_INIT GLM_QUAT_IDENTITY_INIT +#define GLMS_QUAT_IDENTITY_INIT {GLM_QUAT_IDENTITY_INIT} #define GLMS_QUAT_IDENTITY ((versors)GLMS_QUAT_IDENTITY_INIT) /*! 
@@ -72,7 +72,7 @@ */ CGLM_INLINE versors -glms_quat_identity() { +glms_quat_identity(void) { versors dest; glm_quat_identity(dest.raw); return dest; @@ -251,7 +251,7 @@ CGLM_INLINE vec3s glms_quat_imagn(versors q) { vec3s dest; - glm_normalize_to(q.imag.raw, dest.raw); + glm_normalize_to(q.raw, dest.raw); return dest; } @@ -437,15 +437,14 @@ glms_quat_look(vec3s eye, versors ori) { * @brief creates look rotation quaternion * * @param[in] dir direction to look - * @param[in] fwd forward vector * @param[in] up up vector * @returns destination quaternion */ CGLM_INLINE versors -glms_quat_for(vec3s dir, vec3s fwd, vec3s up) { +glms_quat_for(vec3s dir, vec3s up) { versors dest; - glm_quat_for(dir.raw, fwd.raw, up.raw, dest.raw); + glm_quat_for(dir.raw, up.raw, dest.raw); return dest; } @@ -455,15 +454,14 @@ glms_quat_for(vec3s dir, vec3s fwd, vec3s up) { * * @param[in] from source point * @param[in] to destination point - * @param[in] fwd forward vector * @param[in] up up vector * @returns destination quaternion */ CGLM_INLINE versors -glms_quat_forp(vec3s from, vec3s to, vec3s fwd, vec3s up) { +glms_quat_forp(vec3s from, vec3s to, vec3s up) { versors dest; - glm_quat_forp(from.raw, to.raw, fwd.raw, up.raw, dest.raw); + glm_quat_forp(from.raw, to.raw, up.raw, dest.raw); return dest; } diff --git a/include/cglm/struct/vec3.h b/include/cglm/struct/vec3.h index 0cd2f5c..7fa5b06 100644 --- a/include/cglm/struct/vec3.h +++ b/include/cglm/struct/vec3.h @@ -19,8 +19,8 @@ CGLM_INLINE vec3s glms_vec3(vec4s v4); CGLM_INLINE void glms_vec3_pack(vec3s dst[], vec3 src[], size_t len); CGLM_INLINE void glms_vec3_unpack(vec3 dst[], vec3s src[], size_t len); - CGLM_INLINE vec3s glms_vec3_zero(); - CGLM_INLINE vec3s glms_vec3_one(); + CGLM_INLINE vec3s glms_vec3_zero(void); + CGLM_INLINE vec3s glms_vec3_one(void); CGLM_INLINE float glms_vec3_dot(vec3s a, vec3s b); CGLM_INLINE float glms_vec3_norm2(vec3s v); CGLM_INLINE float glms_vec3_norm(vec3s v); @@ -86,15 +86,15 @@ #include "../vec3.h" #include "vec3-ext.h" -#define GLMS_VEC3_ONE_INIT {1.0f, 1.0f, 1.0f} -#define GLMS_VEC3_ZERO_INIT {0.0f, 0.0f, 0.0f} +#define GLMS_VEC3_ONE_INIT {GLM_VEC3_ONE_INIT} +#define GLMS_VEC3_ZERO_INIT {GLM_VEC3_ZERO_INIT} #define GLMS_VEC3_ONE ((vec3s)GLMS_VEC3_ONE_INIT) #define GLMS_VEC3_ZERO ((vec3s)GLMS_VEC3_ZERO_INIT) -#define GLMS_YUP ((vec3s){0.0f, 1.0f, 0.0f}) -#define GLMS_ZUP ((vec3s){0.0f, 0.0f, 1.0f}) -#define GLMS_XUP ((vec3s){1.0f, 0.0f, 0.0f}) +#define GLMS_YUP ((vec3s){{0.0f, 1.0f, 0.0f}}) +#define GLMS_ZUP ((vec3s){{0.0f, 0.0f, 1.0f}}) +#define GLMS_XUP ((vec3s){{1.0f, 0.0f, 0.0f}}) /*! 
* @brief init vec3 using vec4 @@ -151,7 +151,7 @@ glms_vec3_unpack(vec3 dst[], vec3s src[], size_t len) { */ CGLM_INLINE vec3s -glms_vec3_zero() { +glms_vec3_zero(void) { vec3s r; glm_vec3_zero(r.raw); return r; @@ -164,7 +164,7 @@ glms_vec3_zero() { */ CGLM_INLINE vec3s -glms_vec3_one() { +glms_vec3_one(void) { vec3s r; glm_vec3_one(r.raw); return r; diff --git a/include/cglm/struct/vec4.h b/include/cglm/struct/vec4.h index 7137cfa..4469cb2 100644 --- a/include/cglm/struct/vec4.h +++ b/include/cglm/struct/vec4.h @@ -7,12 +7,12 @@ /* Macros: - GLM_VEC4_ONE_INIT - GLM_VEC4_BLACK_INIT - GLM_VEC4_ZERO_INIT - GLM_VEC4_ONE - GLM_VEC4_BLACK - GLM_VEC4_ZERO + GLMS_VEC4_ONE_INIT + GLMS_VEC4_BLACK_INIT + GLMS_VEC4_ZERO_INIT + GLMS_VEC4_ONE + GLMS_VEC4_BLACK + GLMS_VEC4_ZERO Functions: CGLM_INLINE vec4s glms_vec4(vec3s v3, float last); @@ -72,9 +72,9 @@ #include "../vec4.h" #include "vec4-ext.h" -#define GLMS_VEC4_ONE_INIT {1.0f, 1.0f, 1.0f, 1.0f} -#define GLMS_VEC4_BLACK_INIT {0.0f, 0.0f, 0.0f, 1.0f} -#define GLMS_VEC4_ZERO_INIT {0.0f, 0.0f, 0.0f, 0.0f} +#define GLMS_VEC4_ONE_INIT {GLM_VEC4_ONE_INIT} +#define GLMS_VEC4_BLACK_INIT {GLM_VEC4_BLACK_INIT} +#define GLMS_VEC4_ZERO_INIT {GLM_VEC4_ZERO_INIT} #define GLMS_VEC4_ONE ((vec4s)GLM_VEC4_ONE_INIT) #define GLMS_VEC4_BLACK ((vec4s)GLM_VEC4_BLACK_INIT) @@ -180,7 +180,7 @@ glms_vec4_unpack(vec4 dst[], vec4s src[], size_t len) { */ CGLM_INLINE vec4s -glms_vec4_zero() { +glms_vec4_zero(void) { vec4s r; glm_vec4_zero(r.raw); return r; @@ -193,7 +193,7 @@ glms_vec4_zero() { */ CGLM_INLINE vec4s -glms_vec4_one() { +glms_vec4_one(void) { vec4s r; glm_vec4_one(r.raw); return r; diff --git a/include/cglm/types-struct.h b/include/cglm/types-struct.h index f9ff8f5..aad4876 100644 --- a/include/cglm/types-struct.h +++ b/include/cglm/types-struct.h @@ -10,40 +10,71 @@ #include "types.h" +/* + * Anonymous structs are available since C11, but we'd like to be compatible + * with C99 and C89 too. So let's figure out if we should be using them or not. + * It's simply a convenience feature, you can e.g. build the library with + * anonymous structs and your application without them and they'll still be + * compatible, cglm doesn't use the anonymous structs internally. + */ +#ifndef CGLM_USE_ANONYMOUS_STRUCT + /* If the user doesn't explicitly specify if they want anonymous structs or + * not, then we'll try to intuit an appropriate choice. */ +# if defined(CGLM_NO_ANONYMOUS_STRUCT) + /* The user has defined CGLM_NO_ANONYMOUS_STRUCT. This used to be the + * only #define governing the use of anonymous structs, so for backward + * compatibility, we still honor that choice and disable them. */ +# define CGLM_USE_ANONYMOUS_STRUCT 0 +# elif __STDC_VERSION__ >= 20112L || defined(_MSVC_VER) + /* We're compiling for C11 or this is the MSVC compiler. In either + * case, anonymous structs are available, so use them. */ +# define CGLM_USE_ANONYMOUS_STRUCT 1 +# elif defined(_MSC_VER) && (_MSC_VER >= 1900) /* Visual Studio 2015 */ + /* We can support anonymous structs + * since Visual Studio 2015 or 2017 (1910) maybe? */ +# define CGLM_USE_ANONYMOUS_STRUCT 1 +# else + /* Otherwise, we're presumably building for C99 or C89 and can't rely + * on anonymous structs being available. Turn them off. 
*/ +# define CGLM_USE_ANONYMOUS_STRUCT 0 +# endif +#endif + typedef union vec2s { -#ifndef CGLM_NO_ANONYMOUS_STRUCT + vec2 raw; +#if CGLM_USE_ANONYMOUS_STRUCT struct { float x; float y; }; #endif - vec2 raw; } vec2s; typedef union vec3s { -#ifndef CGLM_NO_ANONYMOUS_STRUCT + vec3 raw; +#if CGLM_USE_ANONYMOUS_STRUCT struct { float x; float y; float z; }; #endif - vec3 raw; } vec3s; typedef union ivec3s { -#ifndef CGLM_NO_ANONYMOUS_STRUCT + ivec3 raw; +#if CGLM_USE_ANONYMOUS_STRUCT struct { int x; int y; int z; }; #endif - ivec3 raw; } ivec3s; typedef union CGLM_ALIGN_IF(16) vec4s { -#ifndef CGLM_NO_ANONYMOUS_STRUCT + vec4 raw; +#if CGLM_USE_ANONYMOUS_STRUCT struct { float x; float y; @@ -51,11 +82,11 @@ typedef union CGLM_ALIGN_IF(16) vec4s { float w; }; #endif - vec4 raw; } vec4s; typedef union CGLM_ALIGN_IF(16) versors { -#ifndef CGLM_NO_ANONYMOUS_STRUCT + vec4 raw; +#if CGLM_USE_ANONYMOUS_STRUCT struct { float x; float y; @@ -68,34 +99,35 @@ typedef union CGLM_ALIGN_IF(16) versors { float real; }; #endif - vec4 raw; } versors; typedef union mat2s { -#ifndef CGLM_NO_ANONYMOUS_STRUCT + mat2 raw; + vec2s col[2]; +#if CGLM_USE_ANONYMOUS_STRUCT struct { float m00, m01; float m10, m11; }; #endif - vec2s col[2]; - mat2 raw; } mat2s; typedef union mat3s { -#ifndef CGLM_NO_ANONYMOUS_STRUCT + mat3 raw; + vec3s col[3]; +#if CGLM_USE_ANONYMOUS_STRUCT struct { float m00, m01, m02; float m10, m11, m12; float m20, m21, m22; }; #endif - vec3s col[3]; - mat3 raw; } mat3s; typedef union CGLM_ALIGN_MAT mat4s { -#ifndef CGLM_NO_ANONYMOUS_STRUCT + mat4 raw; + vec4s col[4]; +#if CGLM_USE_ANONYMOUS_STRUCT struct { float m00, m01, m02, m03; float m10, m11, m12, m13; @@ -103,8 +135,6 @@ typedef union CGLM_ALIGN_MAT mat4s { float m30, m31, m32, m33; }; #endif - vec4s col[4]; - mat4 raw; } mat4s; #endif /* cglm_types_struct_h */ diff --git a/include/cglm/types.h b/include/cglm/types.h index 79038e3..60eb538 100644 --- a/include/cglm/types.h +++ b/include/cglm/types.h @@ -36,11 +36,16 @@ typedef float vec2[2]; typedef float vec3[3]; typedef int ivec3[3]; typedef CGLM_ALIGN_IF(16) float vec4[4]; -typedef vec4 versor; +typedef vec4 versor; /* |x, y, z, w| -> w is the last */ typedef vec3 mat3[3]; typedef CGLM_ALIGN_IF(16) vec2 mat2[2]; typedef CGLM_ALIGN_MAT vec4 mat4[4]; +/* + Important: cglm stores quaternion as [x, y, z, w] in memory since v0.4.0 + it was [w, x, y, z] before v0.4.0 ( v0.3.5 and earlier ). w is real part. +*/ + #define GLM_E 2.71828182845904523536028747135266250 /* e */ #define GLM_LOG2E 1.44269504088896340735992468100189214 /* log2(e) */ #define GLM_LOG10E 0.434294481903251827651128918916605082 /* log10(e) */ diff --git a/include/cglm/vec3.h b/include/cglm/vec3.h index 2bb6231..7abeaf5 100644 --- a/include/cglm/vec3.h +++ b/include/cglm/vec3.h @@ -111,9 +111,10 @@ #define GLM_VEC3_ONE ((vec3)GLM_VEC3_ONE_INIT) #define GLM_VEC3_ZERO ((vec3)GLM_VEC3_ZERO_INIT) -#define GLM_YUP ((vec3){0.0f, 1.0f, 0.0f}) -#define GLM_ZUP ((vec3){0.0f, 0.0f, 1.0f}) -#define GLM_XUP ((vec3){1.0f, 0.0f, 0.0f}) +#define GLM_YUP ((vec3){0.0f, 1.0f, 0.0f}) +#define GLM_ZUP ((vec3){0.0f, 0.0f, 1.0f}) +#define GLM_XUP ((vec3){1.0f, 0.0f, 0.0f}) +#define GLM_FORWARD ((vec3){0.0f, 0.0f, -1.0f}) #define GLM_XXX GLM_SHUFFLE3(0, 0, 0) #define GLM_YYY GLM_SHUFFLE3(1, 1, 1) @@ -996,6 +997,27 @@ glm_vec3_smoothinterpc(vec3 from, vec3 to, float t, vec3 dest) { glm_vec3_smoothinterp(from, to, glm_clamp_zo(t), dest); } +/*! + * @brief swizzle vector components + * + * you can use existin masks e.g. 
GLM_XXX, GLM_ZYX + * + * @param[in] v source + * @param[in] mask mask + * @param[out] dest destination + */ +CGLM_INLINE +void +glm_vec3_swizzle(vec3 v, int mask, vec3 dest) { + vec3 t; + + t[0] = v[(mask & (3 << 0))]; + t[1] = v[(mask & (3 << 2)) >> 2]; + t[2] = v[(mask & (3 << 4)) >> 4]; + + glm_vec3_copy(t, dest); +} + /*! * @brief vec3 cross product * @@ -1054,25 +1076,4 @@ glm_normalize_to(vec3 v, vec3 dest) { glm_vec3_normalize_to(v, dest); } -/*! - * @brief swizzle vector components - * - * you can use existin masks e.g. GLM_XXX, GLM_ZYX - * - * @param[in] v source - * @param[in] mask mask - * @param[out] dest destination - */ -CGLM_INLINE -void -glm_vec3_swizzle(vec3 v, int mask, vec3 dest) { - vec3 t; - - t[0] = v[(mask & (3 << 0))]; - t[1] = v[(mask & (3 << 2)) >> 2]; - t[2] = v[(mask & (3 << 4)) >> 4]; - - glm_vec3_copy(t, dest); -} - #endif /* cglm_vec3_h */ diff --git a/include/cglm/vec4-ext.h b/include/cglm/vec4-ext.h index 583376b..6baa70f 100644 --- a/include/cglm/vec4-ext.h +++ b/include/cglm/vec4-ext.h @@ -252,7 +252,7 @@ glm_vec4_abs(vec4 v, vec4 dest) { #if defined( __SSE__ ) || defined( __SSE2__ ) glmm_store(dest, glmm_abs(glmm_load(v))); #elif defined(CGLM_NEON_FP) - vst1q_f32(dest, vabsq_f32(vld1q_f32(a))); + vst1q_f32(dest, vabsq_f32(vld1q_f32(v))); #else dest[0] = fabsf(v[0]); dest[1] = fabsf(v[1]); diff --git a/include/cglm/version.h b/include/cglm/version.h index 5843f4b..e3cd788 100644 --- a/include/cglm/version.h +++ b/include/cglm/version.h @@ -10,6 +10,6 @@ #define CGLM_VERSION_MAJOR 0 #define CGLM_VERSION_MINOR 6 -#define CGLM_VERSION_PATCH 0 +#define CGLM_VERSION_PATCH 3 #endif /* cglm_version_h */ diff --git a/post-build.sh b/post-build.sh deleted file mode 100644 index 6fa6e7a..0000000 --- a/post-build.sh +++ /dev/null @@ -1,24 +0,0 @@ -#! /bin/sh -# -# Copyright (c), Recep Aslantas. 
-# -# MIT License (MIT), http://opensource.org/licenses/MIT -# Full license can be found in the LICENSE file -# - -cd $(dirname "$0") - -mkdir -p "$(pwd)/.libs" - -libmocka_folder=$(pwd)/test/lib/cmocka/build/src/ - -if [ "$(uname)" = "Darwin" ]; then - libcmocka=libcmocka.0.dylib -else - libcmocka=libcmocka.so.0 -fi - -libcmocka_path="$libmocka_folder$libcmocka" -if [ -f "$libcmocka_path" ]; then - ln -sf "$libcmocka_path" "$(pwd)/.libs/$libcmocka"; -fi diff --git a/src/mat3.c b/src/mat3.c index 337f1f1..1286bd9 100644 --- a/src/mat3.c +++ b/src/mat3.c @@ -20,6 +20,12 @@ glmc_mat3_identity(mat3 mat) { glm_mat3_identity(mat); } +CGLM_EXPORT +void +glmc_mat3_zero(mat3 mat) { + glm_mat3_zero(mat); +} + CGLM_EXPORT void glmc_mat3_identity_array(mat3 * __restrict mat, size_t count) { diff --git a/src/mat4.c b/src/mat4.c index c648a6e..a9f39c6 100644 --- a/src/mat4.c +++ b/src/mat4.c @@ -32,6 +32,12 @@ glmc_mat4_identity_array(mat4 * __restrict mat, size_t count) { glm_mat4_identity_array(mat, count); } +CGLM_EXPORT +void +glmc_mat4_zero(mat4 mat) { + glm_mat4_zero(mat); +} + CGLM_EXPORT void glmc_mat4_pick3(mat4 mat, mat3 dest) { diff --git a/src/quat.c b/src/quat.c index bd8c13b..f992f7c 100644 --- a/src/quat.c +++ b/src/quat.c @@ -59,7 +59,7 @@ glmc_quat_normalize_to(versor q, versor dest) { CGLM_EXPORT void glmc_quat_normalize(versor q) { - glm_quat_norm(q); + glm_quat_normalize(q); } CGLM_EXPORT @@ -184,14 +184,14 @@ glmc_quat_look(vec3 eye, versor ori, mat4 dest) { CGLM_EXPORT void -glmc_quat_for(vec3 dir, vec3 fwd, vec3 up, versor dest) { - glm_quat_for(dir, fwd, up, dest); +glmc_quat_for(vec3 dir, vec3 up, versor dest) { + glm_quat_for(dir, up, dest); } CGLM_EXPORT void -glmc_quat_forp(vec3 from, vec3 to, vec3 fwd, vec3 up, versor dest) { - glm_quat_forp(from, to, fwd, up, dest); +glmc_quat_forp(vec3 from, vec3 to, vec3 up, versor dest) { + glm_quat_forp(from, to, up, dest); } CGLM_EXPORT diff --git a/src/vec3.c b/src/vec3.c index 4ce7112..a09a2ef 100644 --- a/src/vec3.c +++ b/src/vec3.c @@ -239,13 +239,13 @@ glmc_vec3_distance2(vec3 a, vec3 b) { CGLM_EXPORT void glmc_vec3_maxv(vec3 a, vec3 b, vec3 dest) { - glm_vec3_minv(a, b, dest); + glm_vec3_maxv(a, b, dest); } CGLM_EXPORT void glmc_vec3_minv(vec3 a, vec3 b, vec3 dest) { - glm_vec3_maxv(a, b, dest); + glm_vec3_minv(a, b, dest); } CGLM_EXPORT diff --git a/src/vec4.c b/src/vec4.c index f08cd94..60c3a25 100644 --- a/src/vec4.c +++ b/src/vec4.c @@ -203,13 +203,13 @@ glmc_vec4_distance2(vec4 a, vec4 b) { CGLM_EXPORT void glmc_vec4_maxv(vec4 a, vec4 b, vec4 dest) { - glm_vec4_minv(a, b, dest); + glm_vec4_maxv(a, b, dest); } CGLM_EXPORT void glmc_vec4_minv(vec4 a, vec4 b, vec4 dest) { - glm_vec4_maxv(a, b, dest); + glm_vec4_minv(a, b, dest); } CGLM_EXPORT diff --git a/test/include/common.h b/test/include/common.h new file mode 100644 index 0000000..3c32b9f --- /dev/null +++ b/test/include/common.h @@ -0,0 +1,119 @@ +/* + * Copyright (c), Recep Aslantas. 
+ * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#ifndef tests_common_h +#define tests_common_h + +#include +#include +#include + +#include +#include +#include + +typedef struct test_status_t { + const char *msg; + int status; +} test_status_t; + +typedef test_status_t (*fntest)(void); + +typedef struct test_entry_t { + char *name; + fntest entry; + int ret; + int show_output; +} test_entry_t; + +#define RESET "\033[0m" +#define BLACK "\033[30m" /* Black */ +#define RED "\033[31m" /* Red */ +#define GREEN "\033[32m" /* Green */ +#define YELLOW "\033[33m" /* Yellow */ +#define BLUE "\033[34m" /* Blue */ +#define MAGENTA "\033[35m" /* Magenta */ +#define CYAN "\033[36m" /* Cyan */ +#define WHITE "\033[37m" /* White */ +#define BOLDBLACK "\033[1m\033[30m" /* Bold Black */ +#define BOLDRED "\033[1m\033[31m" /* Bold Red */ +#define BOLDGREEN "\033[1m\033[32m" /* Bold Green */ +#define BOLDYELLOW "\033[1m\033[33m" /* Bold Yellow */ +#define BOLDBLUE "\033[1m\033[34m" /* Bold Blue */ +#define BOLDMAGENTA "\033[1m\033[35m" /* Bold Magenta */ +#define BOLDCYAN "\033[1m\033[36m" /* Bold Cyan */ +#define BOLDWHITE "\033[1m\033[37m" /* Bold White */ + +#define TEST_DECLARE(FUN) test_status_t test_ ## FUN(void); +#define TEST_ENTRY(FUN) { #FUN, test_ ## FUN, 0, 0 }, +#define TEST_LIST static test_entry_t tests[] = + +/* __VA_ARGS__ workaround for MSVC: https://stackoverflow.com/a/5134656 */ +#define EXPAND(x) x + +#define TEST_OK 1 +#define TEST_SUCCESS return (test_status_t){NULL, TEST_OK}; + +#define TEST_IMPL_ARG1(FUN) \ + test_status_t test_ ## FUN (void); \ + test_status_t test_ ## FUN() + +#define TEST_IMPL_ARG2(PREFIX, FUN) TEST_IMPL_ARG1(PREFIX ## FUN) +#define TEST_IMPL_ARG3(arg1, arg2, arg3, ...) arg3 + +#define TEST_IMPL_CHOOSER(...) \ + EXPAND(TEST_IMPL_ARG3(__VA_ARGS__, TEST_IMPL_ARG2, TEST_IMPL_ARG1)) + +#define TEST_IMPL(...) EXPAND(TEST_IMPL_CHOOSER(__VA_ARGS__)(__VA_ARGS__)) + +#define ASSERT_EXT(expr, msg) \ + if (!(expr)) { \ + fprintf(stderr, \ + RED " assert fail" RESET \ + " in " BOLDCYAN "%s " RESET \ + "on " BOLDMAGENTA "line %d" RESET \ + " : " BOLDWHITE " ASSERT(%s)\n" RESET, \ + __FILE__, \ + __LINE__, \ + #expr); \ + return (test_status_t){msg, 0}; \ + } + +#define ASSERT_ARG1(expr) ASSERT_EXT(expr, NULL) +#define ASSERT_ARG2(expr, msg) ASSERT_EXT(expr, msg) +#define ASSERT_ARG3(arg1, arg2, arg3, ...) arg3 + +#define ASSERT_CHOOSER(...) ASSERT_ARG3(__VA_ARGS__, ASSERT_ARG2, ASSERT_ARG1) +#define ASSERT(...) 
do { ASSERT_CHOOSER(__VA_ARGS__)(__VA_ARGS__) } while(0); +#define ASSERTIFY(expr) do { \ + test_status_t ts; \ + ts = expr; \ + if (ts.status != TEST_OK) { \ + fprintf(stderr, \ + RED " assert fail" RESET \ + " in " BOLDCYAN "%s " RESET \ + "on " BOLDMAGENTA "line %d" RESET \ + " : " BOLDWHITE " ASSERTIFY(%s)\n" RESET, \ + __FILE__, \ + __LINE__, \ + #expr); \ + return (test_status_t){ts.msg, 0}; \ + } \ + } while(0); + +#if defined(_WIN32) +# define drand48() ((float)(rand() / (RAND_MAX + 1.0))) +# define OK_TEXT "ok:" +# define FAIL_TEXT "fail:" +# define FINAL_TEXT "^_^" +#else +# define OK_TEXT "✔︎" +# define FAIL_TEXT "𐄂" +# define FINAL_TEXT "🎉" +#endif + +#endif /* common_h */ diff --git a/test/lib/cmocka b/test/lib/cmocka deleted file mode 160000 index 5f61d2f..0000000 --- a/test/lib/cmocka +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 5f61d2f188b0254f6ad74f9d9f84705923ece1fd diff --git a/test/runner.c b/test/runner.c new file mode 100644 index 0000000..9a5fd81 --- /dev/null +++ b/test/runner.c @@ -0,0 +1,98 @@ +/* + * Copyright (c), Recep Aslantas. + * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#include "include/common.h" +#include "tests.h" + +#include +#include +#include + +int +main(int argc, const char * argv[]) { + test_entry_t *entry; + test_status_t st; + int32_t i, count, passed, failed, maxlen; + double start, end, elapsed, total; + + passed = failed = maxlen = 0; + total = 0.0; + count = sizeof(tests) / sizeof(tests[0]); + + fprintf(stderr, CYAN "\nWelcome to cglm tests\n\n" RESET); + + srand((unsigned int)time(NULL)); + + for (i = 0; i < count; i++) { + int32_t len; + + entry = tests + i; + len = (int32_t)strlen(entry->name); + + maxlen = GLM_MAX(maxlen, len); + } + + maxlen += 5; + + fprintf(stderr, + BOLDWHITE " %-*s %-*s\n", + maxlen, "Test Name", maxlen, "Elapsed Time"); + + for (i = 0; i < count; i++) { + entry = tests + i; + start = clock(); + st = entry->entry(); + end = clock(); + elapsed = (end - start) / CLOCKS_PER_SEC; + total += elapsed; + + if (!st.status) { + fprintf(stderr, + BOLDRED " " FAIL_TEXT BOLDWHITE " %s " RESET, entry->name); + if (st.msg) { + fprintf(stderr, + YELLOW "- %s" RESET, + st.msg); + } + + fprintf(stderr, "\n"); + + failed++; + } else { + fprintf(stderr, GREEN " " OK_TEXT RESET " %-*s ", maxlen, entry->name); + + if (elapsed > 0.01) + fprintf(stderr, YELLOW "%.2fs", elapsed); + else + fprintf(stderr, "0"); + + fprintf(stderr, "\n" RESET); + passed++; + } + } + + if (failed == 0) { + fprintf(stderr, + BOLDGREEN "\n All tests are passed " FINAL_TEXT "\n" RESET); + } + + fprintf(stderr, + CYAN "\ncglm test results (%0.2fs):\n" RESET + "--------------------------\n" + + MAGENTA "%d" RESET " tests are runned, " + GREEN "%d" RESET " %s passed, " + RED "%d" RESET " %s failed\n\n" RESET, + total, + count, + passed, + passed > 1 ? "are" : "is", + failed, + failed > 1 ? "are" : "is"); + + return failed; +} diff --git a/test/src/test_affine.c b/test/src/test_affine.c deleted file mode 100644 index bb1be15..0000000 --- a/test/src/test_affine.c +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (c), Recep Aslantas. 
- * - * MIT License (MIT), http://opensource.org/licenses/MIT - * Full license can be found in the LICENSE file - */ - -#include "test_common.h" - -void -test_affine(void **state) { - mat4 t1, t2, t3, t4, t5; - - /* test translate is postmultiplied */ - glmc_rotate_make(t1, GLM_PI_4f, GLM_YUP); - glm_translate_make(t2, (vec3){34, 57, 36}); - - glmc_mat4_mul(t1, t2, t3); /* R * T */ - - glm_translate(t1, (vec3){34, 57, 36}); - test_assert_mat4_eq(t1, t3); - - /* test rotate is postmultiplied */ - glmc_rotate_make(t1, GLM_PI_4f, GLM_YUP); - glm_translate_make(t2, (vec3){34, 57, 36}); - - glmc_mat4_mul(t2, t1, t3); /* T * R */ - - glm_rotate(t2, GLM_PI_4f, GLM_YUP); - test_assert_mat4_eq(t2, t3); - - /* test scale is postmultiplied */ - glmc_rotate_make(t1, GLM_PI_4f, GLM_YUP); - glm_translate_make(t2, (vec3){34, 57, 36}); - glm_scale_make(t4, (vec3){3, 5, 6}); - - glmc_mat4_mul(t2, t1, t3); /* T * R */ - glmc_mat4_mul(t3, t4, t5); /* T * R * S */ - - glm_scale(t3, (vec3){3, 5, 6}); - test_assert_mat4_eq(t3, t5); - - /* test translate_x */ - glmc_rotate_make(t1, GLM_PI_4f, GLM_YUP); - glm_translate_make(t2, (vec3){34, 0, 0}); - - glmc_mat4_mul(t1, t2, t3); /* R * T */ - glm_translate_x(t1, 34); - test_assert_mat4_eq(t1, t3); - - /* test translate_y */ - glmc_rotate_make(t1, GLM_PI_4f, GLM_YUP); - glm_translate_make(t2, (vec3){0, 57, 0}); - - glmc_mat4_mul(t1, t2, t3); /* R * T */ - glm_translate_y(t1, 57); - test_assert_mat4_eq(t1, t3); - - /* test translate_z */ - glmc_rotate_make(t1, GLM_PI_4f, GLM_YUP); - glm_translate_make(t2, (vec3){0, 0, 36}); - - glmc_mat4_mul(t1, t2, t3); /* R * T */ - glm_translate_z(t1, 36); - test_assert_mat4_eq(t1, t3); - - /* test rotate_x */ - glmc_rotate_make(t1, GLM_PI_4f, (vec3){1, 0, 0}); - glm_translate_make(t2, (vec3){34, 57, 36}); - - glmc_mat4_mul(t2, t1, t3); /* T * R */ - - glm_rotate_x(t2, GLM_PI_4f, t2); - test_assert_mat4_eq(t2, t3); - - /* test rotate_y */ - glmc_rotate_make(t1, GLM_PI_4f, (vec3){0, 1, 0}); - glm_translate_make(t2, (vec3){34, 57, 36}); - - glmc_mat4_mul(t2, t1, t3); /* T * R */ - - glm_rotate_y(t2, GLM_PI_4f, t2); - test_assert_mat4_eq(t2, t3); - - /* test rotate_z */ - glmc_rotate_make(t1, GLM_PI_4f, (vec3){0, 0, 1}); - glm_translate_make(t2, (vec3){34, 57, 36}); - - glmc_mat4_mul(t2, t1, t3); /* T * R */ - - glm_rotate_z(t2, GLM_PI_4f, t2); - test_assert_mat4_eq(t2, t3); - - /* test rotate */ - glmc_rotate_make(t1, GLM_PI_4f, (vec3){0, 0, 1}); - glm_translate_make(t2, (vec3){34, 57, 36}); - - glmc_mat4_mul(t2, t1, t3); /* T * R */ - glmc_rotate(t2, GLM_PI_4f, (vec3){0, 0, 1}); - - test_assert_mat4_eq(t3, t2); - - /* test scale_uni */ - glmc_rotate_make(t1, GLM_PI_4f, GLM_YUP); - glm_translate_make(t2, (vec3){34, 57, 36}); - glm_scale_make(t4, (vec3){3, 3, 3}); - - glmc_mat4_mul(t2, t1, t3); /* T * R */ - glmc_mat4_mul(t3, t4, t5); /* T * R * S */ - - glm_scale_uni(t3, 3); - test_assert_mat4_eq(t3, t5); -} diff --git a/test/src/test_affine.h b/test/src/test_affine.h new file mode 100644 index 0000000..0487716 --- /dev/null +++ b/test/src/test_affine.h @@ -0,0 +1,634 @@ +/* + * Copyright (c), Recep Aslantas. 
+ * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#include "test_common.h" + +TEST_IMPL(GLM_PREFIX, translate) { + mat4 m1; + vec4 v1 = {1.0f, 2.0f, 3.0f, 1.0f}, v2; + + glm_mat4_identity(m1); + GLM(translate)(m1, (vec3){13.0f, 11.0f, 7.0f}); + glm_mat4_mulv(m1, v1, v2); + + ASSERT(test_eq(v2[0], 14.0f)) + ASSERT(test_eq(v2[1], 13.0f)) + ASSERT(test_eq(v2[2], 10.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + glm_mat4_identity(m1); + GLM(translate)(m1, (vec3){1.0f, -1.0f, -5.0f}); + glm_mat4_mulv(m1, v2, v2); + + ASSERT(test_eq(v2[0], 15.0f)) + ASSERT(test_eq(v2[1], 12.0f)) + ASSERT(test_eq(v2[2], 5.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, translate_to) { + mat4 m1, m2; + vec4 v1 = {1.0f, 2.0f, 3.0f, 1.0f}, v2; + + glm_mat4_identity(m1); + GLM(translate_to)(m1, (vec3){13.0f, 11.0f, 7.0f}, m2); + glm_mat4_mulv(m2, v1, v2); + + ASSERT(test_eq(v2[0], 14.0f)) + ASSERT(test_eq(v2[1], 13.0f)) + ASSERT(test_eq(v2[2], 10.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + glm_mat4_identity(m1); + GLM(translate_to)(m1, (vec3){1.0f, -1.0f, -5.0f}, m2); + glm_mat4_mulv(m2, v2, v2); + + ASSERT(test_eq(v2[0], 15.0f)) + ASSERT(test_eq(v2[1], 12.0f)) + ASSERT(test_eq(v2[2], 5.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, translate_x) { + mat4 m1; + vec4 v1 = {1.0f, 2.0f, 3.0f, 1.0f}, v2; + + glm_mat4_identity(m1); + GLM(translate_x)(m1, 13.0f); + glm_mat4_mulv(m1, v1, v2); + + ASSERT(test_eq(v2[0], 14.0f)) + ASSERT(test_eq(v2[1], 2.0f)) + ASSERT(test_eq(v2[2], 3.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + glm_mat4_identity(m1); + GLM(translate_x)(m1, -1.0f); + glm_mat4_mulv(m1, v2, v2); + + ASSERT(test_eq(v2[0], 13.0f)) + ASSERT(test_eq(v2[1], 2.0f)) + ASSERT(test_eq(v2[2], 3.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, translate_y) { + mat4 m1; + vec4 v1 = {1.0f, 2.0f, 3.0f, 1.0f}, v2; + + glm_mat4_identity(m1); + GLM(translate_y)(m1, 11.0f); + glm_mat4_mulv(m1, v1, v2); + + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], 13.0f)) + ASSERT(test_eq(v2[2], 3.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + glm_mat4_identity(m1); + GLM(translate_y)(m1, -1.0f); + glm_mat4_mulv(m1, v2, v2); + + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], 12.0f)) + ASSERT(test_eq(v2[2], 3.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, translate_z) { + mat4 m1; + vec4 v1 = {1.0f, 2.0f, 3.0f, 1.0f}, v2; + + glm_mat4_identity(m1); + GLM(translate_z)(m1, 7.0f); + glm_mat4_mulv(m1, v1, v2); + + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], 2.0f)) + ASSERT(test_eq(v2[2], 10.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + glm_mat4_identity(m1); + GLM(translate_z)(m1, -5.0f); + glm_mat4_mulv(m1, v2, v2); + + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], 2.0f)) + ASSERT(test_eq(v2[2], 5.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, translate_make) { + mat4 m1; + vec4 v1 = {1.0f, 2.0f, 3.0f, 1.0f}, v2; + + glm_mat4_identity(m1); + GLM(translate_make)(m1, (vec3){13.0f, 11.0f, 7.0f}); + glm_mat4_mulv(m1, v1, v2); + + ASSERT(test_eq(v2[0], 14.0f)) + ASSERT(test_eq(v2[1], 13.0f)) + ASSERT(test_eq(v2[2], 10.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + glm_mat4_identity(m1); + GLM(translate_make)(m1, (vec3){1.0f, -1.0f, -5.0f}); + glm_mat4_mulv(m1, v2, v2); + + ASSERT(test_eq(v2[0], 15.0f)) + ASSERT(test_eq(v2[1], 12.0f)) + ASSERT(test_eq(v2[2], 5.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + TEST_SUCCESS +} + 
+TEST_IMPL(GLM_PREFIX, scale_to) { + mat4 m1, m2; + vec4 v1 = {1.0f, 2.0f, 3.0f, 1.0f}, v2; + + glm_mat4_identity(m1); + GLM(scale_to)(m1, (vec3){13.0f, 11.0f, 7.0f}, m2); + glm_mat4_mulv(m2, v1, v2); + + ASSERT(test_eq(v2[0], 13.0f)) + ASSERT(test_eq(v2[1], 22.0f)) + ASSERT(test_eq(v2[2], 21.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + glm_mat4_identity(m1); + GLM(scale_to)(m1, (vec3){1.0f, -1.0f, -5.0f}, m2); + glm_mat4_mulv(m2, v2, v2); + + ASSERT(test_eq(v2[0], 13.0f)) + ASSERT(test_eq(v2[1], -22.0f)) + ASSERT(test_eq(v2[2], -105.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, scale_make) { + mat4 m1; + vec4 v1 = {1.0f, 2.0f, 3.0f, 1.0f}, v2; + + GLM(scale_make)(m1, (vec3){13.0f, 11.0f, 7.0f}); + glm_mat4_mulv(m1, v1, v2); + + ASSERT(test_eq(v2[0], 13.0f)) + ASSERT(test_eq(v2[1], 22.0f)) + ASSERT(test_eq(v2[2], 21.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + GLM(scale_make)(m1, (vec3){1.0f, -1.0f, -5.0f}); + glm_mat4_mulv(m1, v2, v2); + + ASSERT(test_eq(v2[0], 13.0f)) + ASSERT(test_eq(v2[1], -22.0f)) + ASSERT(test_eq(v2[2], -105.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, scale) { + mat4 m1; + vec4 v1 = {1.0f, 2.0f, 3.0f, 1.0f}, v2; + + glm_mat4_identity(m1); + GLM(scale)(m1, (vec3){13.0f, 11.0f, 7.0f}); + glm_mat4_mulv(m1, v1, v2); + + ASSERT(test_eq(v2[0], 13.0f)) + ASSERT(test_eq(v2[1], 22.0f)) + ASSERT(test_eq(v2[2], 21.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + glm_mat4_identity(m1); + GLM(scale)(m1, (vec3){1.0f, -1.0f, -5.0f}); + glm_mat4_mulv(m1, v2, v2); + + ASSERT(test_eq(v2[0], 13.0f)) + ASSERT(test_eq(v2[1], -22.0f)) + ASSERT(test_eq(v2[2], -105.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, scale_uni) { + mat4 m1; + vec4 v1 = {1.0f, 2.0f, 3.0f, 1.0f}, v2; + + glm_mat4_identity(m1); + GLM(scale_uni)(m1, 13.0f); + glm_mat4_mulv(m1, v1, v2); + + ASSERT(test_eq(v2[0], 13.0f)) + ASSERT(test_eq(v2[1], 26.0f)) + ASSERT(test_eq(v2[2], 39.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + glm_mat4_identity(m1); + GLM(scale_uni)(m1, -5.0f); + glm_mat4_mulv(m1, v2, v2); + + ASSERT(test_eq(v2[0], -65.0f)) + ASSERT(test_eq(v2[1], -130.0f)) + ASSERT(test_eq(v2[2], -195.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, rotate_x) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + vec4 v1 = {0.0f, 1.0f, 0.0f, 1.0f}, v2 = {0.0f, 1.0f, 0.0f, 1.0f}; + + GLM(rotate_x)(m1, GLM_PI_2f, m1); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 1.0f)) + + glm_vec3_copy(v2, v1); + + GLM(rotate_x)(m1, GLM_PI_2f, m1); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], -1.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + glm_vec3_copy(v2, v1); + + GLM(rotate_x)(m1, GLM_PI_2f, m1); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, rotate_y) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + vec4 v1 = {1.0f, 0.0f, 0.0f, 1.0f}, v2 = {1.0f, 0.0f, 0.0f, 1.0f}; + + GLM(rotate_y)(m1, GLM_PI_2f, m1); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -1.0f)) + + glm_vec3_copy(v2, v1); + + GLM(rotate_y)(m1, GLM_PI_2f, m1); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], -1.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + glm_vec3_copy(v2, v1); + + GLM(rotate_y)(m1, GLM_PI_2f, m1); + glm_mat4_mulv(m1, v1, v1); + + 
ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, rotate_z) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + vec4 v1 = {0.0f, 1.0f, 0.0f, 1.0f}, v2 = {0.0f, 1.0f, 0.0f, 1.0f}; + + GLM(rotate_z)(m1, GLM_PI_2f, m1); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], -1.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + glm_vec3_copy(v2, v1); + + GLM(rotate_z)(m1, GLM_PI_2f, m1); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], -1.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + glm_vec3_copy(v2, v1); + + GLM(rotate_z)(m1, GLM_PI_2f, m1); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 1.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, rotate_make) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + vec4 v1 = {1.0f, 0.0f, 0.0f, 1.0f}; + + /* rotate X around Y = -Z */ + GLM(rotate_make)(m1, GLM_PI_2f, GLM_YUP); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -1.0f)) + + /* rotate -Z around X = Y */ + GLM(rotate_make)(m1, GLM_PI_2f, GLM_XUP); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 1.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + /* rotate Y around X = +Z */ + GLM(rotate_make)(m1, GLM_PI_2f, GLM_XUP); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, rotate) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT, m2 = GLM_MAT4_IDENTITY_INIT; + vec4 v1 = {1.0f, 0.0f, 0.0f, 1.0f}; + + /* 360 deg */ + glm_rotate(m1, GLM_PI_2f, GLM_YUP); + glm_rotate(m1, GLM_PI_2f, GLM_YUP); + glm_rotate(m1, GLM_PI_2f, GLM_YUP); + glm_rotate(m1, GLM_PI_2f, GLM_YUP); + ASSERTIFY(test_assert_mat4_eq(m1, m2)) + + /* rotate X around Y = -Z */ + GLM(rotate)(m1, GLM_PI_2f, GLM_YUP); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -1.0f)) + + glm_mat4_identity(m1); + + /* rotate -Z around X = Y */ + GLM(rotate)(m1, GLM_PI_2f, GLM_XUP); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 1.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + glm_mat4_identity(m1); + + /* rotate Y around X = +Z */ + GLM(rotate)(m1, GLM_PI_2f, GLM_XUP); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, rotate_at) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + vec4 v1 = {1.0f, 0.0f, 0.0f, 1.0f}; + + GLM(rotate_at)(m1, (vec3){0.5f, 0.0f, 0.0f}, GLM_PI_2f, GLM_YUP); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.5f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -0.5f)) + + glm_mat4_identity(m1); + + GLM(rotate_at)(m1, GLM_VEC3_ZERO, GLM_PI_2f, GLM_ZUP); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.5f)) + ASSERT(test_eq(v1[2], -0.5f)) + + glm_mat4_identity(m1); + + v1[0] = 1.0f; + v1[1] = 1.0f; + v1[2] = 1.0f; + + GLM(rotate_at)(m1, GLM_VEC3_ZERO, GLM_PI_2f, GLM_XUP); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 1.0f)) + ASSERT(test_eq(v1[1], -1.0f)) + ASSERT(test_eq(v1[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, rotate_atm) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + vec4 v1 = {1.0f, 0.0f, 0.0f, 1.0f}; + + GLM(rotate_atm)(m1, (vec3){0.5f, 0.0f, 0.0f}, GLM_PI_2f, GLM_YUP); + 
glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.5f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -0.5f)) + + GLM(rotate_atm)(m1, GLM_VEC3_ZERO, GLM_PI_2f, GLM_ZUP); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.5f)) + ASSERT(test_eq(v1[2], -0.5f)) + + v1[0] = 1.0f; + v1[1] = 1.0f; + v1[2] = 1.0f; + + GLM(rotate_atm)(m1, GLM_VEC3_ZERO, GLM_PI_2f, GLM_XUP); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 1.0f)) + ASSERT(test_eq(v1[1], -1.0f)) + ASSERT(test_eq(v1[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, decompose_scalev) { + mat4 m1; + vec3 s1; + + GLM(scale_make)(m1, (vec3){7.0f, 8.0f, 9.0f}); + GLM(decompose_scalev)(m1, s1); + + ASSERT(test_eq(s1[0], 7.0f)) + ASSERT(test_eq(s1[1], 8.0f)) + ASSERT(test_eq(s1[2], 9.0f)) + + GLM(scale)(m1, (vec3){7.0f, 8.0f, 9.0f}); + GLM(decompose_scalev)(m1, s1); + + ASSERT(test_eq(s1[0], 49.0f)) + ASSERT(test_eq(s1[1], 64.0f)) + ASSERT(test_eq(s1[2], 81.0f)) + + glm_rotate(m1, GLM_PI_4f, (vec3){23.0f, 45.0f, 66.0f}); + ASSERT(test_eq(s1[0], 49.0f)) + ASSERT(test_eq(s1[1], 64.0f)) + ASSERT(test_eq(s1[2], 81.0f)) + + glm_translate(m1, (vec3){4.0f, 5.0f, 6.0f}); + ASSERT(test_eq(s1[0], 49.0f)) + ASSERT(test_eq(s1[1], 64.0f)) + ASSERT(test_eq(s1[2], 81.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, uniscaled) { + mat4 m1; + + GLM(scale_make)(m1, (vec3){7.0f, 8.0f, 9.0f}); + ASSERT(!GLM(uniscaled)(m1)) + + GLM(scale_make)(m1, (vec3){7.0f, 7.0f, 7.0f}); + ASSERT(GLM(uniscaled)(m1)) + + glm_rotate(m1, GLM_PI_4f, (vec3){23.0f, 45.0f, 66.0f}); + ASSERT(GLM(uniscaled)(m1)) + + glm_translate(m1, (vec3){4.0f, 5.0f, 6.0f}); + ASSERT(GLM(uniscaled)(m1)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, decompose_rs) { + mat4 m1, m2, r; + vec3 s1; + + GLM(scale_make)(m1, (vec3){7.0f, 8.0f, 9.0f}); + GLM(decompose_rs)(m1, r, s1); + + ASSERT(test_eq(s1[0], 7.0f)) + ASSERT(test_eq(s1[1], 8.0f)) + ASSERT(test_eq(s1[2], 9.0f)) + ASSERTIFY(test_assert_mat4_eq_identity(r)); + + GLM(scale)(m1, (vec3){7.0f, 8.0f, 9.0f}); + GLM(decompose_rs)(m1, r, s1); + + ASSERT(test_eq(s1[0], 49.0f)) + ASSERT(test_eq(s1[1], 64.0f)) + ASSERT(test_eq(s1[2], 81.0f)) + ASSERTIFY(test_assert_mat4_eq_identity(r)); + + glm_rotate(m1, GLM_PI_4f, (vec3){23.0f, 45.0f, 66.0f}); + ASSERT(test_eq(s1[0], 49.0f)) + ASSERT(test_eq(s1[1], 64.0f)) + ASSERT(test_eq(s1[2], 81.0f)) + GLM(decompose_rs)(m1, r, s1); + + glm_mat4_identity(m2); + glm_mat4_mul(m2, r, m2); + glm_scale(m2, s1); + + ASSERTIFY(test_assert_mat4_eq2(m1, m2, 0.00001)); + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, decompose) { + mat4 m1, m2, r; + vec4 t; + vec3 s; + + GLM(scale_make)(m1, (vec3){7.0f, 8.0f, 9.0f}); + GLM(decompose)(m1, t, r, s); + + ASSERT(test_eq(s[0], 7.0f)) + ASSERT(test_eq(s[1], 8.0f)) + ASSERT(test_eq(s[2], 9.0f)) + ASSERTIFY(test_assert_mat4_eq_identity(r)); + + GLM(scale)(m1, (vec3){7.0f, 8.0f, 9.0f}); + GLM(decompose)(m1, t, r, s); + + ASSERT(test_eq(s[0], 49.0f)) + ASSERT(test_eq(s[1], 64.0f)) + ASSERT(test_eq(s[2], 81.0f)) + ASSERTIFY(test_assert_mat4_eq_identity(r)); + + glm_rotate(m1, GLM_PI_4f, (vec3){23.0f, 45.0f, 66.0f}); + ASSERT(test_eq(s[0], 49.0f)) + ASSERT(test_eq(s[1], 64.0f)) + ASSERT(test_eq(s[2], 81.0f)) + GLM(decompose)(m1, t, r, s); + + glm_mat4_identity(m2); + glm_mat4_mul(m2, r, m2); + glm_scale(m2, s); + + ASSERTIFY(test_assert_mat4_eq2(m1, m2, 0.00001)); + + glm_mat4_identity(m1); + glm_translate(m1, (vec3){56.0f, 13.0f, 90.0f}); + glm_rotate(m1, GLM_PI_4f, (vec3){23.0f, 45.0f, 66.0f}); + glm_scale(m1, (vec3){12.0f, 34.0f, 
23.0f}); + + GLM(decompose)(m1, t, r, s); + + ASSERT(test_eq(t[0], 56.0f)) + ASSERT(test_eq(t[1], 13.0f)) + ASSERT(test_eq(t[2], 90.0f)) + + ASSERT(test_eq(s[0], 12.0f)) + ASSERT(test_eq(s[1], 34.0f)) + ASSERT(test_eq(s[2], 23.0f)) + + glm_mat4_identity(m2); + glm_translate(m2, t); + glm_mat4_mul(m2, r, m2); + glm_scale(m2, s); + ASSERTIFY(test_assert_mat4_eq2(m1, m2, 0.00001)); + + TEST_SUCCESS +} diff --git a/test/src/test_affine_mat.h b/test/src/test_affine_mat.h new file mode 100644 index 0000000..1c1127f --- /dev/null +++ b/test/src/test_affine_mat.h @@ -0,0 +1,87 @@ +/* + * Copyright (c), Recep Aslantas. + * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#include "test_common.h" + +TEST_IMPL(GLM_PREFIX, mul) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + mat4 m2 = GLM_MAT4_IDENTITY_INIT; + mat4 m3; + mat4 m4 = GLM_MAT4_ZERO_INIT; + int i, j, k; + + test_rand_mat4(m1); + test_rand_mat4(m2); + + GLM(mul)(m1, m2, m3); + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + for (k = 0; k < 4; k++) + /* column-major */ + m4[i][j] += m1[k][j] * m2[i][k]; + } + } + + ASSERTIFY(test_assert_mat4_eq(m3, m4)) + + /* test pre compiled */ + GLM(mul)(m1, m2, m3); + ASSERTIFY(test_assert_mat4_eq(m3, m4)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mul_rot) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + mat4 m2 = GLM_MAT4_IDENTITY_INIT; + mat4 m3; + mat4 m4 = GLM_MAT4_ZERO_INIT; + int i, j, k; + + glm_rotate(m1, drand48(), (vec3){drand48(), drand48(), drand48()}); + glm_rotate(m2, drand48(), (vec3){drand48(), drand48(), drand48()}); + + GLM(mul_rot)(m1, m2, m3); + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + for (k = 0; k < 4; k++) + /* column-major */ + m4[i][j] += m1[k][j] * m2[i][k]; + } + } + + ASSERTIFY(test_assert_mat4_eq(m3, m4)) + + /* test pre compiled */ + GLM(mul_rot)(m1, m2, m3); + ASSERTIFY(test_assert_mat4_eq(m3, m4)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, inv_tr) { + mat4 m1, m2; + int i; + + for (i = 0; i < 10000; i++) { + test_rand_mat4(m1); + + glm_mat4_copy(m1, m2); + + /* test inverse precise */ + GLM(inv_tr)(m1); + GLM(inv_tr)(m1); + ASSERTIFY(test_assert_mat4_eq(m1, m2)) + + /* test inverse precise */ + GLM(mat4_inv)(m1, m2); + GLM(inv_tr)(m2); + ASSERTIFY(test_assert_mat4_eq(m1, m2)) + } + + TEST_SUCCESS +} diff --git a/test/src/test_bezier.c b/test/src/test_bezier.c index 1c7bb11..3089dd5 100644 --- a/test/src/test_bezier.c +++ b/test/src/test_bezier.c @@ -35,8 +35,7 @@ test_hermite_plain(float s, float p0, float t0, float t1, float p1) { + t1 * (sss - ss); } -void -test_bezier(void **state) { +TEST_IMPL(bezier) { float s, p0, p1, c0, c1, smc, Bs, Bs_plain; s = test_rand(); @@ -50,16 +49,18 @@ test_bezier(void **state) { Bs = glm_bezier(s, p0, c0, c1, p1); Bs_plain = test_bezier_plain(s, p0, c0, c1, p1); - assert_true(glm_eq(Bs, Bs_plain)); - test_assert_eqf(smc, Bs_plain); - test_assert_eqf(Bs, smc); + ASSERT(test_eq(Bs, Bs_plain)); + ASSERTIFY(test_assert_eqf(smc, Bs_plain)) + ASSERTIFY(test_assert_eqf(Bs, smc)) /* test cubic hermite */ smc = glm_smc(s, GLM_HERMITE_MAT, (vec4){p0, p1, c0, c1}); Bs = glm_hermite(s, p0, c0, c1, p1); Bs_plain = test_hermite_plain(s, p0, c0, c1, p1); - assert_true(glm_eq(Bs, Bs_plain)); - assert_true(glm_eq(smc, Bs_plain)); - assert_true(glm_eq(Bs, smc)); + ASSERT(test_eq(Bs, Bs_plain)); + ASSERT(test_eq(smc, Bs_plain)); + ASSERT(test_eq(Bs, smc)); + + TEST_SUCCESS } diff --git a/test/src/test_cam.c b/test/src/test_cam.c index c1f31e1..f444bdd 100644 --- 
a/test/src/test_cam.c +++ b/test/src/test_cam.c @@ -7,25 +7,24 @@ #include "test_common.h" -void -test_camera_lookat(void **state) { - mat4 view1, view2; +TEST_IMPL(camera_lookat) { + mat4 view1, view2; vec3 center, - eye = {0.024f, 14.6f, 67.04f}, - dir = {0.0f, 0.0f, -1.0f}, - up = {0.0f, 1.0f, 0.0f} - ; + eye = {0.024f, 14.6f, 67.04f}, + dir = {0.0f, 0.0f, -1.0f}, + up = {0.0f, 1.0f, 0.0f}; glm_vec3_add(eye, dir, center); glm_lookat(eye, center, up, view1); glm_look(eye, dir, up, view2); - test_assert_mat4_eq(view1, view2); + ASSERTIFY(test_assert_mat4_eq(view1, view2)) + + TEST_SUCCESS } -void -test_camera_decomp(void **state) { +TEST_IMPL(camera_decomp) { mat4 proj, proj2; vec4 sizes; float aspect, fovy, nearVal, farVal; @@ -36,19 +35,21 @@ test_camera_decomp(void **state) { farVal = 100.0f; glm_perspective(fovy, aspect, nearVal, farVal, proj); - assert_true(fabsf(aspect - glm_persp_aspect(proj)) < FLT_EPSILON); - assert_true(fabsf(fovy - glm_persp_fovy(proj)) < FLT_EPSILON); - assert_true(fabsf(49.984f - glm_deg(glm_persp_fovy(proj))) < FLT_EPSILON); + ASSERT(fabsf(aspect - glm_persp_aspect(proj)) < FLT_EPSILON) + ASSERT(fabsf(fovy - glm_persp_fovy(proj)) < FLT_EPSILON) + ASSERT(fabsf(49.984f - glm_deg(glm_persp_fovy(proj))) < FLT_EPSILON) glm_persp_sizes(proj, fovy, sizes); - glm_frustum(-sizes[0] * 0.5, - sizes[0] * 0.5, - -sizes[1] * 0.5, - sizes[1] * 0.5, + glm_frustum(-sizes[0] * 0.5f, + sizes[0] * 0.5f, + -sizes[1] * 0.5f, + sizes[1] * 0.5f, nearVal, farVal, proj2); - test_assert_mat4_eq(proj, proj2); + ASSERTIFY(test_assert_mat4_eq(proj, proj2)) + + TEST_SUCCESS } diff --git a/test/src/test_clamp.c b/test/src/test_clamp.c index 70d684b..1d1c0c2 100644 --- a/test/src/test_clamp.c +++ b/test/src/test_clamp.c @@ -7,24 +7,25 @@ #include "test_common.h" -void -test_clamp(void **state) { - vec3 v3 = {15.07, 0.4, 17.3}; - vec4 v4 = {5.07, 2.3, 1.3, 1.4}; +TEST_IMPL(clamp) { + vec3 v3 = {15.07f, 0.4f, 17.3f}; + vec4 v4 = {5.07f, 2.3f, 1.3f, 1.4f}; - assert_true(glm_clamp(1.6f, 0.0f, 1.0f) == 1.0f); - assert_true(glm_clamp(-1.6f, 0.0f, 1.0f) == 0.0f); - assert_true(glm_clamp(0.6f, 0.0f, 1.0f) == 0.6f); + ASSERT(glm_clamp(1.6f, 0.0f, 1.0f) == 1.0f) + ASSERT(glm_clamp(-1.6f, 0.0f, 1.0f) == 0.0f) + ASSERT(glm_clamp(0.6f, 0.0f, 1.0f) == 0.6f) glm_vec3_clamp(v3, 0.0, 1.0); glm_vec4_clamp(v4, 1.5, 3.0); - assert_true(v3[0] == 1.0f); - assert_true(v3[1] == 0.4f); - assert_true(v3[2] == 1.0f); + ASSERT(v3[0] == 1.0f) + ASSERT(v3[1] == 0.4f) + ASSERT(v3[2] == 1.0f) - assert_true(v4[0] == 3.0f); - assert_true(v4[1] == 2.3f); - assert_true(v4[2] == 1.5f); - assert_true(v4[3] == 1.5f); + ASSERT(v4[0] == 3.0f) + ASSERT(v4[1] == 2.3f) + ASSERT(v4[2] == 1.5f) + ASSERT(v4[3] == 1.5f) + + TEST_SUCCESS } diff --git a/test/src/test_common.c b/test/src/test_common.c index 9f8335e..6459aa8 100644 --- a/test/src/test_common.c +++ b/test/src/test_common.c @@ -4,26 +4,20 @@ */ #include "test_common.h" -#include -#include - -#define m 4 -#define n 4 +#include void test_rand_mat4(mat4 dest) { glm_mat4_copy(GLM_MAT4_IDENTITY, dest); - - srand((unsigned int)time(NULL)); /* random position */ dest[3][0] = drand48(); dest[3][1] = drand48(); dest[3][2] = drand48(); - + /* random rotatation around random axis with random angle */ glm_rotate(dest, drand48(), (vec3){drand48(), drand48(), drand48()}); - + /* random scale */ /* glm_scale(dest, (vec3){drand48(), drand48(), drand48()}); */ } @@ -32,8 +26,6 @@ void test_rand_mat3(mat3 dest) { mat4 m4; - srand((unsigned int)time(NULL)); - /* random rotatation around random 
axis with random angle */ glm_rotate_make(m4, drand48(), (vec3){drand48(), drand48(), drand48()}); glm_mat4_pick3(m4, dest); @@ -41,8 +33,6 @@ test_rand_mat3(mat3 dest) { void test_rand_vec3(vec3 dest) { - srand((unsigned int)time(NULL)); - dest[0] = drand48(); dest[1] = drand48(); dest[2] = drand48(); @@ -57,8 +47,6 @@ test_rand_vec3s() { void test_rand_vec4(vec4 dest) { - srand((unsigned int)time(NULL)); - dest[0] = drand48(); dest[1] = drand48(); dest[2] = drand48(); @@ -74,103 +62,221 @@ test_rand_vec4s() { float test_rand(void) { - srand((unsigned int)time(NULL)); return drand48(); } void test_rand_quat(versor q) { - srand((unsigned int)time(NULL)); - glm_quat(q, drand48(), drand48(), drand48(), drand48()); glm_quat_normalize(q); } -void +test_status_t test_assert_mat4_eq(mat4 m1, mat4 m2) { int i, j, k; - for (i = 0; i < m; i++) { - for (j = 0; j < n; j++) { - for (k = 0; k < m; k++) - assert_true(fabsf(m1[i][j] - m2[i][j]) <= 0.0000009); + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + for (k = 0; k < 4; k++) + ASSERT(fabsf(m1[i][j] - m2[i][j]) <= 0.0000009) } } + + TEST_SUCCESS } -void +test_status_t +test_assert_mat4_eqt(mat4 m1, mat4 m2) { + int i, j, k; + + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + for (k = 0; k < 4; k++) + ASSERT(fabsf(m1[j][i] - m2[i][j]) <= 0.0000009) + } + } + + TEST_SUCCESS +} + +test_status_t test_assert_mat4_eq2(mat4 m1, mat4 m2, float eps) { int i, j, k; - - for (i = 0; i < m; i++) { - for (j = 0; j < n; j++) { - for (k = 0; k < m; k++) - assert_true(fabsf(m1[i][j] - m2[i][j]) <= eps); + + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + for (k = 0; k < 4; k++) + ASSERT(fabsf(m1[i][j] - m2[i][j]) <= eps); } } + + TEST_SUCCESS } -void +test_status_t test_assert_mat3_eq(mat3 m1, mat3 m2) { int i, j, k; for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) - assert_true(fabsf(m1[i][j] - m2[i][j]) <= 0.0000009); + ASSERT(fabsf(m1[i][j] - m2[i][j]) <= 0.0000009); } } + + TEST_SUCCESS } -void +test_status_t +test_assert_mat3_eqt(mat3 m1, mat3 m2) { + int i, j, k; + + for (i = 0; i < 3; i++) { + for (j = 0; j < 3; j++) { + for (k = 0; k < 3; k++) + ASSERT(fabsf(m1[j][i] - m2[i][j]) <= 0.0000009); + } + } + + TEST_SUCCESS +} + +test_status_t +test_assert_mat3_eq_identity(mat3 m3) { + int i, j; + + for (i = 0; i < 3; i++) { + for (j = 0; j < 3; j++) { + if (i == j) { + ASSERT(test_eq(m3[i][j], 1.0f)) + } else { + ASSERT(test_eq(m3[i][j], 0.0f)) + } + } + } + + TEST_SUCCESS +} + +test_status_t +test_assert_mat3_eq_zero(mat3 m3) { + int i, j; + + for (i = 0; i < 3; i++) { + for (j = 0; j < 3; j++) { + ASSERT(test_eq(m3[i][j], 0.0f)) + } + } + + TEST_SUCCESS +} + +test_status_t +test_assert_mat4_eq_identity(mat4 m4) { + int i, j; + + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + if (i == j) { + ASSERT(test_eq(m4[i][j], 1.0f)) + } else { + ASSERT(test_eq(m4[i][j], 0.0f)) + } + } + } + + TEST_SUCCESS +} + +test_status_t +test_assert_mat4_eq_zero(mat4 m4) { + int i, j; + + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + ASSERT(test_eq(m4[i][j], 0.0f)) + } + } + + TEST_SUCCESS +} + +test_status_t test_assert_eqf(float a, float b) { - assert_true(fabsf(a - b) <= 0.000009); /* rounding errors */ + ASSERT(fabsf(a - b) <= 0.000009); /* rounding errors */ + + TEST_SUCCESS } -void +test_status_t test_assert_vec2_eq(vec2 v1, vec2 v2) { - assert_true(fabsf(v1[0] - v2[0]) <= 0.000009); /* rounding errors */ - assert_true(fabsf(v1[1] - v2[1]) <= 0.000009); + ASSERT(fabsf(v1[0] - v2[0]) <= 0.000009); /* rounding 
errors */ + ASSERT(fabsf(v1[1] - v2[1]) <= 0.000009); + + TEST_SUCCESS } -void +test_status_t test_assert_vec3_eq(vec3 v1, vec3 v2) { - assert_true(fabsf(v1[0] - v2[0]) <= 0.000009); /* rounding errors */ - assert_true(fabsf(v1[1] - v2[1]) <= 0.000009); - assert_true(fabsf(v1[2] - v2[2]) <= 0.000009); + ASSERT(fabsf(v1[0] - v2[0]) <= 0.000009); /* rounding errors */ + ASSERT(fabsf(v1[1] - v2[1]) <= 0.000009); + ASSERT(fabsf(v1[2] - v2[2]) <= 0.000009); + + TEST_SUCCESS } -void +test_status_t test_assert_vec3s_eq(vec3s v1, vec3s v2) { test_assert_vec3_eq(v1.raw, v2.raw); + + TEST_SUCCESS } -void +test_status_t test_assert_vec4_eq(vec4 v1, vec4 v2) { - assert_true(fabsf(v1[0] - v2[0]) <= 0.000009); /* rounding errors */ - assert_true(fabsf(v1[1] - v2[1]) <= 0.000009); - assert_true(fabsf(v1[2] - v2[2]) <= 0.000009); - assert_true(fabsf(v1[3] - v2[3]) <= 0.000009); + ASSERT(fabsf(v1[0] - v2[0]) <= 0.000009); /* rounding errors */ + ASSERT(fabsf(v1[1] - v2[1]) <= 0.000009); + ASSERT(fabsf(v1[2] - v2[2]) <= 0.000009); + ASSERT(fabsf(v1[3] - v2[3]) <= 0.000009); + + TEST_SUCCESS } -void +test_status_t test_assert_vec4s_eq(vec4s v1, vec4s v2) { test_assert_vec4_eq(v1.raw, v2.raw); + + TEST_SUCCESS } -void +test_status_t test_assert_quat_eq_abs(versor v1, versor v2) { - assert_true(fabsf(fabsf(v1[0]) - fabsf(v2[0])) <= 0.0009); /* rounding errors */ - assert_true(fabsf(fabsf(v1[1]) - fabsf(v2[1])) <= 0.0009); - assert_true(fabsf(fabsf(v1[2]) - fabsf(v2[2])) <= 0.0009); - assert_true(fabsf(fabsf(v1[3]) - fabsf(v2[3])) <= 0.0009); + ASSERT(fabsf(fabsf(v1[0]) - fabsf(v2[0])) <= 0.0009); /* rounding errors */ + ASSERT(fabsf(fabsf(v1[1]) - fabsf(v2[1])) <= 0.0009); + ASSERT(fabsf(fabsf(v1[2]) - fabsf(v2[2])) <= 0.0009); + ASSERT(fabsf(fabsf(v1[3]) - fabsf(v2[3])) <= 0.0009); + + TEST_SUCCESS } -void +test_status_t test_assert_quat_eq(versor v1, versor v2) { - assert_true(fabsf(v1[0] - v2[0]) <= 0.000009); /* rounding errors */ - assert_true(fabsf(v1[1] - v2[1]) <= 0.000009); - assert_true(fabsf(v1[2] - v2[2]) <= 0.000009); - assert_true(fabsf(v1[3] - v2[3]) <= 0.000009); + ASSERT(fabsf(v1[0] - v2[0]) <= 0.000009); /* rounding errors */ + ASSERT(fabsf(v1[1] - v2[1]) <= 0.000009); + ASSERT(fabsf(v1[2] - v2[2]) <= 0.000009); + ASSERT(fabsf(v1[3] - v2[3]) <= 0.000009); + + TEST_SUCCESS } +test_status_t +test_assert_quat_eq_identity(versor q) { + versor p = GLM_QUAT_IDENTITY_INIT; + + ASSERT(fabsf(q[0] - p[0]) <= 0.000009); /* rounding errors */ + ASSERT(fabsf(q[1] - p[1]) <= 0.000009); + ASSERT(fabsf(q[2] - p[2]) <= 0.000009); + ASSERT(fabsf(q[3] - p[3]) <= 0.000009); + + TEST_SUCCESS +} diff --git a/test/src/test_common.h b/test/src/test_common.h index 8bc08af..6dcb878 100644 --- a/test/src/test_common.h +++ b/test/src/test_common.h @@ -8,20 +8,7 @@ #ifndef test_common_h #define test_common_h -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include +#include "../include/common.h" void test_rand_mat4(mat4 dest); @@ -29,37 +16,58 @@ test_rand_mat4(mat4 dest); void test_rand_mat3(mat3 dest); -void +test_status_t test_assert_eqf(float a, float b); -void +test_status_t test_assert_mat4_eq(mat4 m1, mat4 m2); -void +test_status_t +test_assert_mat4_eqt(mat4 m1, mat4 m2); + +test_status_t test_assert_mat4_eq2(mat4 m1, mat4 m2, float eps); -void +test_status_t +test_assert_mat4_eq_identity(mat4 m4); + +test_status_t +test_assert_mat4_eq_zero(mat4 m4); + +test_status_t test_assert_mat3_eq(mat3 m1, mat3 m2); -void +test_status_t 
test_assert_vec2_eq(vec2 v1, vec2 v2); -void +test_status_t +test_assert_mat3_eqt(mat3 m1, mat3 m2); + +test_status_t +test_assert_mat3_eq_identity(mat3 m3); + +test_status_t +test_assert_mat3_eq_zero(mat3 m3); + +test_status_t test_assert_vec3_eq(vec3 v1, vec3 v2); -void +test_status_t test_assert_vec3s_eq(vec3s v1, vec3s v2); -void +test_status_t test_assert_vec4_eq(vec4 v1, vec4 v2); -void +test_status_t test_assert_vec4s_eq(vec4s v1, vec4s v2); -void +test_status_t test_assert_quat_eq(versor v1, versor v2); -void +test_status_t +test_assert_quat_eq_identity(versor q) ; + +test_status_t test_assert_quat_eq_abs(versor v1, versor v2); void @@ -80,4 +88,16 @@ test_rand(void); void test_rand_quat(versor q); +CGLM_INLINE +bool +test_eq(float a, float b) { + return fabsf(a - b) <= 1e-6; +} + +CGLM_INLINE +bool +test_eq_th(float a, float b, float th) { + return fabsf(a - b) <= th; +} + #endif /* test_common_h */ diff --git a/test/src/test_euler.c b/test/src/test_euler.c index b71c4cb..b71e0f0 100644 --- a/test/src/test_euler.c +++ b/test/src/test_euler.c @@ -7,10 +7,9 @@ #include "test_common.h" -void -test_euler(void **state) { - mat4 rot1, rot2; - vec3 inAngles, outAngles; +TEST_IMPL(euler) { + mat4 rot1, rot2; + vec3 inAngles, outAngles; inAngles[0] = glm_rad(-45.0f); /* X angle */ inAngles[1] = glm_rad(88.0f); /* Y angle */ @@ -22,11 +21,11 @@ test_euler(void **state) { glmc_euler_angles(rot1, outAngles); /* angles must be equal in that range */ - test_assert_vec3_eq(inAngles, outAngles); + ASSERTIFY(test_assert_vec3_eq(inAngles, outAngles)) /* matrices must be equal */ glmc_euler_xyz(outAngles, rot2); - test_assert_mat4_eq(rot1, rot2); + ASSERTIFY(test_assert_mat4_eq(rot1, rot2)) /* change range */ inAngles[0] = glm_rad(-145.0f); /* X angle */ @@ -40,5 +39,7 @@ test_euler(void **state) { /* matrices must be equal */ glmc_euler_xyz(outAngles, rot2); - test_assert_mat4_eq(rot1, rot2); + ASSERTIFY(test_assert_mat4_eq(rot1, rot2)) + + TEST_SUCCESS } diff --git a/test/src/test_main.c b/test/src/test_main.c deleted file mode 100644 index 3bdfebb..0000000 --- a/test/src/test_main.c +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c), Recep Aslantas. - * MIT License (MIT), http://opensource.org/licenses/MIT - */ - -#include "test_common.h" -#include "test_tests.h" - -int -main(int argc, const char * argv[]) { - const struct CMUnitTest tests[] = { - /* mat4 */ - cmocka_unit_test(test_mat4), - - /* mat3 */ - cmocka_unit_test(test_mat3), - - /* camera */ - cmocka_unit_test(test_camera_lookat), - cmocka_unit_test(test_camera_decomp), - - /* project */ - cmocka_unit_test(test_project), - - /* vector */ - cmocka_unit_test(test_clamp), - - /* euler */ - cmocka_unit_test(test_euler), - - /* quaternion */ - cmocka_unit_test(test_quat), - - /* vec4 */ - cmocka_unit_test(test_vec4), - - /* vec3 */ - cmocka_unit_test(test_vec3), - - /* affine */ - cmocka_unit_test(test_affine), - - /* bezier */ - cmocka_unit_test(test_bezier), - - /* vec2 */ - cmocka_unit_test(test_vec2) - }; - - return cmocka_run_group_tests(tests, NULL, NULL); -} diff --git a/test/src/test_mat3.c b/test/src/test_mat3.c deleted file mode 100644 index e0de7c6..0000000 --- a/test/src/test_mat3.c +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c), Recep Aslantas. 
- * - * MIT License (MIT), http://opensource.org/licenses/MIT - * Full license can be found in the LICENSE file - */ - -#include "test_common.h" - -#define m 3 -#define n 3 - -void -test_mat3(void **state) { - mat3 m1 = GLM_MAT3_IDENTITY_INIT; - mat3 m2 = GLM_MAT3_IDENTITY_INIT; - mat3 m3; - mat3 m4 = GLM_MAT3_ZERO_INIT; - mat3 m5; - int i, j, k; - - /* test identity matrix multiplication */ - glmc_mat3_mul(m1, m2, m3); - for (i = 0; i < m; i++) { - for (j = 0; j < n; j++) { - if (i == j) - assert_true(glm_eq(m3[i][j], 1.0f)); - else - assert_true(glm_eq(m3[i][j], 0.0f)); - } - } - - /* test random matrices */ - /* random matrices */ - test_rand_mat3(m1); - test_rand_mat3(m2); - - glmc_mat3_mul(m1, m2, m3); - for (i = 0; i < m; i++) { - for (j = 0; j < n; j++) { - for (k = 0; k < m; k++) - /* column-major */ - m4[i][j] += m1[k][j] * m2[i][k]; - } - } - - test_assert_mat3_eq(m3, m4); - - for (i = 0; i < 100000; i++) { - test_rand_mat3(m3); - test_rand_mat3(m4); - - /* test inverse precise */ - glmc_mat3_inv(m3, m4); - glmc_mat3_inv(m4, m5); - test_assert_mat3_eq(m3, m5); - } -} diff --git a/test/src/test_mat3.h b/test/src/test_mat3.h new file mode 100644 index 0000000..76b1786 --- /dev/null +++ b/test/src/test_mat3.h @@ -0,0 +1,311 @@ +/* + * Copyright (c), Recep Aslantas. + * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#include "test_common.h" + +#define A_MATRIX {{1,2,3},{5,6,7},{9,10,11}} + +TEST_IMPL(GLM_PREFIX, mat3_copy) { + mat3 m1 = A_MATRIX; + mat3 m2 = GLM_MAT3_IDENTITY_INIT; + + GLM(mat3_copy)(m1, m2); + + test_assert_mat3_eq(m1, m2); + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_identity) { + mat3 m1 = GLM_MAT3_IDENTITY_INIT; + mat3 m2 = GLM_MAT3_IDENTITY_INIT; + mat3 m3; + + GLM(mat3_identity)(m3); + + ASSERTIFY(test_assert_mat3_eq_identity(m1)) + ASSERTIFY(test_assert_mat3_eq_identity(m2)) + ASSERTIFY(test_assert_mat3_eq_identity(m3)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_identity_array) { + int i, count; + mat3 matrices[4] = { + A_MATRIX, + A_MATRIX, + A_MATRIX, + A_MATRIX + }; + + count = 4; + + GLM(mat3_identity_array)(matrices, count); + + for (i = 0; i < count; i++) { + ASSERTIFY(test_assert_mat3_eq_identity(matrices[i])) + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_zero) { + mat3 m1 = GLM_MAT3_ZERO_INIT; + mat3 m2 = GLM_MAT3_ZERO_INIT; + mat3 m3; + + GLM(mat3_zero)(m3); + + ASSERTIFY(test_assert_mat3_eq_zero(m1)) + ASSERTIFY(test_assert_mat3_eq_zero(m2)) + ASSERTIFY(test_assert_mat3_eq_zero(m3)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_mul) { + mat3 m1 = GLM_MAT3_IDENTITY_INIT; + mat3 m2 = GLM_MAT3_IDENTITY_INIT; + mat3 m3; + mat3 m4 = GLM_MAT3_ZERO_INIT; + int i, j, k; + + /* test random matrices */ + /* random matrices */ + test_rand_mat3(m1); + test_rand_mat3(m2); + + GLM(mat3_mul)(m1, m2, m3); + for (i = 0; i < 3; i++) { + for (j = 0; j < 3; j++) { + for (k = 0; k < 3; k++) + /* column-major */ + m4[i][j] += m1[k][j] * m2[i][k]; + } + } + + ASSERTIFY(test_assert_mat3_eq(m3, m4)) + + /* test pre compiled */ + GLM(mat3_mul)(m1, m2, m3); + ASSERTIFY(test_assert_mat3_eq(m3, m4)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_mulv) { + vec4 res; + mat3 mat = A_MATRIX; + vec4 v = {1.0f, 2.0f, 3.0f, 4.0f}; + int i; + + GLM(mat3_mulv)(mat, v, res); + + for (i = 0; i < 3; i++) { + ASSERT(test_eq(res[i], + v[0] * mat[0][i] + + v[1] * mat[1][i] + + v[2] * mat[2][i])) + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_trace) { + mat3 mat = A_MATRIX; + float trace; + + 
trace = GLM(mat3_trace)(mat); + + ASSERT(test_eq(trace, mat[0][0] + mat[1][1] + mat[2][2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_quat) { + mat3 m1, m3; + mat4 m2; + versor q1, q2, q3; + vec3 axis1; + vec3 axis2 = {1.9f, 2.3f, 4.5f}; + + GLM(quat)(q1, GLM_PI_4f, 1.9f, 2.3f, 4.5f); + GLM(quat_mat3)(q1, m1); + GLM(mat3_quat)(m1, q2); + + GLM(rotate_make)(m2, GLM_PI_4f, axis2); + GLM(mat3_quat)(m1, q3); + + GLM(quat_axis)(q3, axis1); + + GLM(vec3_normalize)(axis1); + GLM(vec3_normalize)(axis2); + + GLM(mat4_pick3)(m2, m3); + + ASSERT(test_eq(glm_quat_angle(q3), GLM_PI_4f)) + ASSERTIFY(test_assert_vec3_eq(axis1, axis2)) + ASSERTIFY(test_assert_vec4_eq(q1, q2)) + ASSERTIFY(test_assert_mat3_eq(m1, m3)) + ASSERTIFY(test_assert_vec4_eq(q1, q3)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_transpose_to) { + mat3 mat = A_MATRIX; + mat3 m1; + + GLM(mat3_transpose_to)(mat, m1); + + ASSERTIFY(test_assert_mat3_eqt(mat, m1)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_transpose) { + mat3 mat = A_MATRIX; + mat3 m1; + + GLM(mat3_copy)(mat, m1); + GLM(mat3_transpose)(m1); + + ASSERTIFY(test_assert_mat3_eqt(mat, m1)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_scale) { + mat3 m1 = A_MATRIX; + mat3 m2 = A_MATRIX; + int i, j, k, scale; + + scale = rand() % 100; + + GLM(mat3_scale)(m1, (float)scale); + + for (i = 0; i < 3; i++) { + for (j = 0; j < 3; j++) { + for (k = 0; k < 3; k++) + ASSERT(test_eq(m1[i][j], m2[i][j] * scale)) + } + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_det) { + mat3 mat; + float a, b, c, + d, e, f, + g, h, i; + float det1, det2; + + test_rand_mat3(mat); + + a = mat[0][0]; b = mat[0][1]; c = mat[0][2]; + d = mat[1][0]; e = mat[1][1]; f = mat[1][2]; + g = mat[2][0]; h = mat[2][1]; i = mat[2][2]; + + det1 = a * (e * i - h * f) - d * (b * i - c * h) + g * (b * f - c * e); + det2 = GLM(mat3_det)(mat); + + ASSERT(test_eq(det1, det2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_inv) { + mat3 m1 = GLM_MAT3_IDENTITY_INIT; + mat3 m2 = GLM_MAT3_IDENTITY_INIT; + mat3 m3; + int i; + + for (i = 0; i < 100000; i++) { + test_rand_mat3(m1); + test_rand_mat3(m2); + + /* test inverse precise */ + GLM(mat3_inv)(m1, m2); + GLM(mat3_inv)(m2, m3); + ASSERTIFY(test_assert_mat3_eq(m1, m3)) + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_swap_col) { + mat3 m1 = A_MATRIX; + mat3 m2 = A_MATRIX; + + GLM(mat3_swap_col)(m1, 0, 1); + + ASSERTIFY(test_assert_vec3_eq(m1[0], m2[1])) + ASSERTIFY(test_assert_vec3_eq(m1[1], m2[0])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_swap_row) { + mat3 m1 = A_MATRIX; + mat3 m2 = A_MATRIX; + + GLM(mat3_swap_row)(m1, 0, 1); + + ASSERT(test_eq(m1[0][0], m2[0][1])) + ASSERT(test_eq(m1[0][1], m2[0][0])) + ASSERT(test_eq(m1[0][2], m2[0][2])) + + ASSERT(test_eq(m1[1][0], m2[1][1])) + ASSERT(test_eq(m1[1][1], m2[1][0])) + ASSERT(test_eq(m1[1][2], m2[1][2])) + + ASSERT(test_eq(m1[2][0], m2[2][1])) + ASSERT(test_eq(m1[2][1], m2[2][0])) + ASSERT(test_eq(m1[2][2], m2[2][2])) + + GLM(mat3_swap_row)(m1, 1, 2); + + ASSERT(test_eq(m1[0][0], m2[0][1])) + ASSERT(test_eq(m1[0][1], m2[0][2])) + ASSERT(test_eq(m1[0][2], m2[0][0])) + + ASSERT(test_eq(m1[1][0], m2[1][1])) + ASSERT(test_eq(m1[1][1], m2[1][2])) + ASSERT(test_eq(m1[1][2], m2[1][0])) + + ASSERT(test_eq(m1[2][0], m2[2][1])) + ASSERT(test_eq(m1[2][1], m2[2][2])) + ASSERT(test_eq(m1[2][2], m2[2][0])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat3_rmc) { + mat3 mat = A_MATRIX; + vec3 v = {11.0f, 12.0f, 13.0f}; + vec3 v1; + float r1, r2; + int i; + + r1 = GLM(mat3_rmc)(v, mat, v); + + for 
(i = 0; i < 3; i++) { + v1[i] = v[0] * mat[i][0] + + v[1] * mat[i][1] + + v[2] * mat[i][2]; + } + + r2 = v[0] * v1[0] + v[1] * v1[1] + v[2] * v1[2]; + + ASSERT(test_eq(r1, r2)) + + TEST_SUCCESS +} + +#undef A_MATRIX diff --git a/test/src/test_mat4.c b/test/src/test_mat4.c deleted file mode 100644 index f369829..0000000 --- a/test/src/test_mat4.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c), Recep Aslantas. - * - * MIT License (MIT), http://opensource.org/licenses/MIT - * Full license can be found in the LICENSE file - */ - -#include "test_common.h" - -#define m 4 -#define n 4 - -void -test_mat4(void **state) { - mat4 m1 = GLM_MAT4_IDENTITY_INIT; - mat4 m2 = GLM_MAT4_IDENTITY_INIT; - mat4 m3; - mat4 m4 = GLM_MAT4_ZERO_INIT; - mat4 m5; - int i, j, k; - - /* test identity matrix multiplication */ - glm_mat4_mul(m1, m2, m3); - for (i = 0; i < m; i++) { - for (j = 0; j < n; j++) { - if (i == j) - assert_true(glm_eq(m3[i][j], 1.0f)); - else - assert_true(glm_eq(m3[i][j], 0.0f)); - } - } - - /* test random matrices */ - /* random matrices */ - test_rand_mat4(m1); - test_rand_mat4(m2); - - glm_mat4_mul(m1, m2, m3); - for (i = 0; i < m; i++) { - for (j = 0; j < n; j++) { - for (k = 0; k < m; k++) - /* column-major */ - m4[i][j] += m1[k][j] * m2[i][k]; - } - } - - test_assert_mat4_eq(m3, m4); - - /* test pre compiled */ - glmc_mat4_mul(m1, m2, m3); - test_assert_mat4_eq(m3, m4); - - for (i = 0; i < 100000; i++) { - test_rand_mat4(m3); - test_rand_mat4(m4); - - /* test inverse precise */ - glm_mat4_inv_precise(m3, m4); - glm_mat4_inv_precise(m4, m5); - test_assert_mat4_eq(m3, m5); - - test_rand_mat4(m3); - test_rand_mat4(m4); - - glmc_mat4_inv_precise(m3, m4); - glmc_mat4_inv_precise(m4, m5); - test_assert_mat4_eq(m3, m5); - - /* test inverse rcp */ - test_rand_mat4(m3); - test_rand_mat4(m4); - - glm_mat4_inv_fast(m3, m4); - glm_mat4_inv_fast(m4, m5); - test_assert_mat4_eq2(m3, m5, 0.0009f); - - test_rand_mat4(m3); - test_rand_mat4(m4); - - glmc_mat4_inv(m3, m4); - glmc_mat4_inv(m4, m5); - test_assert_mat4_eq2(m3, m5, 0.0009f); - } - - /* print */ - glm_mat4_print(m3, stderr); - glm_mat4_print(m4, stderr); - - /* test determinant */ - assert_int_equal(glm_mat4_det(m1), glmc_mat4_det(m1)); -#if defined( __SSE2__ ) - assert_int_equal(glmc_mat4_det(m1), glm_mat4_det_sse2(m1)); -#endif -} diff --git a/test/src/test_mat4.h b/test/src/test_mat4.h new file mode 100644 index 0000000..d42e457 --- /dev/null +++ b/test/src/test_mat4.h @@ -0,0 +1,488 @@ +/* + * Copyright (c), Recep Aslantas. 
+ * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#include "test_common.h" + +#define A_MATRIX {{1,2,3,4},{5,6,7,8},{9,10,11,12},{13,14,15,16}} +#define A_MATRIX3 {{1,2,3},{5,6,7},{9,10,11}} + +TEST_IMPL(GLM_PREFIX, mat4_ucopy) { + mat4 m1 = A_MATRIX; + mat4 m2 = GLM_MAT4_IDENTITY_INIT; + + GLM(mat4_ucopy)(m1, m2); + + ASSERTIFY(test_assert_mat4_eq(m1, m2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_copy) { + mat4 m1 = A_MATRIX; + mat4 m2 = GLM_MAT4_IDENTITY_INIT; + + GLM(mat4_copy)(m1, m2); + + test_assert_mat4_eq(m1, m2); + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_identity) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + mat4 m2 = GLM_MAT4_IDENTITY_INIT; + mat4 m3; + + GLM(mat4_identity)(m3); + + ASSERTIFY(test_assert_mat4_eq_identity(m1)) + ASSERTIFY(test_assert_mat4_eq_identity(m2)) + ASSERTIFY(test_assert_mat4_eq_identity(m3)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_identity_array) { + int i, count; + mat4 matrices[4] = { + A_MATRIX, + A_MATRIX, + A_MATRIX, + A_MATRIX + }; + + count = 4; + + GLM(mat4_identity_array)(matrices, count); + + for (i = 0; i < count; i++) { + ASSERTIFY(test_assert_mat4_eq_identity(matrices[i])) + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_zero) { + mat4 m1 = GLM_MAT4_ZERO_INIT; + mat4 m2 = GLM_MAT4_ZERO_INIT; + mat4 m3; + + GLM(mat4_zero)(m3); + + ASSERTIFY(test_assert_mat4_eq_zero(m1)) + ASSERTIFY(test_assert_mat4_eq_zero(m2)) + ASSERTIFY(test_assert_mat4_eq_zero(m3)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_pick3) { + mat4 m1 = A_MATRIX; + mat3 m2 = GLM_MAT3_ZERO_INIT; + mat3 m3 = A_MATRIX3; + + GLM(mat4_pick3)(m1, m2); + + ASSERTIFY(test_assert_mat3_eq(m2, m3)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_pick3t) { + mat4 m1 = A_MATRIX; + mat3 m2 = GLM_MAT3_ZERO_INIT; + mat3 m3 = A_MATRIX3; + + GLM(mat4_pick3t)(m1, m2); + + ASSERTIFY(test_assert_mat3_eqt(m2, m3)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_ins3) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + mat3 m2 = A_MATRIX3; + int i, j; + + GLM(mat4_ins3)(m2, m1); + + for (i = 0; i < 3; i++) { + for (j = 0; j < 3; j++) { + ASSERT(m1[i][j] == m2[i][j]) + } + } + + ASSERT(test_eq(m1[3][0], 0.0f)) + ASSERT(test_eq(m1[3][1], 0.0f)) + ASSERT(test_eq(m1[3][2], 0.0f)) + ASSERT(test_eq(m1[3][3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_mul) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + mat4 m2 = GLM_MAT4_IDENTITY_INIT; + mat4 m3; + mat4 m4 = GLM_MAT4_ZERO_INIT; + int i, j, k; + + /* test random matrices */ + /* random matrices */ + test_rand_mat4(m1); + test_rand_mat4(m2); + + GLM(mat4_mul)(m1, m2, m3); + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + for (k = 0; k < 4; k++) + /* column-major */ + m4[i][j] += m1[k][j] * m2[i][k]; + } + } + + ASSERTIFY(test_assert_mat4_eq(m3, m4)) + + /* test pre compiled */ + GLM(mat4_mul)(m1, m2, m3); + ASSERTIFY(test_assert_mat4_eq(m3, m4)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_mulN) { + mat4 res1, res2; + mat4 m1 = A_MATRIX; + mat4 m2 = A_MATRIX; + mat4 m3 = A_MATRIX; + + mat4 *matrices[] = { + &m1, &m2, &m3 + }; + + GLM(mat4_mulN)(matrices, sizeof(matrices) / sizeof(matrices[0]), res1); + + GLM(mat4_mul)(*matrices[0], *matrices[1], res2); + GLM(mat4_mul)(res2, *matrices[2], res2); + + ASSERTIFY(test_assert_mat4_eq(res1, res1)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_mulv) { + vec4 res; + mat4 mat = A_MATRIX; + vec4 v = {1.0f, 2.0f, 3.0f, 4.0f}; + int i; + + GLM(mat4_mulv)(mat, v, res); + + for (i = 0; i < 4; i++) { + 
ASSERT(test_eq(res[i], + v[0] * mat[0][i] + + v[1] * mat[1][i] + + v[2] * mat[2][i] + + v[3] * mat[3][i])) + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_mulv3) { + vec4 res; + mat4 mat = A_MATRIX; + vec3 v = {1.0f, 2.0f, 3.0f}; + float last; + int i; + + last = 1.0f; + + GLM(mat4_mulv3)(mat, v, last, res); + + for (i = 0; i < 3; i++) { + ASSERT(test_eq(res[i], + v[0] * mat[0][i] + + v[1] * mat[1][i] + + v[2] * mat[2][i] + + last * mat[3][i])) + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_trace) { + mat4 mat = A_MATRIX; + float trace; + + trace = GLM(mat4_trace)(mat); + + ASSERT(test_eq(trace, mat[0][0] + mat[1][1] + mat[2][2] + mat[3][3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_trace3) { + mat4 mat = A_MATRIX; + float trace; + + trace = GLM(mat4_trace3)(mat); + + ASSERT(test_eq(trace, mat[0][0] + mat[1][1] + mat[2][2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_quat) { + mat4 m1, m2; + versor q1, q2, q3; + vec3 axis1; + vec3 axis2 = {1.9f, 2.3f, 4.5f}; + + GLM(quat)(q1, GLM_PI_4f, 1.9f, 2.3f, 4.5f); + GLM(quat_mat4)(q1, m1); + GLM(mat4_quat)(m1, q2); + + GLM(rotate_make)(m2, GLM_PI_4f, axis2); + GLM(mat4_quat)(m1, q3); + + GLM(quat_axis)(q3, axis1); + + GLM(vec3_normalize)(axis1); + GLM(vec3_normalize)(axis2); + + ASSERT(test_eq(glm_quat_angle(q3), GLM_PI_4f)) + ASSERTIFY(test_assert_vec3_eq(axis1, axis2)) + ASSERTIFY(test_assert_vec4_eq(q1, q2)) + ASSERTIFY(test_assert_mat4_eq(m1, m2)) + ASSERTIFY(test_assert_vec4_eq(q1, q3)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_transpose_to) { + mat4 mat = A_MATRIX; + mat4 m1; + + GLM(mat4_transpose_to)(mat, m1); + + ASSERTIFY(test_assert_mat4_eqt(mat, m1)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_transpose) { + mat4 mat = A_MATRIX; + mat4 m1; + + GLM(mat4_copy)(mat, m1); + GLM(mat4_transpose)(m1); + + ASSERTIFY(test_assert_mat4_eqt(mat, m1)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_scale_p) { + mat4 m1 = A_MATRIX; + mat4 m2 = A_MATRIX; + int i, j, k, scale; + + scale = rand() % 100; + + GLM(mat4_scale_p)(m1, (float)scale); + + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + for (k = 0; k < 4; k++) + ASSERT(test_eq(m1[i][j], m2[i][j] * scale)) + } + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_scale) { + mat4 m1 = A_MATRIX; + mat4 m2 = A_MATRIX; + int i, j, k, scale; + + scale = rand() % 100; + + GLM(mat4_scale)(m1, (float)scale); + + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + for (k = 0; k < 4; k++) + ASSERT(test_eq(m1[i][j], m2[i][j] * scale)) + } + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_det) { + mat4 mat = GLM_MAT4_IDENTITY_INIT; + float t[6]; + float a, b, c, d, + e, f, g, h, + i, j, k, l, + m, n, o, p; + float det1, det2; + + test_rand_mat4(mat); + + a = mat[0][0]; b = mat[0][1]; c = mat[0][2]; d = mat[0][3]; + e = mat[1][0]; f = mat[1][1]; g = mat[1][2]; h = mat[1][3]; + i = mat[2][0]; j = mat[2][1]; k = mat[2][2]; l = mat[2][3]; + m = mat[3][0]; n = mat[3][1]; o = mat[3][2]; p = mat[3][3]; + + t[0] = k * p - o * l; + t[1] = j * p - n * l; + t[2] = j * o - n * k; + t[3] = i * p - m * l; + t[4] = i * o - m * k; + t[5] = i * n - m * j; + + det1 = a * (f * t[0] - g * t[1] + h * t[2]) + - b * (e * t[0] - g * t[3] + h * t[4]) + + c * (e * t[1] - f * t[3] + h * t[5]) + - d * (e * t[2] - f * t[4] + g * t[5]); + + det2 = GLM(mat4_det(mat)); + + ASSERT(test_eq(det1, det2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_inv) { + mat4 m1, m2, m3; + int i; + + for (i = 0; i < 100000; i++) { + test_rand_mat4(m1); + test_rand_mat4(m2); + + /* test 
inverse precise */ + GLM(mat4_inv)(m1, m2); + GLM(mat4_inv)(m2, m3); + ASSERTIFY(test_assert_mat4_eq(m1, m3)) + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_inv_precise) { + mat4 m1, m2, m3; + mat4 m4, m5, m6; + int i; + + for (i = 0; i < 100000; i++) { + test_rand_mat4(m1); + test_rand_mat4(m2); + + glm_mat4_inv_precise(m1, m2); + glm_mat4_inv_precise(m2, m3); + ASSERTIFY(test_assert_mat4_eq(m1, m3)) + + test_rand_mat4(m4); + test_rand_mat4(m5); + + glmc_mat4_inv_precise(m4, m5); + glmc_mat4_inv_precise(m5, m6); + ASSERTIFY(test_assert_mat4_eq(m4, m6)) + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_inv_fast) { + mat4 m1, m2, m3; + int i; + + for (i = 0; i < 100000; i++) { + test_rand_mat4(m1); + test_rand_mat4(m2); + + /* test inverse precise */ + GLM(mat4_inv_fast)(m1, m2); + GLM(mat4_inv_fast)(m2, m3); + ASSERTIFY(test_assert_mat4_eq2(m1, m3, 0.0009f)) + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_swap_col) { + mat4 m1 = A_MATRIX; + mat4 m2 = A_MATRIX; + + GLM(mat4_swap_col)(m1, 0, 1); + GLM(mat4_swap_col)(m1, 2, 3); + + ASSERTIFY(test_assert_vec4_eq(m1[0], m2[1])) + ASSERTIFY(test_assert_vec4_eq(m1[1], m2[0])) + ASSERTIFY(test_assert_vec4_eq(m1[2], m2[3])) + ASSERTIFY(test_assert_vec4_eq(m1[3], m2[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_swap_row) { + mat4 m1 = A_MATRIX; + mat4 m2 = A_MATRIX; + + GLM(mat4_swap_row)(m1, 0, 1); + GLM(mat4_swap_row)(m1, 2, 3); + + ASSERT(test_eq(m1[0][0], m2[0][1])) + ASSERT(test_eq(m1[0][1], m2[0][0])) + ASSERT(test_eq(m1[0][2], m2[0][3])) + ASSERT(test_eq(m1[0][3], m2[0][2])) + ASSERT(test_eq(m1[1][0], m2[1][1])) + ASSERT(test_eq(m1[1][1], m2[1][0])) + ASSERT(test_eq(m1[1][2], m2[1][3])) + ASSERT(test_eq(m1[1][3], m2[1][2])) + ASSERT(test_eq(m1[2][0], m2[2][1])) + ASSERT(test_eq(m1[2][1], m2[2][0])) + ASSERT(test_eq(m1[2][2], m2[2][3])) + ASSERT(test_eq(m1[2][3], m2[2][2])) + ASSERT(test_eq(m1[3][0], m2[3][1])) + ASSERT(test_eq(m1[3][1], m2[3][0])) + ASSERT(test_eq(m1[3][2], m2[3][3])) + ASSERT(test_eq(m1[3][3], m2[3][2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, mat4_rmc) { + mat4 mat = A_MATRIX; + vec4 v = {1.0f, 2.0f, 3.0f, 4.0f}; + vec4 v1; + float r1, r2; + int i; + + r1 = GLM(mat4_rmc)(v, mat, v); + + for (i = 0; i < 4; i++) { + v1[i] = v[0] * mat[i][0] + + v[1] * mat[i][1] + + v[2] * mat[i][2] + + v[3] * mat[i][3]; + } + + r2 = v[0] * v1[0] + v[1] * v1[1] + v[2] * v1[2] + v[3] * v1[3]; + + ASSERT(test_eq(r1, r2)) + + TEST_SUCCESS +} + +#undef A_MATRIX +#undef A_MATRIX3 diff --git a/test/src/test_plane.h b/test/src/test_plane.h new file mode 100644 index 0000000..896c8b5 --- /dev/null +++ b/test/src/test_plane.h @@ -0,0 +1,39 @@ +/* + * Copyright (c), Recep Aslantas. 
+ * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#include "test_common.h" + +TEST_IMPL(GLM_PREFIX, plane_normalize) { + vec4 p1 = {2.0f, -3.0f, 4.0f, 5.0f}, p2 = {2.0f, -3.0f, 4.0f, 5.0f}; + float s = 1.0f; + float norm; + + GLM(plane_normalize)(p2); + + norm = sqrtf(p1[0] * p1[0] + p1[1] * p1[1] + p1[2] * p1[2]); + if (norm == 0.0f) { + ASSERT(test_eq(p1[0], 0.0f)) + ASSERT(test_eq(p1[1], 0.0f)) + ASSERT(test_eq(p1[2], 0.0f)) + ASSERT(test_eq(p1[3], 0.0f)) + + TEST_SUCCESS + } + + norm = s / norm; + + ASSERT(test_eq(p1[0] * norm, p2[0])) + ASSERT(test_eq(p1[1] * norm, p2[1])) + ASSERT(test_eq(p1[2] * norm, p2[2])) + ASSERT(test_eq(p1[3] * norm, p2[3])) + + glm_vec4_zero(p1); + GLM(plane_normalize)(p1); + ASSERTIFY(test_assert_vec4_eq(p1, GLM_VEC4_ZERO)) + + TEST_SUCCESS +} diff --git a/test/src/test_project.c b/test/src/test_project.c deleted file mode 100644 index 4cdac9e..0000000 --- a/test/src/test_project.c +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c), Recep Aslantas. - * - * MIT License (MIT), http://opensource.org/licenses/MIT - * Full license can be found in the LICENSE file - */ - -#include "test_common.h" - -void -test_project(void **state) { - mat4 model, view, proj, mvp; - vec4 viewport = {0.0f, 0.0f, 800.0f, 600.0f}; - vec3 pos = {13.0f, 45.0f, 0.74f}; - vec3 projected, unprojected; - - glm_translate_make(model, (vec3){0.0f, 0.0f, -10.0f}); - glm_lookat((vec3){0.0f, 0.0f, 0.0f}, pos, GLM_YUP, view); - - glm_perspective_default(0.5f, proj); - glm_mat4_mulN((mat4 *[]){&proj, &view, &model}, 3, mvp); - - glmc_project(pos, mvp, viewport, projected); - glmc_unproject(projected, mvp, viewport, unprojected); - - /* unprojected of projected vector must be same as original one */ - /* we used 0.01 because of projection floating point errors */ - assert_true(fabsf(pos[0] - unprojected[0]) < 0.01); - assert_true(fabsf(pos[1] - unprojected[1]) < 0.01); - assert_true(fabsf(pos[2] - unprojected[2]) < 0.01); -} diff --git a/test/src/test_project.h b/test/src/test_project.h new file mode 100644 index 0000000..41bdecd --- /dev/null +++ b/test/src/test_project.h @@ -0,0 +1,92 @@ +/* + * Copyright (c), Recep Aslantas. 
+ * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#include "test_common.h" + +TEST_IMPL(GLM_PREFIX, unprojecti) { + mat4 model, view, proj, mvp; + vec4 viewport = {0.0f, 0.0f, 800.0f, 600.0f}; + vec3 pos = {13.0f, 45.0f, 0.74f}; + vec3 projected, unprojected; + + glm_translate_make(model, (vec3){0.0f, 0.0f, -10.0f}); + glm_lookat((vec3){0.0f, 0.0f, 0.0f}, pos, GLM_YUP, view); + + glm_perspective_default(0.5f, proj); + glm_mat4_mulN((mat4 *[]){&proj, &view, &model}, 3, mvp); + + GLM(project)(pos, mvp, viewport, projected); + + glm_mat4_inv(mvp, mvp); + GLM(unprojecti)(projected, mvp, viewport, unprojected); + + /* unprojected of projected vector must be same as original one */ + /* we used 0.01 because of projection floating point errors */ + ASSERT(fabsf(pos[0] - unprojected[0]) < 0.01) + ASSERT(fabsf(pos[1] - unprojected[1]) < 0.01) + ASSERT(fabsf(pos[2] - unprojected[2]) < 0.01) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, unproject) { + mat4 model, view, proj, mvp; + vec4 viewport = {0.0f, 0.0f, 800.0f, 600.0f}; + vec3 pos = {13.0f, 45.0f, 0.74f}; + vec3 projected, unprojected; + + glm_translate_make(model, (vec3){0.0f, 0.0f, -10.0f}); + glm_lookat((vec3){0.0f, 0.0f, 0.0f}, pos, GLM_YUP, view); + + glm_perspective_default(0.5f, proj); + glm_mat4_mulN((mat4 *[]){&proj, &view, &model}, 3, mvp); + + GLM(project)(pos, mvp, viewport, projected); + GLM(unproject)(projected, mvp, viewport, unprojected); + + /* unprojected of projected vector must be same as original one */ + /* we used 0.01 because of projection floating point errors */ + ASSERT(fabsf(pos[0] - unprojected[0]) < 0.01) + ASSERT(fabsf(pos[1] - unprojected[1]) < 0.01) + ASSERT(fabsf(pos[2] - unprojected[2]) < 0.01) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, project) { + mat4 model, view, proj, mvp; + vec4 viewport = {0.0f, 0.0f, 800.0f, 600.0f}; + vec3 pos = {13.0f, 45.0f, 0.74f}; + vec3 projected, unprojected; + + glm_translate_make(model, (vec3){0.0f, 0.0f, -10.0f}); + glm_lookat((vec3){0.0f, 0.0f, 0.0f}, pos, GLM_YUP, view); + + glm_perspective_default(0.5f, proj); + glm_mat4_mulN((mat4 *[]){&proj, &view, &model}, 3, mvp); + + GLM(project)(pos, mvp, viewport, projected); + GLM(unproject)(projected, mvp, viewport, unprojected); + + /* unprojected of projected vector must be same as original one */ + /* we used 0.01 because of projection floating point errors */ + ASSERT(fabsf(pos[0] - unprojected[0]) < 0.01) + ASSERT(fabsf(pos[1] - unprojected[1]) < 0.01) + ASSERT(fabsf(pos[2] - unprojected[2]) < 0.01) + + /* test with no projection */ + glm_mat4_identity(mvp); + + GLM(project)(pos, mvp, viewport, projected); + GLM(unproject)(projected, mvp, viewport, unprojected); + + ASSERT(test_eq(pos[0], unprojected[0])) + ASSERT(test_eq(pos[1], unprojected[1])) + ASSERT(test_eq(pos[2], unprojected[2])) + + TEST_SUCCESS +} diff --git a/test/src/test_quat.c b/test/src/test_quat.c deleted file mode 100644 index d0cc27c..0000000 --- a/test/src/test_quat.c +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright (c), Recep Aslantas. 
- * - * MIT License (MIT), http://opensource.org/licenses/MIT - * Full license can be found in the LICENSE file - */ - -#include "test_common.h" - -CGLM_INLINE -void -test_quat_mul_raw(versor p, versor q, versor dest) { - dest[0] = p[3] * q[0] + p[0] * q[3] + p[1] * q[2] - p[2] * q[1]; - dest[1] = p[3] * q[1] - p[0] * q[2] + p[1] * q[3] + p[2] * q[0]; - dest[2] = p[3] * q[2] + p[0] * q[1] - p[1] * q[0] + p[2] * q[3]; - dest[3] = p[3] * q[3] - p[0] * q[0] - p[1] * q[1] - p[2] * q[2]; -} - -void -test_quat(void **state) { - mat4 inRot, outRot, view1, view2, rot1, rot2; - versor inQuat, outQuat, q3, q4, q5; - vec3 eye, axis, imag, v1, v2; - int i; - - /* 0. test identiy quat */ - glm_quat_identity(q4); - assert_true(glm_eq(glm_quat_real(q4), cosf(glm_rad(0.0f) * 0.5f))); - glm_quat_mat4(q4, rot1); - test_assert_mat4_eq2(rot1, GLM_MAT4_IDENTITY, 0.000009); - - /* 1. test quat to mat and mat to quat */ - for (i = 0; i < 1000; i++) { - test_rand_quat(inQuat); - - glmc_quat_mat4(inQuat, inRot); - glmc_mat4_quat(inRot, outQuat); - glmc_quat_mat4(outQuat, outRot); - - /* 2. test first quat and generated one equality */ - test_assert_quat_eq_abs(inQuat, outQuat); - - /* 3. test first rot and second rotation */ - test_assert_mat4_eq2(inRot, outRot, 0.000009); /* almost equal */ - - /* 4. test SSE mul and raw mul */ -#if defined( __SSE__ ) || defined( __SSE2__ ) - test_quat_mul_raw(inQuat, outQuat, q3); - glm_quat_mul_sse2(inQuat, outQuat, q4); - test_assert_quat_eq(q3, q4); -#endif - } - - /* 5. test lookat */ - test_rand_vec3(eye); - glm_quatv(q3, glm_rad(-90.0f), GLM_YUP); - - /* now X axis must be forward axis, Z must be right axis */ - glm_look(eye, GLM_XUP, GLM_YUP, view1); - - /* create view matrix with quaternion */ - glm_quat_look(eye, q3, view2); - - test_assert_mat4_eq2(view1, view2, 0.000009); - - /* 6. test quaternion rotation matrix result */ - test_rand_quat(q3); - glm_quat_mat4(q3, rot1); - - /* 6.1 test axis and angle of quat */ - glm_quat_axis(q3, axis); - glm_rotate_make(rot2, glm_quat_angle(q3), axis); - - test_assert_mat4_eq2(rot1, rot2, 0.000009); - - /* 7. test quaternion multiplication (hamilton product), - final rotation = first rotation + second = quat1 * quat2 - */ - test_rand_quat(q3); - test_rand_quat(q4); - - glm_quat_mul(q3, q4, q5); - - glm_quat_axis(q3, axis); - glm_rotate_make(rot1, glm_quat_angle(q3), axis); - - glm_quat_axis(q4, axis); - glm_rotate(rot1, glm_quat_angle(q4), axis); - - /* rot2 is combine of two rotation now test with quaternion result */ - glm_quat_mat4(q5, rot2); - - /* result must be same (almost) */ - test_assert_mat4_eq2(rot1, rot2, 0.000009); - - /* 8. test quaternion for look rotation */ - - /* 8.1 same direction */ - /* look at from 0, 0, 1 to zero, direction = 0, 0, -1 */ - glm_quat_for((vec3){0, 0, -1}, (vec3){0, 0, -1}, GLM_YUP, q3); - - /* result must be identity */ - glm_quat_identity(q4); - test_assert_quat_eq(q3, q4); - - /* look at from 0, 0, 1 to zero, direction = 0, 0, -1 */ - glm_quat_forp(GLM_ZUP, GLM_VEC3_ZERO, (vec3){0, 0, -1}, GLM_YUP, q3); - - /* result must be identity */ - glm_quat_identity(q4); - test_assert_quat_eq(q3, q4); - - /* 8.2 perpendicular */ - glm_quat_for(GLM_XUP, (vec3){0, 0, -1}, GLM_YUP, q3); - - /* result must be -90 */ - glm_quatv(q4, glm_rad(-90.0f), GLM_YUP); - test_assert_quat_eq(q3, q4); - - /* 9. 
test imag, real */ - - /* 9.1 real */ - assert_true(glm_eq(glm_quat_real(q4), cosf(glm_rad(-90.0f) * 0.5f))); - - /* 9.1 imag */ - glm_quat_imag(q4, imag); - - /* axis = Y_UP * sinf(angle * 0.5), YUP = 0, 1, 0 */ - axis[0] = 0.0f; - axis[1] = sinf(glm_rad(-90.0f) * 0.5f) * 1.0f; - axis[2] = 0.0f; - - assert_true(glm_vec3_eqv_eps(imag, axis)); - - /* 9.2 axis */ - glm_quat_axis(q4, axis); - imag[0] = 0.0f; - imag[1] = -1.0f; - imag[2] = 0.0f; - - test_assert_vec3_eq(imag, axis); - - /* 10. test rotate vector using quat */ - /* (0,0,-1) around (1,0,0) must give (0,1,0) */ - v1[0] = 0.0f; v1[1] = 0.0f; v1[2] = -1.0f; - v2[0] = 0.0f; v2[1] = 0.0f; v2[2] = -1.0f; - - glm_vec3_rotate(v1, glm_rad(90.0f), (vec3){1.0f, 0.0f, 0.0f}); - glm_quatv(q3, glm_rad(90.0f), (vec3){1.0f, 0.0f, 0.0f}); - - glm_vec4_scale(q3, 1.5, q3); - glm_quat_rotatev(q3, v2, v2); - - /* result must be : (0,1,0) */ - assert_true(fabsf(v1[0]) <= 0.00009f - && fabsf(v1[1] - 1.0f) <= 0.00009f - && fabsf(v1[2]) <= 0.00009f); - - test_assert_vec3_eq(v1, v2); - - /* 11. test rotate transform */ - glm_translate_make(rot1, (vec3){-10.0, 45.0f, 8.0f}); - glm_rotate(rot1, glm_rad(-90), GLM_ZUP); - - glm_quatv(q3, glm_rad(-90.0f), GLM_ZUP); - glm_translate_make(rot2, (vec3){-10.0, 45.0f, 8.0f}); - glm_quat_rotate(rot2, q3, rot2); - - /* result must be same (almost) */ - test_assert_mat4_eq2(rot1, rot2, 0.000009); - - glm_rotate_make(rot1, glm_rad(-90), GLM_ZUP); - glm_translate(rot1, (vec3){-10.0, 45.0f, 8.0f}); - - glm_quatv(q3, glm_rad(-90.0f), GLM_ZUP); - glm_mat4_identity(rot2); - glm_quat_rotate(rot2, q3, rot2); - glm_translate(rot2, (vec3){-10.0, 45.0f, 8.0f}); - - /* result must be same (almost) */ - test_assert_mat4_eq2(rot1, rot2, 0.000009); - - /* reverse */ - glm_rotate_make(rot1, glm_rad(-90), GLM_ZUP); - glm_quatv(q3, glm_rad(90.0f), GLM_ZUP); - glm_quat_rotate(rot1, q3, rot1); - - /* result must be identity */ - test_assert_mat4_eq2(rot1, GLM_MAT4_IDENTITY, 0.000009); - - test_rand_quat(q3); - - /* 12. inverse of quat, multiplication must be IDENTITY */ - glm_quat_inv(q3, q4); - glm_quat_mul(q3, q4, q5); - - glm_quat_identity(q3); - test_assert_quat_eq(q3, q5); - - /* TODO: add tests for slerp, lerp */ -} diff --git a/test/src/test_quat.h b/test/src/test_quat.h new file mode 100644 index 0000000..0ef7ca7 --- /dev/null +++ b/test/src/test_quat.h @@ -0,0 +1,1001 @@ +/* + * Copyright (c), Recep Aslantas. 
+ * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#include "test_common.h" + +#ifndef CGLM_TEST_QUAT_ONCE +#define CGLM_TEST_QUAT_ONCE + +/* Macros */ + +TEST_IMPL(MACRO_GLM_QUAT_IDENTITY_INIT) { + versor v = GLM_QUAT_IDENTITY_INIT; + + ASSERT(test_eq(v[0], 0.0f)) + ASSERT(test_eq(v[1], 0.0f)) + ASSERT(test_eq(v[2], 0.0f)) + ASSERT(test_eq(v[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_QUAT_IDENTITY) { + ASSERT(test_eq(GLM_QUAT_IDENTITY[0], 0.0f)) + ASSERT(test_eq(GLM_QUAT_IDENTITY[1], 0.0f)) + ASSERT(test_eq(GLM_QUAT_IDENTITY[2], 0.0f)) + ASSERT(test_eq(GLM_QUAT_IDENTITY[3], 1.0f)) + + TEST_SUCCESS +} + +#endif /* CGLM_TEST_QUAT_ONCE */ + +TEST_IMPL(GLM_PREFIX, quat_identity) { + versor a = GLM_QUAT_IDENTITY_INIT; + versor b = GLM_QUAT_IDENTITY_INIT; + versor c; + mat4 r; + + GLM(quat_identity)(c); + + ASSERTIFY(test_assert_quat_eq_identity(a)) + ASSERTIFY(test_assert_quat_eq_identity(b)) + ASSERTIFY(test_assert_quat_eq_identity(c)) + + glm_quat_identity(c); + ASSERT(test_eq(glm_quat_real(c), cosf(glm_rad(0.0f) * 0.5f))) + + glm_quat_mat4(c, r); + ASSERTIFY(test_assert_mat4_eq2(r, GLM_MAT4_IDENTITY, 0.000009f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_identity_array) { + int i, count; + versor quats[4] = { + {1.0f, 2.0f, 3.0f, 4.0f}, + {1.0f, 2.0f, 3.0f, 4.0f}, + {1.0f, 2.0f, 3.0f, 4.0f}, + {1.0f, 2.0f, 3.0f, 4.0f}, + }; + + count = 4; + + GLM(quat_identity_array)(quats, count); + + for (i = 0; i < count; i++) { + ASSERTIFY(test_assert_quat_eq_identity(quats[i])) + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_init) { + versor q1 = {1.0f, 2.0f, 3.0f, 4.0f}; + versor q2 = {1.0f, 2.0f, 3.0f, 4.0f}; + versor q3 = {1.0f, 2.0f, 3.0f, 4.0f}; + + GLM(quat_init)(q1, 10.0f, 11.0f, 12.0f, 13.0f); + GLM(quat_init)(q2, 100.0f, 110.0f, 120.0f, 130.0f); + GLM(quat_init)(q3, 1000.0f, 1100.0f, 1200.0f, 1300.0f); + + ASSERT(q1[0] == 10.0f) + ASSERT(q1[1] == 11.0f) + ASSERT(q1[2] == 12.0f) + ASSERT(q1[3] == 13.0f) + + ASSERT(q2[0] == 100.0f) + ASSERT(q2[1] == 110.0f) + ASSERT(q2[2] == 120.0f) + ASSERT(q2[3] == 130.0f) + + ASSERT(q3[0] == 1000.0f) + ASSERT(q3[1] == 1100.0f) + ASSERT(q3[2] == 1200.0f) + ASSERT(q3[3] == 1300.0f) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quatv) { + versor q1 = {1.0f, 2.0f, 3.0f, 4.0f}; + vec3 v1, v2; + float a1; + + test_rand_vec3(v1); + GLM(quatv)(q1, glm_rad(60.0f), v1); + + glm_quat_axis(q1, v2); + a1 = glm_quat_angle(q1); + + ASSERT(test_eq(a1, glm_rad(60.0f))) + + glm_vec3_normalize(v1); + ASSERTIFY(test_assert_vec3_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat) { + versor q1 = {1.0f, 2.0f, 3.0f, 4.0f}; + vec3 v1, v2; + float a1; + + test_rand_vec3(v1); + GLM(quat)(q1, glm_rad(60.0f), v1[0], v1[1], v1[2]); + + glm_quat_axis(q1, v2); + a1 = glm_quat_angle(q1); + + ASSERT(test_eq(a1, glm_rad(60.0f))) + + glm_vec3_normalize(v1); + ASSERTIFY(test_assert_vec3_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_copy) { + versor v1 = {10.0f, 9.0f, 8.0f, 78.0f}; + versor v2 = {1.0f, 2.0f, 3.0f, 4.0f}; + + GLM(quat_copy)(v1, v2); + + ASSERTIFY(test_assert_vec4_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_norm) { + versor a = {10.0f, 9.0f, 8.0f, 78.0f}; + float n1, n2; + + n1 = GLM(quat_norm)(a); + n2 = sqrtf(a[0] * a[0] + a[1] * a[1] + a[2] * a[2] + a[3] * a[3]); + + ASSERT(test_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_normalize_to) { + versor v1 = {2.0f, -3.0f, 4.0f, 5.0f}, v2; + float s = 1.0f; + float norm; + + 
GLM(quat_normalize_to)(v1, v2); + + norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2] + v1[3] * v1[3]); + if (norm <= 0.0f) { + ASSERTIFY(test_assert_quat_eq_identity(v1)) + + TEST_SUCCESS + } + + norm = s / norm; + + ASSERT(test_eq(v1[0] * norm, v2[0])) + ASSERT(test_eq(v1[1] * norm, v2[1])) + ASSERT(test_eq(v1[2] * norm, v2[2])) + ASSERT(test_eq(v1[3] * norm, v2[3])) + + glm_vec4_zero(v1); + GLM(quat_normalize_to)(v1, v2); + ASSERTIFY(test_assert_quat_eq_identity(v2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_normalize) { + versor v1 = {2.0f, -3.0f, 4.0f, 5.0f}, v2 = {2.0f, -3.0f, 4.0f, 5.0f}; + float s = 1.0f; + float norm; + + GLM(quat_normalize)(v2); + + norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2] + v1[3] * v1[3]); + if (norm <= 0.0f) { + ASSERTIFY(test_assert_quat_eq_identity(v1)) + + TEST_SUCCESS + } + + norm = s / norm; + + ASSERT(test_eq(v1[0] * norm, v2[0])) + ASSERT(test_eq(v1[1] * norm, v2[1])) + ASSERT(test_eq(v1[2] * norm, v2[2])) + ASSERT(test_eq(v1[3] * norm, v2[3])) + + glm_vec4_zero(v1); + GLM(quat_normalize)(v1); + ASSERTIFY(test_assert_quat_eq_identity(v1)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_dot) { + versor a = {10.0f, 9.0f, 8.0f, 78.0f}; + versor b = {1.0f, 2.0f, 3.0f, 4.0f}; + float dot1, dot2; + + dot1 = GLM(quat_dot)(a, b); + dot2 = a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3]; + + ASSERT(test_eq(dot1, dot2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_conjugate) { + versor a = {10.0f, 9.0f, 8.0f, 78.0f}; + versor b = {1.0f, 2.0f, 3.0f, 4.0f}; + versor d, e; + + GLM(quat_conjugate)(a, d); + GLM(quat_conjugate)(b, e); + + ASSERT(test_eq(d[0], -a[0])) + ASSERT(test_eq(d[1], -a[1])) + ASSERT(test_eq(d[2], -a[2])) + ASSERT(test_eq(d[3], a[3])) + + ASSERT(test_eq(e[0], -b[0])) + ASSERT(test_eq(e[1], -b[1])) + ASSERT(test_eq(e[2], -b[2])) + ASSERT(test_eq(e[3], b[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_inv) { + versor a = {10.0f, 9.0f, 8.0f, 78.0f}; + versor b = {1.0f, 2.0f, 3.0f, 4.0f}; + versor d, e; + float n1, n2; + + n1 = 1.0f / glm_vec4_norm2(a); + n2 = 1.0f / glm_vec4_norm2(b); + + GLM(quat_inv)(a, d); + GLM(quat_inv)(b, e); + + ASSERT(test_eq(d[0], -a[0] * n1)) + ASSERT(test_eq(d[1], -a[1] * n1)) + ASSERT(test_eq(d[2], -a[2] * n1)) + ASSERT(test_eq(d[3], a[3] * n1)) + + ASSERT(test_eq(e[0], -b[0] * n2)) + ASSERT(test_eq(e[1], -b[1] * n2)) + ASSERT(test_eq(e[2], -b[2] * n2)) + ASSERT(test_eq(e[3], b[3] * n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_add) { + versor a = {-10.0f, 9.0f, -8.0f, 56.0f}; + versor b = {12.0f, 19.0f, -18.0f, 1.0f}; + versor c, d; + + c[0] = a[0] + b[0]; + c[1] = a[1] + b[1]; + c[2] = a[2] + b[2]; + c[3] = a[3] + b[3]; + + GLM(quat_add)(a, b, d); + + ASSERTIFY(test_assert_quat_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_sub) { + vec4 a = {-10.0f, 9.0f, -8.0f, 56.0f}; + vec4 b = {12.0f, 19.0f, -18.0f, 1.0f}; + vec4 c, d; + + c[0] = a[0] - b[0]; + c[1] = a[1] - b[1]; + c[2] = a[2] - b[2]; + c[3] = a[3] - b[3]; + + GLM(quat_sub)(a, b, d); + + ASSERTIFY(test_assert_quat_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_real) { + versor a = {10.0f, 9.0f, 8.0f, 78.0f}; + versor b = {1.0f, 2.0f, 3.0f, 4.0f}; + + ASSERT(test_eq(GLM(quat_real)(a), 78.0f)) + ASSERT(test_eq(GLM(quat_real)(b), 4.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_imag) { + versor a = {10.0f, 9.0f, 8.0f, 78.0f}; + versor b = {1.0f, 2.0f, 3.0f, 4.0f}; + vec3 d, e; + + GLM(quat_imag)(a, d); + GLM(quat_imag)(b, e); + + ASSERT(test_eq(d[0], a[0])) + 
ASSERT(test_eq(d[1], a[1])) + ASSERT(test_eq(d[2], a[2])) + + ASSERT(test_eq(e[0], b[0])) + ASSERT(test_eq(e[1], b[1])) + ASSERT(test_eq(e[2], b[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_imagn) { + versor a = {10.0f, 9.0f, 8.0f, 78.0f}; + versor b = {1.0f, 2.0f, 3.0f, 4.0f}; + vec3 d, e; + + GLM(quat_imagn)(a, d); + GLM(quat_imagn)(b, e); + + glm_vec3_normalize(a); + glm_vec3_normalize(b); + glm_vec3_normalize(d); + glm_vec3_normalize(e); + + ASSERT(test_eq(d[0], a[0])) + ASSERT(test_eq(d[1], a[1])) + ASSERT(test_eq(d[2], a[2])) + + ASSERT(test_eq(e[0], b[0])) + ASSERT(test_eq(e[1], b[1])) + ASSERT(test_eq(e[2], b[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_imaglen) { + versor a = {10.0f, 9.0f, 8.0f, 78.0f}; + versor b = {1.0f, 2.0f, 3.0f, 4.0f}; + + ASSERT(test_eq(GLM(quat_imaglen)(a), glm_vec3_norm(a))); + ASSERT(test_eq(GLM(quat_imaglen)(b), glm_vec3_norm(b))); + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_angle) { + versor q1 = {1.0f, 2.0f, 3.0f, 4.0f}, q2, q3; + vec3 v1; + float a1, a2, a3; + + test_rand_vec3(v1); + GLM(quatv)(q1, glm_rad(60.140f), v1); + GLM(quatv)(q2, glm_rad(160.04f), v1); + GLM(quatv)(q3, glm_rad(20.350f), v1); + + a1 = GLM(quat_angle)(q1); + a2 = GLM(quat_angle)(q2); + a3 = GLM(quat_angle)(q3); + + ASSERT(test_eq(a1, glm_rad(60.140f))) + ASSERT(test_eq(a2, glm_rad(160.04f))) + ASSERT(test_eq(a3, glm_rad(20.350f))) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_axis) { + versor q1 = {1.0f, 2.0f, 3.0f, 4.0f}, q2, q3; + vec3 v1, v2; + + test_rand_vec3(v1); + GLM(quatv)(q1, glm_rad(60.0f), v1); + + glm_quat_axis(q1, v2); + glm_vec3_normalize(v1); + ASSERTIFY(test_assert_vec3_eq(v1, v2)) + + test_rand_vec3(v1); + GLM(quatv)(q2, glm_rad(60.0f), v1); + + glm_quat_axis(q2, v2); + glm_vec3_normalize(v1); + ASSERTIFY(test_assert_vec3_eq(v1, v2)) + + test_rand_vec3(v1); + GLM(quatv)(q3, glm_rad(60.0f), v1); + + glm_quat_axis(q3, v2); + glm_vec3_normalize(v1); + ASSERTIFY(test_assert_vec3_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_mul) { + versor q1 = {2.0f, 3.0f, 4.0f, 5.0f}; + versor q2 = {6.0f, 7.0f, 8.0f, 9.0f}; + versor q3; + versor q4; + vec3 v1 = {1.5f, 2.5f, 3.5f}; + + GLM(quat_mul)(q1, q2, q3); + + ASSERT(test_eq(q3[0], q1[3] * q2[0] + q1[0] * q2[3] + q1[1] * q2[2] - q1[2] * q2[1])) + ASSERT(test_eq(q3[1], q1[3] * q2[1] - q1[0] * q2[2] + q1[1] * q2[3] + q1[2] * q2[0])) + ASSERT(test_eq(q3[2], q1[3] * q2[2] + q1[0] * q2[1] - q1[1] * q2[0] + q1[2] * q2[3])) + ASSERT(test_eq(q3[3], q1[3] * q2[3] - q1[0] * q2[0] - q1[1] * q2[1] - q1[2] * q2[2])) + + glm_quatv(q1, glm_rad(30.0f), v1); + glm_quatv(q2, glm_rad(20.0f), v1); + glm_quatv(q3, glm_rad(50.0f), v1); + + GLM(quat_mul)(q1, q2, q4); + + ASSERTIFY(test_assert_quat_eq(q3, q4)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_mat4) { + mat4 m1, m2; + versor q1, q2, q3; + vec3 axis1; + vec3 axis2 = {1.9f, 2.3f, 4.5f}; + int i; + + GLM(quat)(q1, GLM_PI_4f, 1.9f, 2.3f, 4.5f); + GLM(quat_mat4)(q1, m1); + GLM(mat4_quat)(m1, q2); + + GLM(rotate_make)(m2, GLM_PI_4f, axis2); + GLM(mat4_quat)(m1, q3); + + GLM(quat_axis)(q3, axis1); + + GLM(vec3_normalize)(axis1); + GLM(vec3_normalize)(axis2); + + ASSERT(test_eq(glm_quat_angle(q3), GLM_PI_4f)) + ASSERTIFY(test_assert_vec3_eq(axis1, axis2)) + ASSERTIFY(test_assert_vec4_eq(q1, q2)) + ASSERTIFY(test_assert_mat4_eq(m1, m2)) + ASSERTIFY(test_assert_vec4_eq(q1, q3)) + + /* 1. 
test quat to mat and mat to quat */ + for (i = 0; i < 1000; i++) { + test_rand_quat(q1); + + GLM(quat_mat4)(q1, m1); + GLM(mat4_quat)(m1, q2); + GLM(quat_mat4)(q2, m2); + + /* 2. test first quat and generated one equality */ + ASSERTIFY(test_assert_quat_eq_abs(q1, q2)); + + /* 3. test first rot and second rotation */ + /* almost equal */ + ASSERTIFY(test_assert_mat4_eq2(m1, m2, 0.000009f)); + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_mat4t) { + mat4 m1, m2; + versor q1, q2, q3; + vec3 axis1; + vec3 axis2 = {1.9f, 2.3f, 4.5f}; + int i; + + GLM(quat)(q1, GLM_PI_4f, 1.9f, 2.3f, 4.5f); + + GLM(quat_mat4t)(q1, m1); + glm_mat4_transpose(m1); + + GLM(mat4_quat)(m1, q2); + + GLM(rotate_make)(m2, GLM_PI_4f, axis2); + GLM(mat4_quat)(m1, q3); + + GLM(quat_axis)(q3, axis1); + + GLM(vec3_normalize)(axis1); + GLM(vec3_normalize)(axis2); + + ASSERT(test_eq(glm_quat_angle(q3), GLM_PI_4f)) + ASSERTIFY(test_assert_vec3_eq(axis1, axis2)) + ASSERTIFY(test_assert_vec4_eq(q1, q2)) + ASSERTIFY(test_assert_mat4_eq(m1, m2)) + ASSERTIFY(test_assert_vec4_eq(q1, q3)) + + /* 1. test quat to mat and mat to quat */ + for (i = 0; i < 1000; i++) { + test_rand_quat(q1); + + GLM(quat_mat4t)(q1, m1); + glm_mat4_transpose(m1); + + GLM(mat4_quat)(m1, q2); + + GLM(quat_mat4t)(q2, m2); + glm_mat4_transpose(m2); + + /* 2. test first quat and generated one equality */ + ASSERTIFY(test_assert_quat_eq_abs(q1, q2)); + + /* 3. test first rot and second rotation */ + /* almost equal */ + ASSERTIFY(test_assert_mat4_eq2(m1, m2, 0.000009f)); + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_mat3) { + mat4 m1, m2; + mat3 m3; + versor q1, q2, q3; + vec3 axis1; + vec3 axis2 = {1.9f, 2.3f, 4.5f}; + int i; + + GLM(quat)(q1, GLM_PI_4f, 1.9f, 2.3f, 4.5f); + GLM(quat_mat3)(q1, m3); + glm_mat4_identity(m1); + glm_mat4_ins3(m3, m1); + + GLM(mat4_quat)(m1, q2); + + GLM(rotate_make)(m2, GLM_PI_4f, axis2); + GLM(mat4_quat)(m1, q3); + + GLM(quat_axis)(q3, axis1); + + GLM(vec3_normalize)(axis1); + GLM(vec3_normalize)(axis2); + + ASSERT(test_eq(glm_quat_angle(q3), GLM_PI_4f)) + ASSERTIFY(test_assert_vec3_eq(axis1, axis2)) + ASSERTIFY(test_assert_vec4_eq(q1, q2)) + ASSERTIFY(test_assert_mat4_eq(m1, m2)) + ASSERTIFY(test_assert_vec4_eq(q1, q3)) + + /* 1. test quat to mat and mat to quat */ + for (i = 0; i < 1000; i++) { + test_rand_quat(q1); + + GLM(quat_mat3)(q1, m3); + glm_mat4_identity(m1); + glm_mat4_ins3(m3, m1); + + GLM(mat4_quat)(m1, q2); + + GLM(quat_mat3)(q2, m3); + glm_mat4_identity(m2); + glm_mat4_ins3(m3, m2); + + /* 2. test first quat and generated one equality */ + ASSERTIFY(test_assert_quat_eq_abs(q1, q2)); + + /* 3. test first rot and second rotation */ + /* almost equal */ + ASSERTIFY(test_assert_mat4_eq2(m1, m2, 0.000009f)); + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_mat3t) { + mat4 m1, m2; + mat3 m3; + versor q1, q2, q3; + vec3 axis1; + vec3 axis2 = {1.9f, 2.3f, 4.5f}; + int i; + + GLM(quat)(q1, GLM_PI_4f, 1.9f, 2.3f, 4.5f); + + GLM(quat_mat3t)(q1, m3); + glm_mat3_transpose(m3); + glm_mat4_identity(m1); + glm_mat4_ins3(m3, m1); + + GLM(mat4_quat)(m1, q2); + + GLM(rotate_make)(m2, GLM_PI_4f, axis2); + GLM(mat4_quat)(m1, q3); + + GLM(quat_axis)(q3, axis1); + + GLM(vec3_normalize)(axis1); + GLM(vec3_normalize)(axis2); + + ASSERT(test_eq(glm_quat_angle(q3), GLM_PI_4f)) + ASSERTIFY(test_assert_vec3_eq(axis1, axis2)) + ASSERTIFY(test_assert_vec4_eq(q1, q2)) + ASSERTIFY(test_assert_mat4_eq(m1, m2)) + ASSERTIFY(test_assert_vec4_eq(q1, q3)) + + /* 1. 
test quat to mat and mat to quat */ + for (i = 0; i < 1000; i++) { + test_rand_quat(q1); + + GLM(quat_mat3t)(q1, m3); + glm_mat3_transpose(m3); + glm_mat4_identity(m1); + glm_mat4_ins3(m3, m1); + + GLM(mat4_quat)(m1, q2); + + GLM(quat_mat3t)(q2, m3); + glm_mat3_transpose(m3); + glm_mat4_identity(m2); + glm_mat4_ins3(m3, m2); + + /* 2. test first quat and generated one equality */ + ASSERTIFY(test_assert_quat_eq_abs(q1, q2)); + + /* 3. test first rot and second rotation */ + /* almost equal */ + ASSERTIFY(test_assert_mat4_eq2(m1, m2, 0.000009f)); + } + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_lerp) { + versor v1 = {-100.0f, -200.0f, -10.0f, -10.0f}; + versor v2 = {100.0f, 200.0f, 10.0f, 10.0f}; + versor v3; + + GLM(quat_lerp)(v1, v2, 0.5f, v3); + ASSERT(test_eq(v3[0], 0.0f)) + ASSERT(test_eq(v3[1], 0.0f)) + ASSERT(test_eq(v3[2], 0.0f)) + ASSERT(test_eq(v3[3], 0.0f)) + + GLM(quat_lerp)(v1, v2, 0.75f, v3); + ASSERT(test_eq(v3[0], 50.0f)) + ASSERT(test_eq(v3[1], 100.0f)) + ASSERT(test_eq(v3[2], 5.0f)) + ASSERT(test_eq(v3[3], 5.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_lerpc) { + versor v1 = {-100.0f, -200.0f, -10.0f, -10.0f}; + versor v2 = {100.0f, 200.0f, 10.0f, 10.0f}; + versor v3; + + GLM(quat_lerpc)(v1, v2, 0.5f, v3); + ASSERT(test_eq(v3[0], 0.0f)) + ASSERT(test_eq(v3[1], 0.0f)) + ASSERT(test_eq(v3[2], 0.0f)) + ASSERT(test_eq(v3[3], 0.0f)) + + GLM(quat_lerpc)(v1, v2, 0.75f, v3); + ASSERT(test_eq(v3[0], 50.0f)) + ASSERT(test_eq(v3[1], 100.0f)) + ASSERT(test_eq(v3[2], 5.0f)) + ASSERT(test_eq(v3[3], 5.0f)) + + GLM(quat_lerpc)(v1, v2, -1.75f, v3); + ASSERT(test_eq(v3[0], -100.0f)) + ASSERT(test_eq(v3[1], -200.0f)) + ASSERT(test_eq(v3[2], -10.0f)) + ASSERT(test_eq(v3[3], -10.0f)) + + GLM(quat_lerpc)(v1, v2, 1.75f, v3); + ASSERT(test_eq(v3[0], 100.0f)) + ASSERT(test_eq(v3[1], 200.0f)) + ASSERT(test_eq(v3[2], 10.0f)) + ASSERT(test_eq(v3[3], 10.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_slerp) { + versor q1, q2, q3, q4; + vec3 v1 = {10.0f, 0.0f, 0.0f}, v2; + + glm_quatv(q1, glm_rad(30.0f), v1); + glm_quatv(q2, glm_rad(90.0f), v1); + + q1[0] = 10.0f; + GLM(quat_slerp)(q1, q2, 1.0f, q3); + ASSERTIFY(test_assert_quat_eq(q1, q3)); + + glm_quatv(q1, glm_rad(30.001f), v1); + glm_quatv(q2, glm_rad(30.002f), v1); + GLM(quat_slerp)(q1, q2, 0.7f, q3); + glm_quat_lerp(q1, q2, 0.7f, q4); + ASSERTIFY(test_assert_quat_eq(q3, q4)); + + glm_quatv(q1, glm_rad(30.0f), v1); + glm_quatv(q2, glm_rad(90.0f), v1); + GLM(quat_slerp)(q1, q2, 0.5f, q3); + + glm_quat_axis(q3, v2); + glm_vec3_normalize(v1); + glm_vec3_normalize(v2); + + ASSERT(glm_quat_angle(q3) > glm_rad(30.0f)); + ASSERT(glm_quat_angle(q3) < glm_rad(90.0f)); + ASSERTIFY(test_assert_vec3_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_look) { + versor q1; + vec3 v1 = {0.0f, 1.0f, 0.0f}; + mat4 m1, m2; + + glm_quat(q1, glm_rad(90.0f), 0.0f, 1.0f, 0.0f); + GLM(quat_look)(v1, q1, m1); + + glm_look(v1, (vec3){-1.0f, 0.0f, 0.0f}, GLM_YUP, m2); + ASSERTIFY(test_assert_mat4_eq(m1, m2)); + + glm_quat(q1, glm_rad(180.0f), 1.0f, 0.0f, 0.0f); + GLM(quat_look)(v1, q1, m1); + + glm_look(v1, (vec3){0.0f, 0.0f, 1.0f}, (vec3){0.0f, -1.0f, 0.0f}, m2); + + ASSERTIFY(test_assert_mat4_eq(m1, m2)); + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_for) { + versor q1, q2; + + glm_quat(q1, glm_rad(90.0f), 0.0f, 1.0f, 0.0f); + GLM(quat_for)((vec3){-1.0f, 0.0f, 0.0f}, (vec3){0.0f, 1.0f, 0.0f}, q2); + ASSERTIFY(test_assert_quat_eq(q1, q2)); + + glm_quat(q2, glm_rad(90.0f), 1.0f, 0.0f, 0.0f); + GLM(quat_for)((vec3){0.0f, 1.0f, 0.0f}, 
(vec3){0.0f, 0.0f, 1.0f}, q1); + ASSERTIFY(test_assert_quat_eq(q1, q2)); + + glm_quat(q2, glm_rad(180.0f), 1.0f, 0.0f, 0.0f); + GLM(quat_for)((vec3){0.0f, 0.0f, 1.0f}, (vec3){0.0f, -1.0f, 0.0f}, q1); + ASSERTIFY(test_assert_quat_eq(q1, q2)); + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_forp) { + versor q1, q2; + + glm_quat(q1, glm_rad(90.0f), 0.0f, 1.0f, 0.0f); + GLM(quat_forp)((vec3){2.0f, 0.0f, 0.0f}, + (vec3){1.0f, 0.0f, 0.0f}, + (vec3){0.0f, 1.0f, 0.0f}, + q2); + ASSERTIFY(test_assert_quat_eq(q1, q2)); + + glm_quat(q2, glm_rad(90.0f), 1.0f, 0.0f, 0.0f); + GLM(quat_forp)((vec3){0.0f, 1.0f, 0.0f}, + (vec3){0.0f, 2.0f, 0.0f}, + (vec3){0.0f, 0.0f, 1.0f}, + q1); + ASSERTIFY(test_assert_quat_eq(q1, q2)); + + glm_quat(q2, glm_rad(180.0f), 1.0f, 0.0f, 0.0f); + GLM(quat_forp)((vec3){0.0f, 1.0f, 1.0f}, + (vec3){0.0f, 1.0f, 2.0f}, + (vec3){0.0f, -1.0f, 0.0f}, + q1); + ASSERTIFY(test_assert_quat_eq(q1, q2)); + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_rotatev) { + vec3 v1 = {1.0f, 0.0f, 0.0f}, v2 = {1.0f, 1.0f, 1.0f}; + versor q; + + /* rotate X around Y = -Z */ + glm_quatv(q, GLM_PI_2f, GLM_YUP); + GLM(quat_rotatev)(q, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -1.0f)) + + /* rotate -Z around X = Y */ + glm_quatv(q, GLM_PI_2f, GLM_XUP); + GLM(quat_rotatev)(q, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 1.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + /* rotate Y around Z = -X */ + glm_quatv(q, GLM_PI_2f, GLM_ZUP); + GLM(quat_rotatev)(q, v1, v1); + + ASSERT(test_eq(v1[0], -1.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + /* rotate v2 around Y by 90deg */ + glm_quatv(q, GLM_PI_2f, GLM_YUP); + GLM(quat_rotatev)(q, v2, v2); + + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], -1.0f)) + + /* rotate v2 around Y by 90deg */ + glm_quatv(q, GLM_PI_2f, GLM_YUP); + GLM(quat_rotatev)(q, v2, v2); + + ASSERT(test_eq(v2[0], -1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], -1.0f)) + + /* rotate v2 around Y by 90deg */ + glm_quatv(q, GLM_PI_2f, GLM_YUP); + GLM(quat_rotatev)(q, v2, v2); + + ASSERT(test_eq(v2[0], -1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + /* rotate v2 around X by 90deg */ + glm_quatv(q, GLM_PI_2f, GLM_XUP); + GLM(quat_rotatev)(q, v2, v2); + + ASSERT(test_eq(v2[0], -1.0f)) + ASSERT(test_eq(v2[1], -1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + /* rotate v2 around Z by 90deg */ + glm_quatv(q, GLM_PI_2f, GLM_ZUP); + GLM(quat_rotatev)(q, v2, v2); + + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], -1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_rotate) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT, m2; + versor q1; + vec4 v1 = {1.0f, 0.0f, 0.0f, 1.0f}; + + /* rotate X around Y = -Z */ + glm_quatv(q1, GLM_PI_2f, GLM_YUP); + GLM(quat_rotate)(m1, q1, m1); + glm_rotate_make(m2, GLM_PI_2f, GLM_YUP); + ASSERTIFY(test_assert_mat4_eq(m1, m2)) + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -1.0f)) + + glm_mat4_identity(m1); + glm_mat4_identity(m2); + + /* rotate -Z around X = Y */ + glm_quatv(q1, GLM_PI_2f, GLM_XUP); + GLM(quat_rotate)(m1, q1, m1); + glm_rotate(m2, GLM_PI_2f, GLM_XUP); + ASSERTIFY(test_assert_mat4_eq(m1, m2)) + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 1.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + glm_mat4_identity(m1); + glm_mat4_identity(m2); + + /* rotate Y around X = +Z */ 
+ glm_quatv(q1, GLM_PI_2f, GLM_XUP); + GLM(quat_rotate)(m1, q1, m1); + glm_rotate(m2, GLM_PI_2f, GLM_XUP); + ASSERTIFY(test_assert_mat4_eq(m1, m2)) + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_rotate_at) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + versor q1; + vec4 v1 = {1.0f, 0.0f, 0.0f, 1.0f}; + + glm_quatv(q1, GLM_PI_2f, GLM_YUP); + GLM(quat_rotate_at)(m1, q1, (vec3){0.5f, 0.0f, 0.0f}); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.5f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -0.5f)) + + glm_mat4_identity(m1); + + glm_quatv(q1, GLM_PI_2f, GLM_ZUP); + GLM(quat_rotate_at)(m1, q1, (vec3){0.0f, 0.0f, 0.0f}); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.5f)) + ASSERT(test_eq(v1[2], -0.5f)) + + glm_mat4_identity(m1); + + v1[0] = 1.0f; + v1[1] = 1.0f; + v1[2] = 1.0f; + + glm_quatv(q1, GLM_PI_2f, GLM_XUP); + GLM(quat_rotate_at)(m1, q1, GLM_VEC3_ZERO); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 1.0f)) + ASSERT(test_eq(v1[1], -1.0f)) + ASSERT(test_eq(v1[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, quat_rotate_atm) { + mat4 m1 = GLM_MAT4_IDENTITY_INIT; + versor q1; + vec4 v1 = {1.0f, 0.0f, 0.0f, 1.0f}; + + glm_quatv(q1, GLM_PI_2f, GLM_YUP); + GLM(quat_rotate_atm)(m1, q1, (vec3){0.5f, 0.0f, 0.0f}); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.5f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -0.5f)) + + glm_quatv(q1, GLM_PI_2f, GLM_ZUP); + GLM(quat_rotate_atm)(m1, q1, (vec3){0.0f, 0.0f, 0.0f}); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.5f)) + ASSERT(test_eq(v1[2], -0.5f)) + + v1[0] = 1.0f; + v1[1] = 1.0f; + v1[2] = 1.0f; + + glm_quatv(q1, GLM_PI_2f, GLM_XUP); + GLM(quat_rotate_atm)(m1, q1, GLM_VEC3_ZERO); + glm_mat4_mulv(m1, v1, v1); + + ASSERT(test_eq(v1[0], 1.0f)) + ASSERT(test_eq(v1[1], -1.0f)) + ASSERT(test_eq(v1[2], 1.0f)) + + TEST_SUCCESS +} diff --git a/test/src/test_struct.c b/test/src/test_struct.c new file mode 100644 index 0000000..9c1126f --- /dev/null +++ b/test/src/test_struct.c @@ -0,0 +1,78 @@ +/* +* Copyright (c), Recep Aslantas. 
+*
+* MIT License (MIT), http://opensource.org/licenses/MIT
+* Full license can be found in the LICENSE file
+*/
+
+#include "test_common.h"
+
+TEST_IMPL(mat3s_identity_init) {
+  mat3s mat3_identity = GLMS_MAT3_IDENTITY_INIT;
+  mat3 mat3_identity_a = GLM_MAT3_IDENTITY_INIT;
+  ASSERTIFY(test_assert_mat3_eq(mat3_identity.raw, mat3_identity_a));
+  TEST_SUCCESS
+}
+
+TEST_IMPL(mat3s_zero_init) {
+  mat3s mat3_zero = GLMS_MAT3_ZERO_INIT;
+  mat3 mat3_zero_a = GLM_MAT3_ZERO_INIT;
+  ASSERTIFY(test_assert_mat3_eq(mat3_zero.raw, mat3_zero_a));
+  TEST_SUCCESS
+}
+
+TEST_IMPL(mat4s_identity_init) {
+  mat4s mat4_identity = GLMS_MAT4_IDENTITY_INIT;
+  mat4 mat4_identity_a = GLM_MAT4_IDENTITY_INIT;
+  ASSERTIFY(test_assert_mat4_eq(mat4_identity.raw, mat4_identity_a));
+  TEST_SUCCESS
+}
+
+TEST_IMPL(mat4s_zero_init) {
+  mat4s mat4_zero = GLMS_MAT4_ZERO_INIT;
+  mat4 mat4_zero_a = GLM_MAT4_ZERO_INIT;
+  ASSERTIFY(test_assert_mat4_eq(mat4_zero.raw, mat4_zero_a));
+  TEST_SUCCESS
+}
+
+TEST_IMPL(quats_zero_init) {
+  /* note: despite the name, this compares the identity initializers */
+  versors quat_zero = GLMS_QUAT_IDENTITY_INIT;
+  versor quat_zero_a = GLM_QUAT_IDENTITY_INIT;
+  ASSERTIFY(test_assert_quat_eq(quat_zero.raw, quat_zero_a));
+  TEST_SUCCESS
+}
+
+TEST_IMPL(vec3s_one_init) {
+  vec3s vec3_one = GLMS_VEC3_ONE_INIT;
+  vec3 vec3_one_a = GLM_VEC3_ONE_INIT;
+  ASSERTIFY(test_assert_vec3_eq(vec3_one.raw, vec3_one_a));
+  TEST_SUCCESS
+}
+
+TEST_IMPL(vec3s_zero_init) {
+  vec3s vec3_zero = GLMS_VEC3_ZERO_INIT;
+  vec3 vec3_zero_a = GLM_VEC3_ZERO_INIT;
+  ASSERTIFY(test_assert_vec3_eq(vec3_zero.raw, vec3_zero_a));
+  TEST_SUCCESS
+}
+
+TEST_IMPL(vec4s_black_init) {
+  vec4s vec4_black = GLMS_VEC4_BLACK_INIT;
+  vec4 vec4_black_a = GLM_VEC4_BLACK_INIT;
+  ASSERTIFY(test_assert_vec4_eq(vec4_black.raw, vec4_black_a));
+  TEST_SUCCESS
+}
+
+TEST_IMPL(vec4s_one_init) {
+  vec4s vec4_one = GLMS_VEC4_ONE_INIT;
+  vec4 vec4_one_a = GLM_VEC4_ONE_INIT;
+  ASSERTIFY(test_assert_vec4_eq(vec4_one.raw, vec4_one_a));
+  TEST_SUCCESS
+}
+
+TEST_IMPL(vec4s_zero_init) {
+  vec4s vec4_zero = GLMS_VEC4_ZERO_INIT;
+  vec4 vec4_zero_a = GLM_VEC4_ZERO_INIT;
+  ASSERTIFY(test_assert_vec4_eq(vec4_zero.raw, vec4_zero_a));
+  TEST_SUCCESS
+}
diff --git a/test/src/test_vec3.c b/test/src/test_vec3.c
deleted file mode 100644
index 19af4aa..0000000
--- a/test/src/test_vec3.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (c), Recep Aslantas.
- * - * MIT License (MIT), http://opensource.org/licenses/MIT - * Full license can be found in the LICENSE file - */ - -#include "test_common.h" - -void -test_vec3(void **state) { - mat3 rot1m3; - mat4 rot1; - vec3 v, v1, v2; - vec3s vs1, vs2, vs3, vs4; - - /* test zero */ - glm_vec3_zero(v); - test_assert_vec3_eq(GLM_VEC3_ZERO, v); - - /* test one */ - glm_vec3_one(v); - test_assert_vec3_eq(GLM_VEC3_ONE, v); - - /* adds, subs, div, divs, mul */ - glm_vec3_add(v, GLM_VEC3_ONE, v); - assert_true(glmc_vec3_eq_eps(v, 2)); - - glm_vec3_adds(v, 10, v); - assert_true(glmc_vec3_eq_eps(v, 12)); - - glm_vec3_sub(v, GLM_VEC3_ONE, v); - assert_true(glmc_vec3_eq_eps(v, 11)); - - glm_vec3_subs(v, 1, v); - assert_true(glmc_vec3_eq_eps(v, 10)); - - glm_vec3_broadcast(2, v1); - glm_vec3_div(v, v1, v); - assert_true(glmc_vec3_eq_eps(v, 5)); - - glm_vec3_divs(v, 0.5, v); - assert_true(glmc_vec3_eq_eps(v, 10)); - - glm_vec3_mul(v, v1, v); - assert_true(glmc_vec3_eq_eps(v, 20)); - - glm_vec3_scale(v, 0.5, v); - assert_true(glmc_vec3_eq_eps(v, 10)); - - glm_vec3_normalize_to(v, v1); - glm_vec3_scale(v1, 0.8, v1); - glm_vec3_scale_as(v, 0.8, v); - test_assert_vec3_eq(v1, v); - - /* addadd, subadd, muladd */ - glm_vec3_one(v); - - glm_vec3_addadd(GLM_VEC3_ONE, GLM_VEC3_ONE, v); - assert_true(glmc_vec3_eq_eps(v, 3)); - - glm_vec3_subadd(GLM_VEC3_ONE, GLM_VEC3_ZERO, v); - assert_true(glmc_vec3_eq_eps(v, 4)); - - glm_vec3_broadcast(2, v1); - glm_vec3_broadcast(3, v2); - glm_vec3_muladd(v1, v2, v); - assert_true(glmc_vec3_eq_eps(v, 10)); - - /* rotate */ - glm_vec3_copy(GLM_YUP, v); - glm_rotate_make(rot1, glm_rad(90), GLM_XUP); - glm_vec3_rotate_m4(rot1, v, v1); - glm_mat4_pick3(rot1, rot1m3); - glm_vec3_rotate_m3(rot1m3, v, v2); - - test_assert_vec3_eq(v1, v2); - test_assert_vec3_eq(v1, GLM_ZUP); - - /* structs */ - vs1 = test_rand_vec3s(); - vs2 = test_rand_vec3s(); - - vs3 = glms_vec3_add(vs1, vs2); - vs4 = glms_vec3_maxv(vs1, vs3); - test_assert_vec3s_eq(vs3, vs4); - - /* swizzle */ - - /* ZYX */ - v1[0] = 1; - v1[1] = 2; - v1[2] = 3; - glm_vec3_swizzle(v1, GLM_ZYX, v1); - test_assert_vec3_eq(v1, (vec3){3, 2, 1}); - - glm_vec3_swizzle(v1, GLM_XXX, v1); - test_assert_vec3_eq(v1, (vec3){3, 3, 3}); - - v1[0] = 1; - v1[1] = 2; - v1[2] = 3; - - glm_vec3_swizzle(v1, GLM_YYY, v1); - test_assert_vec3_eq(v1, (vec3){2, 2, 2}); - - v1[0] = 1; - v1[1] = 2; - v1[2] = 3; - - glm_vec3_swizzle(v1, GLM_ZZZ, v1); - test_assert_vec3_eq(v1, (vec3){3, 3, 3}); -} diff --git a/test/src/test_vec3.h b/test/src/test_vec3.h new file mode 100644 index 0000000..0b6dc4d --- /dev/null +++ b/test/src/test_vec3.h @@ -0,0 +1,1726 @@ +/* + * Copyright (c), Recep Aslantas. 
+ * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#include "test_common.h" + +#define TEST_GLM_SHUFFLE3(z, y, x) (((z) << 4) | ((y) << 2) | (x)) + +#ifndef CGLM_TEST_VEC3_ONCE +#define CGLM_TEST_VEC3_ONCE + +/* Macros */ + +TEST_IMPL(MACRO_GLM_VEC3_ONE_INIT) { + vec3 v = GLM_VEC3_ONE_INIT; + + ASSERT(test_eq(v[0], 1.0f)) + ASSERT(test_eq(v[1], 1.0f)) + ASSERT(test_eq(v[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_VEC3_ZERO_INIT) { + vec3 v = GLM_VEC3_ZERO_INIT; + + ASSERT(test_eq(v[0], 0.0f)) + ASSERT(test_eq(v[1], 0.0f)) + ASSERT(test_eq(v[2], 0.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_VEC3_ONE) { + ASSERT(test_eq(GLM_VEC3_ONE[0], 1.0f)) + ASSERT(test_eq(GLM_VEC3_ONE[1], 1.0f)) + ASSERT(test_eq(GLM_VEC3_ONE[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_VEC3_ZERO) { + ASSERT(test_eq(GLM_VEC3_ZERO[0], 0.0f)) + ASSERT(test_eq(GLM_VEC3_ZERO[1], 0.0f)) + ASSERT(test_eq(GLM_VEC3_ZERO[2], 0.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_YUP) { + ASSERT(test_eq(GLM_YUP[0], 0.0f)) + ASSERT(test_eq(GLM_YUP[1], 1.0f)) + ASSERT(test_eq(GLM_YUP[2], 0.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_ZUP) { + ASSERT(test_eq(GLM_ZUP[0], 0.0f)) + ASSERT(test_eq(GLM_ZUP[1], 0.0f)) + ASSERT(test_eq(GLM_ZUP[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_XUP) { + ASSERT(test_eq(GLM_XUP[0], 1.0f)) + ASSERT(test_eq(GLM_XUP[1], 0.0f)) + ASSERT(test_eq(GLM_XUP[2], 0.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_FORWARD_RH) { + ASSERT(test_eq(GLM_FORWARD[0], 0.0f)) + ASSERT(test_eq(GLM_FORWARD[1], 0.0f)) + ASSERT(test_eq(GLM_FORWARD[2], -1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_SHUFFLE3) { + ASSERT(TEST_GLM_SHUFFLE3(1, 0, 0) == GLM_SHUFFLE3(1, 0, 0)) + ASSERT(TEST_GLM_SHUFFLE3(0, 1, 0) == GLM_SHUFFLE3(0, 1, 0)) + ASSERT(TEST_GLM_SHUFFLE3(0, 0, 1) == GLM_SHUFFLE3(0, 0, 1)) + ASSERT(TEST_GLM_SHUFFLE3(1, 0, 0) == GLM_SHUFFLE3(1, 0, 0)) + ASSERT(TEST_GLM_SHUFFLE3(1, 0, 1) == GLM_SHUFFLE3(1, 0, 1)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_XXX) { + ASSERT(TEST_GLM_SHUFFLE3(0, 0, 0) == GLM_XXX) + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_YYY) { + ASSERT(TEST_GLM_SHUFFLE3(1, 1, 1) == GLM_YYY) + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_ZZZ) { + ASSERT(TEST_GLM_SHUFFLE3(2, 2, 2) == GLM_ZZZ) + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_ZYX) { + ASSERT(TEST_GLM_SHUFFLE3(0, 1, 2) == GLM_ZYX) + TEST_SUCCESS +} + +/* Deprecated */ + +TEST_IMPL(MACRO_glm_vec3_dup) { + vec3 v1 = {13.0f, 12.0f, 11.0f}, v2; + + glm_vec3_dup(v1, v2); + + ASSERTIFY(test_assert_vec3_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec3_flipsign) { + vec3 v1 = {13.0f, -12.0f, 11.0f}, + v2 = {13.0f, -12.0f, 11.0f}, + v3 = {-13.0f, 12.0f, -11.0f}; + + glm_vec3_flipsign(v1); + glmc_vec3_flipsign(v2); + + ASSERTIFY(test_assert_vec3_eq(v1, v3)) + ASSERTIFY(test_assert_vec3_eq(v2, v3)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec3_flipsign_to) { + vec3 v1 = {13.0f, -12.0f, 11.0f}, + v2 = {-13.0f, 12.0f, -11.0f}, + v3, v4; + + glm_vec3_flipsign_to(v1, v3); + glmc_vec3_flipsign_to(v1, v4); + + ASSERTIFY(test_assert_vec3_eq(v2, v3)) + ASSERTIFY(test_assert_vec3_eq(v2, v4)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec3_inv) { + vec3 v1 = {13.0f, -12.0f, 11.0f}, + v2 = {13.0f, -12.0f, 11.0f}, + v3 = {-13.0f, 12.0f, -11.0f}; + + glm_vec3_inv(v1); + glmc_vec3_inv(v2); + + ASSERTIFY(test_assert_vec3_eq(v1, v3)) + ASSERTIFY(test_assert_vec3_eq(v2, v3)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec3_inv_to) { + vec3 v1 = {13.0f, -12.0f, 11.0f}, + v2 = 
{-13.0f, 12.0f, -11.0f}, + v3, v4; + + glm_vec3_inv_to(v1, v3); + glmc_vec3_inv_to(v1, v4); + + ASSERTIFY(test_assert_vec3_eq(v3, v4)) + ASSERTIFY(test_assert_vec3_eq(v2, v3)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec3_mulv) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3, v4; + + glm_vec3_mulv(v1, v2, v3); + glmc_vec3_mulv(v1, v2, v4); + + ASSERTIFY(test_assert_vec3_eq(v3, v4)) + + ASSERT(test_eq(v1[0] * v2[0], v3[0])) + ASSERT(test_eq(v1[1] * v2[1], v3[1])) + ASSERT(test_eq(v1[2] * v2[2], v3[2])) + + TEST_SUCCESS +} + +#endif /* CGLM_TEST_VEC3_ONCE */ + +/* --- */ + +TEST_IMPL(GLM_PREFIX, vec3) { + vec4 v4 = {10.0f, 9.0f, 8.0f, 7.0f}; + vec3 v3; + + GLM(vec3)(v4, v3); + + ASSERT(test_eq(v3[0], v4[0])) + ASSERT(test_eq(v3[1], v4[1])) + ASSERT(test_eq(v3[2], v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_copy) { + vec3 v1 = {10.0f, 9.0f, 8.0f}; + vec3 v2 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_copy)(v1, v2); + + ASSERTIFY(test_assert_vec3_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_zero) { + vec3 v1 = {10.0f, 9.0f, 8.0f}; + vec3 v2 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_zero)(v1); + GLM(vec3_zero)(v2); + + ASSERTIFY(test_assert_vec3_eq(v1, GLM_VEC3_ZERO)) + ASSERTIFY(test_assert_vec3_eq(v1, GLM_VEC3_ZERO)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_one) { + vec3 v1 = {10.0f, 9.0f, 8.0f}; + vec3 v2 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_one)(v1); + GLM(vec3_one)(v2); + + ASSERTIFY(test_assert_vec3_eq(v1, GLM_VEC3_ONE)) + ASSERTIFY(test_assert_vec3_eq(v1, GLM_VEC3_ONE)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_dot) { + vec3 a = {10.0f, 9.0f, 8.0f}; + vec3 b = {1.0f, 2.0f, 3.0f}; + float dot1, dot2; + + dot1 = GLM(vec3_dot)(a, b); + dot2 = a[0] * b[0] + a[1] * b[1] + a[2] * b[2]; + + ASSERT(test_eq(dot1, dot2)) + + TEST_SUCCESS +} + + +TEST_IMPL(GLM_PREFIX, dot) { + /* SAME AS VEC3_DOT */ + + vec3 a = {10.0f, 9.0f, 8.0f}; + vec3 b = {1.0f, 2.0f, 3.0f}; + float dot1, dot2; + + dot1 = GLM(vec3_dot)(a, b); + dot2 = a[0] * b[0] + a[1] * b[1] + a[2] * b[2]; + + ASSERT(test_eq(dot1, dot2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_norm2) { + vec3 a = {10.0f, 9.0f, 8.0f}; + float n1, n2; + + n1 = GLM(vec3_norm2)(a); + n2 = a[0] * a[0] + a[1] * a[1] + a[2] * a[2]; + + ASSERT(test_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_norm) { + vec3 a = {10.0f, 9.0f, 8.0f}; + float n1, n2; + + n1 = GLM(vec3_norm)(a); + n2 = sqrtf(a[0] * a[0] + a[1] * a[1] + a[2] * a[2]); + + ASSERT(test_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_norm_one) { + vec3 a = {-10.0f, 9.0f, -8.0f}; + float n1, n2; + + n1 = GLM(vec3_norm_one)(a); + n2 = fabsf(a[0]) + fabsf(a[1]) + fabsf(a[2]); + + ASSERT(test_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_norm_inf) { + vec3 a = {-10.0f, 9.0f, -8.0f}; + float n1, n2; + + n1 = GLM(vec3_norm_inf)(a); + n2 = fabsf(a[0]); + + if (n2 < fabsf(a[1])) + n2 = fabsf(a[1]); + + if (n2 < fabsf(a[2])) + n2 = fabsf(a[2]); + + ASSERT(test_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_add) { + vec4 a = {-10.0f, 9.0f, -8.0f}; + vec4 b = {12.0f, 19.0f, -18.0f}; + vec4 c, d; + + c[0] = a[0] + b[0]; + c[1] = a[1] + b[1]; + c[2] = a[2] + b[2]; + + GLM(vec3_add)(a, b, d); + + ASSERTIFY(test_assert_vec3_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_adds) { + vec4 a = {-10.0f, 9.0f, -8.0f}; + vec4 c, d; + float s = 7.0f; + + c[0] = a[0] + s; + c[1] = a[1] + s; + c[2] = a[2] + s; + + GLM(vec3_adds)(a, s, d); + + ASSERTIFY(test_assert_vec3_eq(c, d)) + + TEST_SUCCESS +} 
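+
+/* The arithmetic tests in this header (vec3_add, vec3_adds, vec3_sub, ...)
+ * share one pattern: compute the expected result element by element in
+ * plain C, then compare it against the library output with
+ * test_eq()/ASSERTIFY().  An illustrative sketch of the pattern only, using
+ * names that already appear in this file; the real cases are the TEST_IMPL
+ * bodies around this comment:
+ *
+ *   vec3 a = {1.0f, 2.0f, 3.0f}, b = {4.0f, 5.0f, 6.0f}, expected, out;
+ *
+ *   expected[0] = a[0] + b[0];
+ *   expected[1] = a[1] + b[1];
+ *   expected[2] = a[2] + b[2];
+ *
+ *   GLM(vec3_add)(a, b, out);
+ *   ASSERTIFY(test_assert_vec3_eq(expected, out))
+ */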
+ +TEST_IMPL(GLM_PREFIX, vec3_sub) { + vec4 a = {-10.0f, 9.0f, -8.0f}; + vec4 b = {12.0f, 19.0f, -18.0f}; + vec4 c, d; + + c[0] = a[0] - b[0]; + c[1] = a[1] - b[1]; + c[2] = a[2] - b[2]; + + GLM(vec3_sub)(a, b, d); + + ASSERTIFY(test_assert_vec3_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_subs) { + vec4 a = {-10.0f, 9.0f, -8.0f}; + vec4 c, d; + float s = 7.0f; + + c[0] = a[0] - s; + c[1] = a[1] - s; + c[2] = a[2] - s; + + GLM(vec3_subs)(a, s, d); + + ASSERTIFY(test_assert_vec3_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_mul) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3; + + GLM(vec3_mul)(v1, v2, v3); + + ASSERT(test_eq(v1[0] * v2[0], v3[0])) + ASSERT(test_eq(v1[1] * v2[1], v3[1])) + ASSERT(test_eq(v1[2] * v2[2], v3[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_scale) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2; + float s = 7.0f; + + GLM(vec3_scale)(v1, s, v2); + + ASSERT(test_eq(v1[0] * s, v2[0])) + ASSERT(test_eq(v1[1] * s, v2[1])) + ASSERT(test_eq(v1[2] * s, v2[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_scale_as) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2; + float s = 7.0f; + float norm; + + GLM(vec3_scale_as)(v1, s, v2); + + norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2]); + if (norm == 0.0f) { + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + TEST_SUCCESS + } + + norm = s / norm; + + ASSERT(test_eq(v1[0] * norm, v2[0])) + ASSERT(test_eq(v1[1] * norm, v2[1])) + ASSERT(test_eq(v1[2] * norm, v2[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_div) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3; + + GLM(vec3_div)(v1, v2, v3); + + ASSERT(test_eq(v1[0] / v2[0], v3[0])) + ASSERT(test_eq(v1[1] / v2[1], v3[1])) + ASSERT(test_eq(v1[2] / v2[2], v3[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_divs) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2; + float s = 7.0f; + + GLM(vec3_divs)(v1, s, v2); + + ASSERT(test_eq(v1[0] / s, v2[0])) + ASSERT(test_eq(v1[1] / s, v2[1])) + ASSERT(test_eq(v1[2] / s, v2[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_addadd) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3 = {1.0f, 2.0f, 3.0f}, + v4 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_addadd)(v1, v2, v4); + + ASSERT(test_eq(v3[0] + v1[0] + v2[0], v4[0])) + ASSERT(test_eq(v3[1] + v1[1] + v2[1], v4[1])) + ASSERT(test_eq(v3[2] + v1[2] + v2[2], v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_subadd) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3 = {1.0f, 2.0f, 3.0f}, + v4 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_subadd)(v1, v2, v4); + + ASSERT(test_eq(v3[0] + v1[0] - v2[0], v4[0])) + ASSERT(test_eq(v3[1] + v1[1] - v2[1], v4[1])) + ASSERT(test_eq(v3[2] + v1[2] - v2[2], v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_muladd) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3 = {1.0f, 2.0f, 3.0f}, + v4 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_muladd)(v1, v2, v4); + + ASSERT(test_eq(v3[0] + v1[0] * v2[0], v4[0])) + ASSERT(test_eq(v3[1] + v1[1] * v2[1], v4[1])) + ASSERT(test_eq(v3[2] + v1[2] * v2[2], v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_muladds) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {1.0f, 2.0f, 3.0f}, + v3 = {1.0f, 2.0f, 3.0f}; + float s = 9.0f; + + GLM(vec3_muladds)(v1, s, v3); + + ASSERT(test_eq(v2[0] + v1[0] * s, v3[0])) + ASSERT(test_eq(v2[1] + v1[1] * s, v3[1])) + ASSERT(test_eq(v2[2] + v1[2] * s, v3[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_maxadd) { + vec3 v1 = 
{2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3 = {1.0f, 2.0f, 3.0f}, + v4 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_maxadd)(v1, v2, v4); + + ASSERT(test_eq(v3[0] + glm_max(v1[0], v2[0]), v4[0])) + ASSERT(test_eq(v3[1] + glm_max(v1[1], v2[1]), v4[1])) + ASSERT(test_eq(v3[2] + glm_max(v1[2], v2[2]), v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_minadd) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3 = {1.0f, 2.0f, 3.0f}, + v4 = {1.0f, 2.0f, 3.0f}; + + GLM(vec3_minadd)(v1, v2, v4); + + ASSERT(test_eq(v3[0] + glm_min(v1[0], v2[0]), v4[0])) + ASSERT(test_eq(v3[1] + glm_min(v1[1], v2[1]), v4[1])) + ASSERT(test_eq(v3[2] + glm_min(v1[2], v2[2]), v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_negate_to) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3, v4; + + GLM(vec3_negate_to)(v1, v3); + GLM(vec3_negate_to)(v2, v4); + + ASSERT(test_eq(-v1[0], v3[0])) + ASSERT(test_eq(-v1[1], v3[1])) + ASSERT(test_eq(-v1[2], v3[2])) + + ASSERT(test_eq(-v2[0], v4[0])) + ASSERT(test_eq(-v2[1], v4[1])) + ASSERT(test_eq(-v2[2], v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_negate) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f}, + v3 = {2.0f, -3.0f, 4.0f}, + v4 = {-3.0f, 4.0f, -5.0f}; + + GLM(vec3_negate)(v1); + GLM(vec3_negate)(v2); + + ASSERT(test_eq(-v1[0], v3[0])) + ASSERT(test_eq(-v1[1], v3[1])) + ASSERT(test_eq(-v1[2], v3[2])) + + ASSERT(test_eq(-v2[0], v4[0])) + ASSERT(test_eq(-v2[1], v4[1])) + ASSERT(test_eq(-v2[2], v4[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_normalize) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2 = {2.0f, -3.0f, 4.0f}; + float s = 1.0f; + float norm; + + GLM(vec3_normalize)(v2); + + norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2]); + if (norm == 0.0f) { + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + TEST_SUCCESS + } + + norm = s / norm; + + ASSERT(test_eq(v1[0] * norm, v2[0])) + ASSERT(test_eq(v1[1] * norm, v2[1])) + ASSERT(test_eq(v1[2] * norm, v2[2])) + + glm_vec3_zero(v1); + GLM(vec3_normalize)(v1); + ASSERTIFY(test_assert_vec3_eq(v1, GLM_VEC3_ZERO)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_normalize_to) { + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2; + float s = 1.0f; + float norm; + + GLM(vec3_normalize_to)(v1, v2); + + norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2]); + if (norm == 0.0f) { + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + TEST_SUCCESS + } + + norm = s / norm; + + ASSERT(test_eq(v1[0] * norm, v2[0])) + ASSERT(test_eq(v1[1] * norm, v2[1])) + ASSERT(test_eq(v1[2] * norm, v2[2])) + + glm_vec3_zero(v1); + GLM(vec3_normalize_to)(v1, v2); + ASSERTIFY(test_assert_vec3_eq(v2, GLM_VEC3_ZERO)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, normalize) { + /* SAME AS VEC3_NORMALIZE */ + + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2 = {2.0f, -3.0f, 4.0f}; + float s = 1.0f; + float norm; + + GLM(vec3_normalize)(v2); + + norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2]); + if (norm == 0.0f) { + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + TEST_SUCCESS + } + + norm = s / norm; + + ASSERT(test_eq(v1[0] * norm, v2[0])) + ASSERT(test_eq(v1[1] * norm, v2[1])) + ASSERT(test_eq(v1[2] * norm, v2[2])) + + glm_vec3_zero(v1); + GLM(vec3_normalize)(v1); + ASSERTIFY(test_assert_vec3_eq(v1, GLM_VEC3_ZERO)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, normalize_to) { + /* SAME AS VEC3_NORMALIZE_TO */ + + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2; 
+ float s = 1.0f; + float norm; + + GLM(vec3_normalize_to)(v1, v2); + + norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2]); + if (norm == 0.0f) { + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + TEST_SUCCESS + } + + norm = s / norm; + + ASSERT(test_eq(v1[0] * norm, v2[0])) + ASSERT(test_eq(v1[1] * norm, v2[1])) + ASSERT(test_eq(v1[2] * norm, v2[2])) + + glm_vec3_zero(v1); + GLM(vec3_normalize_to)(v1, v2); + ASSERTIFY(test_assert_vec3_eq(v2, GLM_VEC3_ZERO)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_cross) { + /* (u2.v3 - u3.v2, u3.v1 - u1.v3, u1.v2 - u2.v1) */ + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2 = {12.0f, -31.0f, 43.0f}, v3, v4; + + GLM(vec3_cross)(v1, v2, v3); + + v4[0] = v1[1] * v2[2] - v1[2] * v2[1]; + v4[1] = v1[2] * v2[0] - v1[0] * v2[2]; + v4[2] = v1[0] * v2[1] - v1[1] * v2[0]; + + ASSERTIFY(test_assert_vec3_eq(v3, v4)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_crossn) { + /* (u2.v3 - u3.v2, u3.v1 - u1.v3, u1.v2 - u2.v1) */ + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2 = {12.0f, -31.0f, 43.0f}, v3, v4; + + GLM(vec3_crossn)(v1, v2, v3); + + v4[0] = v1[1] * v2[2] - v1[2] * v2[1]; + v4[1] = v1[2] * v2[0] - v1[0] * v2[2]; + v4[2] = v1[0] * v2[1] - v1[1] * v2[0]; + + glm_normalize(v4); + + ASSERTIFY(test_assert_vec3_eq(v3, v4)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, cross) { + /* SAME AS VEC3_CROSS */ + + /* (u2.v3 - u3.v2, u3.v1 - u1.v3, u1.v2 - u2.v1) */ + vec3 v1 = {2.0f, -3.0f, 4.0f}, v2 = {12.0f, -31.0f, 43.0f}, v3, v4; + + GLM(vec3_cross)(v1, v2, v3); + + v4[0] = v1[1] * v2[2] - v1[2] * v2[1]; + v4[1] = v1[2] * v2[0] - v1[0] * v2[2]; + v4[2] = v1[0] * v2[1] - v1[1] * v2[0]; + + ASSERTIFY(test_assert_vec3_eq(v3, v4)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_angle) { + vec3 v1 = {1.0f, 0.0f, 0.0f}, + v2 = {1.0f, 0.0f, 1.0f}, + v3 = {0.0f, 1.0f, 0.0f}; + float a; + + a = GLM(vec3_angle)(v1, v1); + ASSERT(!isinf(a)) + ASSERT(!isnan(a)) + ASSERT(test_eq(a, 0.0f)) + + a = GLM(vec3_angle)(v1, v2); + ASSERT(!isinf(a)) + ASSERT(!isnan(a)) + ASSERT(test_eq(a, GLM_PI_4f)) + + a = GLM(vec3_angle)(v1, v3); + ASSERT(!isinf(a)) + ASSERT(!isnan(a)) + ASSERT(test_eq(a, GLM_PI_2f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_rotate) { + vec3 v1 = {1.0f, 0.0f, 0.0f}, v2 = {1.0f, 1.0f, 1.0f}; + + /* rotate X around Y = -Z */ + GLM(vec3_rotate)(v1, GLM_PI_2f, GLM_YUP); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -1.0f)) + + /* rotate -Z around X = Y */ + GLM(vec3_rotate)(v1, GLM_PI_2f, GLM_XUP); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 1.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + /* rotate Y around Z = -X */ + GLM(vec3_rotate)(v1, GLM_PI_2f, GLM_ZUP); + + ASSERT(test_eq(v1[0], -1.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + /* rotate v2 around Y by 90deg */ + GLM(vec3_rotate)(v2, GLM_PI_2f, GLM_YUP); + + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], -1.0f)) + + /* rotate v2 around Y by 90deg */ + GLM(vec3_rotate)(v2, GLM_PI_2f, GLM_YUP); + + ASSERT(test_eq(v2[0], -1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], -1.0f)) + + /* rotate v2 around Y by 90deg */ + GLM(vec3_rotate)(v2, GLM_PI_2f, GLM_YUP); + + ASSERT(test_eq(v2[0], -1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + /* rotate v2 around X by 90deg */ + GLM(vec3_rotate)(v2, GLM_PI_2f, GLM_XUP); + + ASSERT(test_eq(v2[0], -1.0f)) + ASSERT(test_eq(v2[1], -1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + /* rotate v2 
around Z by 90deg */ + GLM(vec3_rotate)(v2, GLM_PI_2f, GLM_ZUP); + + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], -1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_rotate_m4) { + vec3 v1 = {1.0f, 0.0f, 0.0f}, v2 = {1.0f, 1.0f, 1.0f}; + mat4 x, y, z; + + glm_rotate_make(x, GLM_PI_2f, GLM_XUP); + glm_rotate_make(y, GLM_PI_2f, GLM_YUP); + glm_rotate_make(z, GLM_PI_2f, GLM_ZUP); + + /* rotate X around Y = -Z */ + GLM(vec3_rotate_m4)(y, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -1.0f)) + + /* rotate -Z around X = Y */ + GLM(vec3_rotate_m4)(x, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 1.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + /* rotate Y around X = -X */ + GLM(vec3_rotate_m4)(z, v1, v1); + + ASSERT(test_eq(v1[0], -1.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + /* rotate v2 around Y by 90deg */ + GLM(vec3_rotate_m4)(y, v2, v2); + + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], -1.0f)) + + /* rotate v2 around Y by 90deg */ + GLM(vec3_rotate_m4)(y, v2, v2); + + ASSERT(test_eq(v2[0], -1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], -1.0f)) + + /* rotate v2 around Y by 90deg */ + GLM(vec3_rotate_m4)(y, v2, v2); + + ASSERT(test_eq(v2[0], -1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + /* rotate v2 around X by 90deg */ + GLM(vec3_rotate_m4)(x, v2, v2); + + ASSERT(test_eq(v2[0], -1.0f)) + ASSERT(test_eq(v2[1], -1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + /* rotate v2 around Z by 90deg */ + GLM(vec3_rotate_m4)(z, v2, v2); + + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], -1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + TEST_SUCCESS +} + + +TEST_IMPL(GLM_PREFIX, vec3_rotate_m3) { + vec3 v1 = {1.0f, 0.0f, 0.0f}, v2 = {1.0f, 1.0f, 1.0f}; + mat4 x0, y0, z0; + mat3 x, y, z; + + glm_rotate_make(x0, GLM_PI_2f, GLM_XUP); + glm_rotate_make(y0, GLM_PI_2f, GLM_YUP); + glm_rotate_make(z0, GLM_PI_2f, GLM_ZUP); + + glm_mat4_pick3(x0, x); + glm_mat4_pick3(y0, y); + glm_mat4_pick3(z0, z); + + /* rotate X around Y = -Z */ + GLM(vec3_rotate_m3)(y, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], -1.0f)) + + /* rotate -Z around X = Y */ + GLM(vec3_rotate_m3)(x, v1, v1); + + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 1.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + /* rotate Y around Z = -X */ + GLM(vec3_rotate_m3)(z, v1, v1); + + ASSERT(test_eq(v1[0], -1.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + + /* rotate v2 around Y by 90deg */ + GLM(vec3_rotate_m3)(y, v2, v2); + + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], -1.0f)) + + /* rotate v2 around Y by 90deg */ + GLM(vec3_rotate_m3)(y, v2, v2); + + ASSERT(test_eq(v2[0], -1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], -1.0f)) + + /* rotate v2 around Y by 90deg */ + GLM(vec3_rotate_m3)(y, v2, v2); + + ASSERT(test_eq(v2[0], -1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + /* rotate v2 around X by 90deg */ + GLM(vec3_rotate_m3)(x, v2, v2); + + ASSERT(test_eq(v2[0], -1.0f)) + ASSERT(test_eq(v2[1], -1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + /* rotate v2 around Z by 90deg */ + GLM(vec3_rotate_m3)(z, v2, v2); + + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], -1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_proj) { + vec3 v1 = {3.0f, 4.0f, 0.0f}, + v2 = {10.0f, 
0.0f, 0.0f}, + v3 = {0.0f, 10.0f, 0.0f}, + v4 = {3.0f, 0.0f, 0.0f}, + v5 = {0.0f, 4.0f, 0.0f}, + v6; + + GLM(vec3_proj)(v1, v2, v6); + ASSERTIFY(test_assert_vec3_eq(v4, v6)) + + GLM(vec3_proj)(v1, v3, v6); + ASSERTIFY(test_assert_vec3_eq(v5, v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_center) { + vec3 v1 = {30.0f, 0.0f, 0.0f}, + v2 = {0.0f, 0.0f, 0.0f}, + v3 = {15.0f, 0.0f, 0.0f}, + v4 = {3.0f, 10.0f, 120.0f}, + v5 = {0.46f, 4.0f, 14.0f}, + v6; + + GLM(vec3_center)(v1, v2, v6); + ASSERTIFY(test_assert_vec3_eq(v3, v6)) + + GLM(vec3_center)(v4, v5, v6); + ASSERT(test_eq((v4[0] + v5[0]) * 0.5f, v6[0])) + ASSERT(test_eq((v4[1] + v5[1]) * 0.5f, v6[1])) + ASSERT(test_eq((v4[2] + v5[2]) * 0.5f, v6[2])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_distance2) { + vec3 v1 = {30.0f, 0.0f, 0.0f}, + v2 = {0.0f, 0.0f, 0.0f}, + v3 = {3.0f, 10.0f, 120.0f}, + v4 = {0.46f, 4.0f, 14.0f}; + float d; + + d = GLM(vec3_distance2)(v1, v2); + ASSERT(test_eq(d, 30.0f * 30.0f)) + + d = GLM(vec3_distance2)(v3, v4); + ASSERT(test_eq(powf(v3[0] - v4[0], 2.0f) + + powf(v3[1] - v4[1], 2.0f) + + powf(v3[2] - v4[2], 2.0f), d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_distance) { + vec3 v1 = {30.0f, 0.0f, 0.0f}, + v2 = {0.0f, 0.0f, 0.0f}, + v3 = {3.0f, 10.0f, 120.0f}, + v4 = {0.46f, 4.0f, 14.0f}; + float d; + + d = GLM(vec3_distance)(v1, v2); + ASSERT(test_eq(d, 30.0f)) + + d = GLM(vec3_distance)(v3, v4); + ASSERT(test_eq(sqrtf(powf(v3[0] - v4[0], 2.0f) + + powf(v3[1] - v4[1], 2.0f) + + powf(v3[2] - v4[2], 2.0f)), d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_maxv) { + vec3 v1, v2, v3; + vec3 v5 = {-1.456f, -1.456f, 241.456f}; + vec3 v6 = {11.0f, 11.0f, 11.0f}; + vec3 v7 = {78.0f, -78.0f, 7.0f}; + + GLM(vec3_maxv)(v5, v6, v1); + GLM(vec3_maxv)(v5, v7, v2); + GLM(vec3_maxv)(v6, v7, v3); + + ASSERT(test_eq(v1[0], 11.0f)) + ASSERT(test_eq(v1[1], 11.0f)) + ASSERT(test_eq(v1[2], 241.456f)) + + ASSERT(test_eq(v2[0], 78.0f)) + ASSERT(test_eq(v2[1], -1.456f)) + ASSERT(test_eq(v2[2], 241.456f)) + + ASSERT(test_eq(v3[0], 78.0f)) + ASSERT(test_eq(v3[1], 11.0f)) + ASSERT(test_eq(v3[2], 11.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_minv) { + vec3 v1, v2, v3; + vec3 v5 = {-1.456f, -1.456f, 241.456f}; + vec3 v6 = {11.0f, 11.0f, 11.0f}; + vec3 v7 = {78.0f, -78.0f, 7.0f}; + + GLM(vec3_minv)(v5, v6, v1); + GLM(vec3_minv)(v5, v7, v2); + GLM(vec3_minv)(v6, v7, v3); + + ASSERT(test_eq(v1[0], -1.456f)) + ASSERT(test_eq(v1[1], -1.456f)) + ASSERT(test_eq(v1[2], 11.0f)) + + ASSERT(test_eq(v2[0], -1.456f)) + ASSERT(test_eq(v2[1], -78.0f)) + ASSERT(test_eq(v2[2], 7.0f)) + + ASSERT(test_eq(v3[0], 11.0f)) + ASSERT(test_eq(v3[1], -78.0f)) + ASSERT(test_eq(v3[2], 7.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_ortho) { + vec3 v1, v2, v3; + vec3 v5, v6, v7; + float a; + + test_rand_vec3(v1); + test_rand_vec3(v2); + test_rand_vec3(v3); + + GLM(vec3_ortho)(v1, v5); + GLM(vec3_ortho)(v2, v6); + GLM(vec3_ortho)(v3, v7); + + a = glm_vec3_angle(v1, v5); + ASSERT(!isinf(a)) + ASSERT(!isnan(a)) + ASSERT(test_eq(a, GLM_PI_2f)) + + a = glm_vec3_angle(v2, v6); + ASSERT(!isinf(a)) + ASSERT(!isnan(a)) + ASSERT(test_eq(a, GLM_PI_2f)) + + a = glm_vec3_angle(v3, v7); + ASSERT(!isinf(a)) + ASSERT(!isnan(a)) + ASSERT(test_eq(a, GLM_PI_2f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_clamp) { + vec3 v1 = {-1.456f, -11.456f, 31.456f}; + vec3 v2 = {0.110f, 111.0f, 11.0f}; + vec3 v3 = {78.0f, 32.0f, -78.0f}; + + GLM(vec3_clamp)(v1, -1.03f, 30.0f); + GLM(vec3_clamp)(v2, 0.11f, 111.0f); + GLM(vec3_clamp)(v3, -88.0f, 
70.0f); + + ASSERT(test_eq(v1[0], -1.03f)) + ASSERT(test_eq(v1[1], -1.03f)) + ASSERT(test_eq(v1[2], 30.0f)) + + ASSERT(test_eq(v2[0], 0.11f)) + ASSERT(test_eq(v2[1], 111.0f)) + ASSERT(test_eq(v2[2], 11.0f)) + + ASSERT(test_eq(v3[0], 70.0f)) + ASSERT(test_eq(v3[1], 32.0f)) + ASSERT(test_eq(v3[2], -78.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_lerp) { + vec3 v1 = {-100.0f, -200.0f, -10.0f}; + vec3 v2 = {100.0f, 200.0f, 10.0f}; + vec3 v3; + + GLM(vec3_lerp)(v1, v2, 0.5f, v3); + ASSERT(test_eq(v3[0], 0.0f)) + ASSERT(test_eq(v3[1], 0.0f)) + ASSERT(test_eq(v3[2], 0.0f)) + + GLM(vec3_lerp)(v1, v2, 0.75f, v3); + ASSERT(test_eq(v3[0], 50.0f)) + ASSERT(test_eq(v3[1], 100.0f)) + ASSERT(test_eq(v3[2], 5.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_lerpc) { + vec3 v1 = {-100.0f, -200.0f, -10.0f}; + vec3 v2 = {100.0f, 200.0f, 10.0f}; + vec3 v3; + + GLM(vec3_lerpc)(v1, v2, 0.5f, v3); + ASSERT(test_eq(v3[0], 0.0f)) + ASSERT(test_eq(v3[1], 0.0f)) + ASSERT(test_eq(v3[2], 0.0f)) + + GLM(vec3_lerpc)(v1, v2, 0.75f, v3); + ASSERT(test_eq(v3[0], 50.0f)) + ASSERT(test_eq(v3[1], 100.0f)) + ASSERT(test_eq(v3[2], 5.0f)) + + GLM(vec3_lerpc)(v1, v2, -1.75f, v3); + ASSERT(test_eq(v3[0], -100.0f)) + ASSERT(test_eq(v3[1], -200.0f)) + ASSERT(test_eq(v3[2], -10.0f)) + + GLM(vec3_lerpc)(v1, v2, 1.75f, v3); + ASSERT(test_eq(v3[0], 100.0f)) + ASSERT(test_eq(v3[1], 200.0f)) + ASSERT(test_eq(v3[2], 10.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_mix) { + vec3 v1 = {-100.0f, -200.0f, -10.0f}; + vec3 v2 = {100.0f, 200.0f, 10.0f}; + vec3 v3; + + GLM(vec3_mix)(v1, v2, 0.5f, v3); + ASSERT(test_eq(v3[0], 0.0f)) + ASSERT(test_eq(v3[1], 0.0f)) + ASSERT(test_eq(v3[2], 0.0f)) + + GLM(vec3_mix)(v1, v2, 0.75f, v3); + ASSERT(test_eq(v3[0], 50.0f)) + ASSERT(test_eq(v3[1], 100.0f)) + ASSERT(test_eq(v3[2], 5.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_mixc) { + vec3 v1 = {-100.0f, -200.0f, -10.0f}; + vec3 v2 = {100.0f, 200.0f, 10.0f}; + vec3 v3; + + GLM(vec3_mixc)(v1, v2, 0.5f, v3); + ASSERT(test_eq(v3[0], 0.0f)) + ASSERT(test_eq(v3[1], 0.0f)) + ASSERT(test_eq(v3[2], 0.0f)) + + GLM(vec3_mixc)(v1, v2, 0.75f, v3); + ASSERT(test_eq(v3[0], 50.0f)) + ASSERT(test_eq(v3[1], 100.0f)) + ASSERT(test_eq(v3[2], 5.0f)) + + GLM(vec3_mixc)(v1, v2, -1.75f, v3); + ASSERT(test_eq(v3[0], -100.0f)) + ASSERT(test_eq(v3[1], -200.0f)) + ASSERT(test_eq(v3[2], -10.0f)) + + GLM(vec3_mixc)(v1, v2, 1.75f, v3); + ASSERT(test_eq(v3[0], 100.0f)) + ASSERT(test_eq(v3[1], 200.0f)) + ASSERT(test_eq(v3[2], 10.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_step_uni) { + vec3 v1 = {-100.0f, -200.0f, -10.0f}; + vec3 v2; + + GLM(vec3_step_uni)(-2.5f, v1, v2); + ASSERT(test_eq(v2[0], 0.0f)) + ASSERT(test_eq(v2[1], 0.0f)) + ASSERT(test_eq(v2[2], 0.0f)) + + GLM(vec3_step_uni)(-10.0f, v1, v2); + ASSERT(test_eq(v2[0], 0.0f)) + ASSERT(test_eq(v2[1], 0.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + GLM(vec3_step_uni)(-1000.0f, v1, v2); + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_step) { + vec3 v1 = {-100.0f, -200.0f, -10.0f}; + vec3 s1 = {-100.0f, 0.0f, 10.0f}; + vec3 s2 = {100.0f, -220.0f, -10.0f}; + vec3 s3 = {100.0f, 200.0f, 10.0f}; + vec3 v2; + + GLM(vec3_step)(s1, v1, v2); + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], 0.0f)) + ASSERT(test_eq(v2[2], 0.0f)) + + GLM(vec3_step)(s2, v1, v2); + ASSERT(test_eq(v2[0], 0.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + + GLM(vec3_step)(s3, v1, v2); + ASSERT(test_eq(v2[0], 
0.0f)) + ASSERT(test_eq(v2[1], 0.0f)) + ASSERT(test_eq(v2[2], 0.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_smoothstep_uni) { + vec3 v1 = {-100.0f, -200.0f, -10.0f}; + vec3 v2; + + GLM(vec3_smoothstep_uni)(-200.0f, -100.0f, v1, v2); + ASSERT(test_eq_th(v2[0], 1.0f, 1e-5f)) + ASSERT(test_eq_th(v2[1], 0.0f, 1e-5f)) + ASSERT(test_eq_th(v2[2], 1.0f, 1e-5f)) + + GLM(vec3_smoothstep_uni)(-250.0f, -200.0f, v1, v2); + ASSERT(test_eq_th(v2[0], 1.0f, 1e-5f)) + ASSERT(test_eq_th(v2[1], 1.0f, 1e-5f)) + ASSERT(test_eq_th(v2[2], 1.0f, 1e-5f)) + + GLM(vec3_smoothstep_uni)(-200.0f, 200, v1, v2); + ASSERT(v2[0] > 0.0f && v2[0] < 0.25f) + ASSERT(test_eq(v2[1], 0.0f)) + ASSERT(v2[2] > 0.0f && v2[2] < 0.5f) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_smoothstep) { + vec3 v1 = {-100.0f, -200.0f, -10.0f}; + vec3 e1_0 = {-100.0f, 0.0f, -11.0f}; + vec3 e1_1 = {50.0f, 10.0f, 20.0f}; + vec3 e2_0 = {-180.0f, -300.0f, -93.0f}; + vec3 e2_1 = {100.0f, 120.0f, -10.0f}; + vec3 e3_0 = {-12.0f, 100.0f, 0.0f}; + vec3 e3_1 = {100.0f, 200.0f, 10.0f}; + vec3 v2; + + GLM(vec3_smoothstep)(e1_0, e1_1, v1, v2); + ASSERT(test_eq_th(v2[0], 0.0f, 1e-5f)) + ASSERT(test_eq_th(v2[1], 0.0f, 1e-5f)) + ASSERT(v2[2] > 0.0f && v2[2] < 0.1f) + + GLM(vec3_smoothstep)(e2_0, e2_1, v1, v2); + ASSERT(v2[0] > 0.0f && v2[0] < 0.25f) + ASSERT(v2[1] > 0.0f && v2[1] < 0.15f) + ASSERT(test_eq_th(v2[2], 1.0f, 1e-5f)) + + GLM(vec3_smoothstep)(e3_0, e3_1, v1, v2); + ASSERT(test_eq_th(v2[0], 0.0f, 1e-5f)) + ASSERT(test_eq_th(v2[1], 0.0f, 1e-5f)) + ASSERT(test_eq_th(v2[2], 0.0f, 1e-5f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_smoothinterp) { + vec3 e1_0 = {-100.0f, 0.0f, -11.0f}; + vec3 e1_1 = {50.0f, 10.0f, 20.0f}; + vec3 e2_0 = {80.0f, -220.0f, -19.0f}; + vec3 e2_1 = {100.0f, -200.0f, -10.0f}; + vec3 e3_0 = {-12.0f, 100.0f, 0.0f}; + vec3 e3_1 = {100.0f, 200.0f, 10.0f}; + vec3 v2; + + GLM(vec3_smoothinterp)(e1_0, e1_1, 0.5f, v2); + ASSERT(v2[0] >= e1_0[0] && v2[0] <= e1_1[0]) + ASSERT(v2[1] >= e1_0[1] && v2[1] <= e1_1[1]) + ASSERT(v2[2] >= e1_0[2] && v2[2] <= e1_1[2]) + + GLM(vec3_smoothinterp)(e2_0, e2_1, 0.5, v2); + ASSERT(v2[0] >= e2_0[0] && v2[0] <= e2_1[0]) + ASSERT(v2[1] >= e2_0[1] && v2[1] <= e2_1[1]) + ASSERT(v2[2] >= e2_0[2] && v2[2] <= e2_1[2]) + + GLM(vec3_smoothinterp)(e3_0, e3_1, 1.0, v2); + ASSERT(v2[0] >= e3_0[0] && v2[0] <= e3_1[0]) + ASSERT(v2[1] >= e3_0[1] && v2[1] <= e3_1[1]) + ASSERT(v2[2] >= e3_0[2] && v2[2] <= e3_1[2]) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_smoothinterpc) { + vec3 e1_0 = {-100.0f, 0.0f, -11.0f}; + vec3 e1_1 = {50.0f, 10.0f, 20.0f}; + vec3 e2_0 = {80.0f, -220.0f, -19.0f}; + vec3 e2_1 = {100.0f, -200.0f, -10.0f}; + vec3 e3_0 = {-12.0f, 100.0f, 0.0f}; + vec3 e3_1 = {100.0f, 200.0f, 10.0f}; + vec3 v2; + + GLM(vec3_smoothinterpc)(e1_0, e1_1, -0.5f, v2); + ASSERT(v2[0] >= e1_0[0] && v2[0] <= e1_1[0]) + ASSERT(v2[1] >= e1_0[1] && v2[1] <= e1_1[1]) + ASSERT(v2[2] >= e1_0[2] && v2[2] <= e1_1[2]) + + GLM(vec3_smoothinterpc)(e2_0, e2_1, 0.5f, v2); + ASSERT(v2[0] >= e2_0[0] && v2[0] <= e2_1[0]) + ASSERT(v2[1] >= e2_0[1] && v2[1] <= e2_1[1]) + ASSERT(v2[2] >= e2_0[2] && v2[2] <= e2_1[2]) + + GLM(vec3_smoothinterpc)(e3_0, e3_1, 2.0f, v2); + ASSERT(v2[0] >= e3_0[0] && v2[0] <= e3_1[0]) + ASSERT(v2[1] >= e3_0[1] && v2[1] <= e3_1[1]) + ASSERT(v2[2] >= e3_0[2] && v2[2] <= e3_1[2]) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_swizzle) { + vec3 v; + + /* ZYX */ + v[0] = 1; + v[1] = 2; + v[2] = 3; + + glm_vec3_swizzle(v, GLM_ZYX, v); + ASSERTIFY(test_assert_vec3_eq(v, (vec3){3, 2, 1})) + + 
glm_vec3_swizzle(v, GLM_XXX, v); + ASSERTIFY(test_assert_vec3_eq(v, (vec3){3, 3, 3})) + + v[0] = 1; + v[1] = 2; + v[2] = 3; + + glm_vec3_swizzle(v, GLM_YYY, v); + ASSERTIFY(test_assert_vec3_eq(v, (vec3){2, 2, 2})) + + v[0] = 1; + v[1] = 2; + v[2] = 3; + + glm_vec3_swizzle(v, GLM_ZZZ, v); + ASSERTIFY(test_assert_vec3_eq(v, (vec3){3, 3, 3})) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_broadcast) { + vec3 v1, v2, v3; + vec3 v5 = {-1.456f, -1.456f, -1.456f}; + vec3 v6 = {11.0f, 11.0f, 11.0f}; + vec3 v7 = {78.0f, 78.0f, 78.0f}; + + GLM(vec3_broadcast)(-1.456f, v1); + GLM(vec3_broadcast)(11.0f, v2); + GLM(vec3_broadcast)(78.0f, v3); + + ASSERTIFY(test_assert_vec3_eq(v1, v5)) + ASSERTIFY(test_assert_vec3_eq(v2, v6)) + ASSERTIFY(test_assert_vec3_eq(v3, v7)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_fill) { + vec3 v1, v2, v3; + vec3 v5 = {-1.456f, -1.456f, -1.456f}; + vec3 v6 = {11.0f, 11.0f, 11.0f}; + vec3 v7 = {78.0f, 78.0f, 78.0f}; + + GLM(vec3_fill)(v1, -1.456f); + GLM(vec3_fill)(v2, 11.0f); + GLM(vec3_fill)(v3, 78.0f); + + ASSERTIFY(test_assert_vec3_eq(v1, v5)) + ASSERTIFY(test_assert_vec3_eq(v2, v6)) + ASSERTIFY(test_assert_vec3_eq(v3, v7)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_eq) { + vec3 v1, v2, v3; + + GLM(vec3_fill)(v1, -1.456f); + GLM(vec3_fill)(v2, 11.0f); + GLM(vec3_fill)(v3, 78.1f); + + ASSERT(GLM(vec3_eq)(v1, -1.456f)) + ASSERT(GLM(vec3_eq)(v2, 11.0f)) + ASSERT(!GLM(vec3_eq)(v3, 78.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_eq_eps) { + vec3 v1, v2, v3; + + GLM(vec3_fill)(v1, -1.456f); + GLM(vec3_fill)(v2, 11.0f); + GLM(vec3_fill)(v3, 78.1f); + + ASSERT(GLM(vec3_eq_eps)(v1, -1.456f)) + ASSERT(GLM(vec3_eq_eps)(v2, 11.0f)) + ASSERT(!GLM(vec3_eq_eps)(v3, 78.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_eq_all) { + vec3 v1, v2, v3; + vec3 v4 = {2.104f, -3.012f, -4.10f}; + vec3 v5 = {-12.35f, -31.140f, -43.502f}; + + GLM(vec3_fill)(v1, -1.456f); + GLM(vec3_fill)(v2, 11.0f); + GLM(vec3_fill)(v3, 78.0f); + + ASSERT(GLM(vec3_eq_all)(v1)) + ASSERT(GLM(vec3_eq_all)(v2)) + ASSERT(GLM(vec3_eq_all)(v3)) + ASSERT(!GLM(vec3_eq_all)(v4)) + ASSERT(!GLM(vec3_eq_all)(v5)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_eqv) { + vec3 v1, v2, v3, v4, v5; + vec3 v6 = {-1.456f, -1.456f, -1.456f}; + vec3 v7 = {11.0f, 11.0f, 11.0f}; + vec3 v8 = {78.0f, 78.0f, -43.502f}; + + GLM(vec3_fill)(v1, -1.456f); + GLM(vec3_fill)(v2, 11.0f); + GLM(vec3_fill)(v3, 78.0f); + + test_rand_vec3(v4); + test_rand_vec3(v5); + + ASSERT(GLM(vec3_eqv)(v1, v6)) + ASSERT(GLM(vec3_eqv)(v2, v7)) + ASSERT(!GLM(vec3_eqv)(v3, v8)) + ASSERT(!GLM(vec3_eqv)(v4, v5)) + ASSERT(GLM(vec3_eqv)(v5, v5)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_eqv_eps) { + vec3 v1, v2, v3, v4, v5; + vec3 v6 = {-1.456f, -1.456f, -1.456f}; + vec3 v7 = {11.0f, 11.0f, 11.0f}; + vec3 v8 = {78.0f, 78.0f, -43.502f}; + + GLM(vec3_fill)(v1, -1.456f); + GLM(vec3_fill)(v2, 11.0f); + GLM(vec3_fill)(v3, 78.0f); + + test_rand_vec3(v4); + test_rand_vec3(v5); + + ASSERT(GLM(vec3_eqv_eps)(v1, v6)) + ASSERT(GLM(vec3_eqv_eps)(v2, v7)) + ASSERT(!GLM(vec3_eqv_eps)(v3, v8)) + ASSERT(!GLM(vec3_eqv_eps)(v4, v5)) + ASSERT(GLM(vec3_eqv_eps)(v5, v5)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_max) { + vec3 v1 = {2.104f, -3.012f, -4.10f}, v2 = {-12.35f, -31.140f, -43.502f}; + vec3 v3 = {INFINITY, 0.0f, 0.0f}, v4 = {NAN, INFINITY, 2.0f}; + vec3 v5 = {NAN, -1.0f, -1.0f}, v6 = {-1.0f, -11.0f, 11.0f}; + + ASSERT(test_eq(GLM(vec3_max)(v1), 2.104f)) + ASSERT(test_eq(GLM(vec3_max)(v2), -12.35f)) + ASSERT(isinf(GLM(vec3_max)(v3))) + 
ASSERT(isnan(GLM(vec3_max)(v4))) + ASSERT(isnan(GLM(vec3_max)(v5))) + ASSERT(test_eq(GLM(vec3_max)(v6), 11.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_min) { + vec3 v1 = {2.104f, -3.012f, -4.10f}, v2 = {-12.35f, -31.140f, -43.502f}; + vec3 v3 = {INFINITY, 0.0f, 0.0f}, v4 = {NAN, INFINITY, 2.0f}; + vec3 v5 = {NAN, -1.0f, -1.0f}, v6 = {-1.0f, -11.0f, 11.0f}; + + ASSERT(test_eq(GLM(vec3_min)(v1), -4.10f)) + ASSERT(test_eq(GLM(vec3_min)(v2), -43.502f)) + ASSERT(test_eq(GLM(vec3_min)(v3), 0.0f)) + ASSERT(isnan(GLM(vec3_min)(v4))) + ASSERT(isnan(GLM(vec3_min)(v5))) + ASSERT(test_eq(GLM(vec3_min)(v6), -11.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_isnan) { + vec3 v1 = {2.104f, -3.012f, -4.10f}, v2 = {-12.35f, -31.140f, -43.502f}; + vec3 v3 = {INFINITY, 0.0f, 0.0f}, v4 = {NAN, INFINITY, 2.0f}; + vec3 v5 = {NAN, -1.0f, -1.0f}, v6 = {-1.0f, -1.0f, 11.0f}; + + ASSERT(!GLM(vec3_isnan)(v1)) + ASSERT(!GLM(vec3_isnan)(v2)) + ASSERT(!GLM(vec3_isnan)(v3)) + ASSERT(GLM(vec3_isnan)(v4)) + ASSERT(GLM(vec3_isnan)(v5)) + ASSERT(!GLM(vec3_isnan)(v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_isinf) { + vec3 v1 = {2.104f, -3.012f, -4.10f}, v2 = {-12.35f, -31.140f, -43.502f}; + vec3 v3 = {INFINITY, 0.0f, 0.0f}, v4 = {NAN, INFINITY, 2.0f}; + vec3 v5 = {NAN, -1.0f, -1.0f}, v6 = {-1.0f, -1.0f, 11.0f}; + + ASSERT(!GLM(vec3_isinf)(v1)) + ASSERT(!GLM(vec3_isinf)(v2)) + ASSERT(GLM(vec3_isinf)(v3)) + ASSERT(GLM(vec3_isinf)(v4)) + ASSERT(!GLM(vec3_isinf)(v5)) + ASSERT(!GLM(vec3_isinf)(v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_isvalid) { + vec3 v1 = {2.104f, -3.012f, -4.10f}, v2 = {-12.35f, -31.140f, -43.502f}; + vec3 v3 = {INFINITY, 0.0f, 0.0f}, v4 = {NAN, INFINITY, 2.0f}; + vec3 v5 = {NAN, -1.0f, -1.0f}, v6 = {-1.0f, -1.0f, 11.0f}; + + ASSERT(GLM(vec3_isvalid)(v1)) + ASSERT(GLM(vec3_isvalid)(v2)) + ASSERT(!GLM(vec3_isvalid)(v3)) + ASSERT(!GLM(vec3_isvalid)(v4)) + ASSERT(!GLM(vec3_isvalid)(v5)) + ASSERT(GLM(vec3_isvalid)(v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_sign) { + vec3 v1 = {2.104f, -3.012f, -4.10f}, v2 = {-12.35f, -31.140f, -43.502f}; + vec3 v3, v4; + vec3 v5 = {1.0f, -1.0f, -1.0f}, v6 = {-1.0f, -1.0f, -1.0f}; + + GLM(vec3_sign)(v1, v3); + GLM(vec3_sign)(v2, v4); + + ASSERTIFY(test_assert_vec3_eq(v3, v5)) + ASSERTIFY(test_assert_vec3_eq(v4, v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_abs) { + vec3 v1 = {2.104f, -3.012f, -4.10f}, v2 = {-12.35f, -31.140f, -43.502f}; + vec3 v3, v4; + vec3 v5 = {2.104f, 3.012f, 4.10f}, v6 = {12.35f, 31.140f, 43.502f}; + + GLM(vec3_abs)(v1, v3); + GLM(vec3_abs)(v2, v4); + + ASSERTIFY(test_assert_vec3_eq(v3, v5)) + ASSERTIFY(test_assert_vec3_eq(v4, v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_fract) { + vec3 v1 = {2.104f, 3.012f, 4.10f}, v2 = {12.35f, 31.140f, 43.502f}, v3, v4; + vec3 v5 = {0.104f, 0.012f, 0.10f}, v6 = {0.35f, 0.140f, 0.502f}; + + GLM(vec3_fract)(v1, v3); + GLM(vec3_fract)(v2, v4); + + ASSERTIFY(test_assert_vec3_eq(v3, v5)) + ASSERTIFY(test_assert_vec3_eq(v4, v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_hadd) { + vec3 v1 = {2.0f, 3.0f, 4.0f}, v2 = {12.0f, 31.0f, 43.0f}; + float r1, r2, r3, r4; + + r1 = GLM(vec3_hadd)(v1); + r2 = GLM(vec3_hadd)(v2); + + r3 = v1[0] + v1[1] + v1[2]; + r4 = v2[0] + v2[1] + v2[2]; + + ASSERT(test_eq(r1, r3)) + ASSERT(test_eq(r2, r4)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec3_sqrt) { + vec3 v1 = {2.0f, 3.0f, 4.0f}, v2 = {12.0f, 31.0f, 43.0f}, v3, v4; + + GLM(vec3_sqrt)(v1, v3); + GLM(vec3_sqrt)(v2, v4); + + ASSERT(test_eq(sqrtf(v1[0]), v3[0])) + 
ASSERT(test_eq(sqrtf(v1[1]), v3[1])) + ASSERT(test_eq(sqrtf(v1[2]), v3[2])) + + ASSERT(test_eq(sqrtf(v2[0]), v4[0])) + ASSERT(test_eq(sqrtf(v2[1]), v4[1])) + ASSERT(test_eq(sqrtf(v2[2]), v4[2])) + + TEST_SUCCESS +} diff --git a/test/src/test_vec4.c b/test/src/test_vec4.c deleted file mode 100644 index 2e9d12e..0000000 --- a/test/src/test_vec4.c +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Copyright (c), Recep Aslantas. - * - * MIT License (MIT), http://opensource.org/licenses/MIT - * Full license can be found in the LICENSE file - */ - -#include "test_common.h" - -CGLM_INLINE -float -test_vec4_dot(vec4 a, vec4 b) { - return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3]; -} - -CGLM_INLINE -void -test_vec4_normalize_to(vec4 vec, vec4 dest) { - float norm; - - norm = glm_vec4_norm(vec); - - if (norm == 0.0f) { - dest[0] = dest[1] = dest[2] = dest[3] = 0.0f; - return; - } - - glm_vec4_scale(vec, 1.0f / norm, dest); -} - -float -test_vec4_norm2(vec4 vec) { - return test_vec4_dot(vec, vec); -} - -float -test_vec4_norm(vec4 vec) { - return sqrtf(test_vec4_dot(vec, vec)); -} - -void -test_vec4_maxv(vec4 v1, vec4 v2, vec4 dest) { - dest[0] = glm_max(v1[0], v2[0]); - dest[1] = glm_max(v1[1], v2[1]); - dest[2] = glm_max(v1[2], v2[2]); - dest[3] = glm_max(v1[3], v2[3]); -} - -void -test_vec4_minv(vec4 v1, vec4 v2, vec4 dest) { - dest[0] = glm_min(v1[0], v2[0]); - dest[1] = glm_min(v1[1], v2[1]); - dest[2] = glm_min(v1[2], v2[2]); - dest[3] = glm_min(v1[3], v2[3]); -} - -void -test_vec4_clamp(vec4 v, float minVal, float maxVal) { - v[0] = glm_clamp(v[0], minVal, maxVal); - v[1] = glm_clamp(v[1], minVal, maxVal); - v[2] = glm_clamp(v[2], minVal, maxVal); - v[3] = glm_clamp(v[3], minVal, maxVal); -} - -void -test_vec4(void **state) { - vec4 v, v1, v2, v3, v4; - vec4s vs1, vs2, vs3, vs4; - int i; - float d1, d2; - - for (i = 0; i < 1000; i++) { - /* 1. test SSE/SIMD dot product */ - test_rand_vec4(v); - d1 = glm_vec4_dot(v, v); - d2 = test_vec4_dot(v, v); - - assert_true(fabsf(d1 - d2) <= 0.000009); - - /* 2. test SIMD normalize */ - test_vec4_normalize_to(v, v1); - glm_vec4_normalize_to(v, v2); - glm_vec4_normalize(v); - - /* all must be same */ - test_assert_vec4_eq(v1, v2); - test_assert_vec4_eq(v, v2); - - /* 3. test SIMD norm */ - test_rand_vec4(v); - test_assert_eqf(test_vec4_norm(v), glm_vec4_norm(v)); - - /* 3. test SIMD norm2 */ - test_rand_vec4(v); - test_assert_eqf(test_vec4_norm2(v), glm_vec4_norm2(v)); - - /* 4. 
test SSE/SIMD distance */ - test_rand_vec4(v1); - test_rand_vec4(v2); - d1 = glm_vec4_distance(v1, v2); - d2 = sqrtf(powf(v1[0]-v2[0], 2.0f) + pow(v1[1]-v2[1], 2.0f) + pow(v1[2]-v2[2], 2.0f) + pow(v1[3]-v2[3], 2.0f)); - assert_true(fabsf(d1 - d2) <= 0.000009); - } - - /* test zero */ - glm_vec4_zero(v); - test_assert_vec4_eq(GLM_VEC4_ZERO, v); - - /* test one */ - glm_vec4_one(v); - test_assert_vec4_eq(GLM_VEC4_ONE, v); - - /* adds, subs, div, divs, mul */ - glm_vec4_add(v, GLM_VEC4_ONE, v); - assert_true(glmc_vec4_eq_eps(v, 2)); - - glm_vec4_adds(v, 10, v); - assert_true(glmc_vec4_eq_eps(v, 12)); - - glm_vec4_sub(v, GLM_VEC4_ONE, v); - assert_true(glmc_vec4_eq_eps(v, 11)); - - glm_vec4_subs(v, 1, v); - assert_true(glmc_vec4_eq_eps(v, 10)); - - glm_vec4_broadcast(2, v1); - glm_vec4_div(v, v1, v); - assert_true(glmc_vec4_eq_eps(v, 5)); - - glm_vec4_divs(v, 0.5, v); - assert_true(glmc_vec4_eq_eps(v, 10)); - - glm_vec4_mul(v, v1, v); - assert_true(glmc_vec4_eq_eps(v, 20)); - - glm_vec4_scale(v, 0.5, v); - assert_true(glmc_vec4_eq_eps(v, 10)); - - glm_vec4_normalize_to(v, v1); - glm_vec4_scale(v1, 0.8, v1); - glm_vec4_scale_as(v, 0.8, v); - test_assert_vec4_eq(v1, v); - - /* addadd, subadd, muladd */ - glm_vec4_one(v); - - glm_vec4_addadd(GLM_VEC4_ONE, GLM_VEC4_ONE, v); - assert_true(glmc_vec4_eq_eps(v, 3)); - - glm_vec4_subadd(GLM_VEC4_ONE, GLM_VEC4_ZERO, v); - assert_true(glmc_vec4_eq_eps(v, 4)); - - glm_vec4_broadcast(2, v1); - glm_vec4_broadcast(3, v2); - glm_vec4_muladd(v1, v2, v); - assert_true(glmc_vec4_eq_eps(v, 10)); - - /* min, max */ - test_rand_vec4(v1); - test_rand_vec4(v2); - - glm_vec4_maxv(v1, v2, v3); - test_vec4_maxv(v1, v2, v4); - test_assert_vec4_eq(v3, v4); - - glm_vec4_minv(v1, v2, v3); - test_vec4_minv(v1, v2, v4); - test_assert_vec4_eq(v3, v4); - - glm_vec4_print(v3, stderr); - glm_vec4_print(v4, stderr); - - /* clamp */ - glm_vec4_clamp(v3, 0.1, 0.8); - test_vec4_clamp(v4, 0.1, 0.8); - test_assert_vec4_eq(v3, v4); - - glm_vec4_print(v3, stderr); - glm_vec4_print(v4, stderr); - - assert_true(v3[0] >= 0.0999 && v3[0] <= 0.80001); /* rounding erros */ - assert_true(v3[1] >= 0.0999 && v3[1] <= 0.80001); - assert_true(v3[2] >= 0.0999 && v3[2] <= 0.80001); - assert_true(v3[3] >= 0.0999 && v3[3] <= 0.80001); - - /* swizzle */ - - /* ZYX */ - v1[0] = 1; - v1[1] = 2; - v1[2] = 3; - v1[3] = 4; - - glm_vec4_swizzle(v1, GLM_WZYX, v1); - test_assert_vec4_eq(v1, (vec4){4, 3, 2, 1}); - - glm_vec4_swizzle(v1, GLM_XXXX, v1); - test_assert_vec4_eq(v1, (vec4){4, 4, 4, 4}); - - v1[0] = 1; - v1[1] = 2; - v1[2] = 3; - v1[3] = 4; - - glm_vec4_swizzle(v1, GLM_YYYY, v1); - test_assert_vec4_eq(v1, (vec4){2, 2, 2, 2}); - - v1[0] = 1; - v1[1] = 2; - v1[2] = 3; - v1[3] = 4; - - glm_vec4_swizzle(v1, GLM_ZZZZ, v1); - test_assert_vec4_eq(v1, (vec4){3, 3, 3, 3}); - - v1[0] = 1; - v1[1] = 2; - v1[2] = 3; - v1[3] = 4; - - glm_vec4_swizzle(v1, GLM_WWWW, v1); - test_assert_vec4_eq(v1, (vec4){4, 4, 4, 4}); - - /* structs */ - vs1 = test_rand_vec4s(); - vs2 = test_rand_vec4s(); - - vs3 = glms_vec4_add(vs1, vs2); - vs4 = glms_vec4_maxv(vs1, vs3); - test_assert_vec4s_eq(vs3, vs4); -} diff --git a/test/src/test_vec4.h b/test/src/test_vec4.h new file mode 100644 index 0000000..5e20c63 --- /dev/null +++ b/test/src/test_vec4.h @@ -0,0 +1,1420 @@ +/* + * Copyright (c), Recep Aslantas. 
+ * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#include "test_common.h" + +#define TEST_GLM_SHUFFLE4(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2)|(w)) + +#ifndef CGLM_TEST_VEC4_ONCE +#define CGLM_TEST_VEC4_ONCE + +/* Macros */ + +TEST_IMPL(MACRO_GLM_VEC4_ONE_INIT) { + vec4 v = GLM_VEC4_ONE_INIT; + + ASSERT(test_eq(v[0], 1.0f)) + ASSERT(test_eq(v[1], 1.0f)) + ASSERT(test_eq(v[2], 1.0f)) + ASSERT(test_eq(v[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_VEC4_ZERO_INIT) { + vec4 v = GLM_VEC4_ZERO_INIT; + + ASSERT(test_eq(v[0], 0.0f)) + ASSERT(test_eq(v[1], 0.0f)) + ASSERT(test_eq(v[2], 0.0f)) + ASSERT(test_eq(v[3], 0.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_VEC4_ONE) { + ASSERT(test_eq(GLM_VEC4_ONE[0], 1.0f)) + ASSERT(test_eq(GLM_VEC4_ONE[1], 1.0f)) + ASSERT(test_eq(GLM_VEC4_ONE[2], 1.0f)) + ASSERT(test_eq(GLM_VEC4_ONE[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_VEC4_ZERO) { + ASSERT(test_eq(GLM_VEC4_ZERO[0], 0.0f)) + ASSERT(test_eq(GLM_VEC4_ZERO[1], 0.0f)) + ASSERT(test_eq(GLM_VEC4_ZERO[2], 0.0f)) + ASSERT(test_eq(GLM_VEC4_ZERO[3], 0.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_XXXX) { + ASSERT(TEST_GLM_SHUFFLE4(0, 0, 0, 0) == GLM_XXXX) + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_YYYY) { + ASSERT(TEST_GLM_SHUFFLE4(1, 1, 1, 1) == GLM_YYYY) + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_ZZZZ) { + ASSERT(TEST_GLM_SHUFFLE4(2, 2, 2, 2) == GLM_ZZZZ) + TEST_SUCCESS +} + +TEST_IMPL(MACRO_GLM_WZYX) { + ASSERT(TEST_GLM_SHUFFLE4(0, 1, 2, 3) == GLM_WZYX) + TEST_SUCCESS +} + +/* Deprecated */ + +TEST_IMPL(MACRO_glm_vec4_dup) { + vec4 v1 = {13.0f, 12.0f, 11.0f, 56.0f}, v2; + + glm_vec4_dup(v1, v2); + + ASSERTIFY(test_assert_vec4_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec4_flipsign) { + vec4 v1 = {13.0f, -12.0f, 11.0f, 56.0f}, + v2 = {13.0f, -12.0f, 11.0f, 56.0f}, + v3 = {-13.0f, 12.0f, -11.0f, -56.0f}; + + glm_vec4_flipsign(v1); + glmc_vec4_flipsign(v2); + + ASSERTIFY(test_assert_vec4_eq(v1, v3)) + ASSERTIFY(test_assert_vec4_eq(v2, v3)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec4_flipsign_to) { + vec4 v1 = {13.0f, -12.0f, 11.0f, 56.0f}, + v2 = {-13.0f, 12.0f, -11.0f, -56.0f}, + v3, v4; + + glm_vec4_flipsign_to(v1, v3); + glmc_vec4_flipsign_to(v1, v4); + + ASSERTIFY(test_assert_vec4_eq(v2, v3)) + ASSERTIFY(test_assert_vec4_eq(v2, v4)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec4_inv) { + vec4 v1 = {13.0f, -12.0f, 11.0f, 56.0f}, + v2 = {13.0f, -12.0f, 11.0f, 56.0f}, + v3 = {-13.0f, 12.0f, -11.0f, -56.0f}; + + glm_vec4_inv(v1); + glmc_vec4_inv(v2); + + ASSERTIFY(test_assert_vec4_eq(v1, v3)) + ASSERTIFY(test_assert_vec4_eq(v2, v3)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec4_inv_to) { + vec4 v1 = {13.0f, -12.0f, 11.0f, 56.0f}, + v2 = {-13.0f, 12.0f, -11.0f, -56.0f}, + v3, v4; + + glm_vec4_inv_to(v1, v3); + glmc_vec4_inv_to(v1, v4); + + ASSERTIFY(test_assert_vec4_eq(v3, v4)) + ASSERTIFY(test_assert_vec4_eq(v2, v3)) + + TEST_SUCCESS +} + +TEST_IMPL(MACRO_glm_vec4_mulv) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 56.0f}, + v2 = {-3.0f, 4.0f, -5.0f, 56.0f}, + v3, v4; + + glm_vec4_mulv(v1, v2, v3); + glmc_vec4_mulv(v1, v2, v4); + + ASSERTIFY(test_assert_vec4_eq(v3, v4)) + + ASSERT(test_eq(v1[0] * v2[0], v3[0])) + ASSERT(test_eq(v1[1] * v2[1], v3[1])) + ASSERT(test_eq(v1[2] * v2[2], v3[2])) + ASSERT(test_eq(v1[3] * v2[3], v3[3])) + + TEST_SUCCESS +} + +#endif /* CGLM_TEST_VEC4_ONCE */ + +/* --- */ + +TEST_IMPL(GLM_PREFIX, vec4) { + vec4 v1 = {10.0f, 9.0f, 8.0f}; + vec4 v2 = {10.0f, 9.0f, 8.0f, 7.0f}; + 
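The GLM_XXXX/GLM_YYYY/GLM_ZZZZ/GLM_WZYX checks above pin down the mask layout: TEST_GLM_SHUFFLE4 packs one 2-bit source-lane index per destination lane. A small worked sketch of that encoding and a reference decode; ref_swizzle4 is illustrative only, not the library's swizzle code.

/* GLM_WZYX == TEST_GLM_SHUFFLE4(0, 1, 2, 3)
           == (0 << 6) | (1 << 4) | (2 << 2) | 3  == 0x1B */
static void ref_swizzle4(const float v[4], int mask, float dest[4]) {
  float t[4];
  t[0] = v[(mask >> 0) & 3];  /* bits 0-1 pick the source of dest[0] */
  t[1] = v[(mask >> 2) & 3];
  t[2] = v[(mask >> 4) & 3];
  t[3] = v[(mask >> 6) & 3];
  dest[0] = t[0]; dest[1] = t[1]; dest[2] = t[2]; dest[3] = t[3];
}

/* With mask 0x1B and v = {1, 2, 3, 4} this yields {4, 3, 2, 1}, which is
   exactly what the GLM_WZYX case in the vec4_swizzle test asserts. */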
vec4 v3; + + GLM(vec4)(v1, 7.0f, v3); + + ASSERTIFY(test_assert_vec4_eq(v2, v3)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_copy3) { + vec4 v4 = {10.0f, 9.0f, 8.0f, 7.0f}; + vec3 v3; + + GLM(vec4_copy3)(v4, v3); + + ASSERTIFY(test_assert_vec3_eq(v3, v4)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_copy) { + vec4 v1 = {10.0f, 9.0f, 8.0f, 78.0f}; + vec4 v2 = {1.0f, 2.0f, 3.0f, 4.0f}; + + GLM(vec4_copy)(v1, v2); + + ASSERTIFY(test_assert_vec4_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_ucopy) { + vec4 v1 = {10.0f, 9.0f, 8.0f, 78.0f}; + vec4 v2 = {1.0f, 2.0f, 3.0f, 4.0f}; + + GLM(vec4_ucopy)(v1, v2); + + ASSERTIFY(test_assert_vec4_eq(v1, v2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_zero) { + vec4 v1 = {10.0f, 9.0f, 8.0f, 78.0f}; + vec4 v2 = {1.0f, 2.0f, 3.0f, 4.0f}; + + GLM(vec4_zero)(v1); + GLM(vec4_zero)(v2); + + ASSERTIFY(test_assert_vec4_eq(v1, GLM_VEC4_ZERO)) + ASSERTIFY(test_assert_vec4_eq(v1, GLM_VEC4_ZERO)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_one) { + vec4 v1 = {10.0f, 9.0f, 8.0f, 78.0f}; + vec4 v2 = {1.0f, 2.0f, 3.0f, 4.0f}; + + GLM(vec4_one)(v1); + GLM(vec4_one)(v2); + + ASSERTIFY(test_assert_vec4_eq(v1, GLM_VEC4_ONE)) + ASSERTIFY(test_assert_vec4_eq(v1, GLM_VEC4_ONE)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_dot) { + vec4 a = {10.0f, 9.0f, 8.0f, 78.0f}; + vec4 b = {1.0f, 2.0f, 3.0f, 4.0f}; + float dot1, dot2; + + dot1 = GLM(vec4_dot)(a, b); + dot2 = a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3]; + + ASSERT(test_eq(dot1, dot2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_norm2) { + vec4 a = {10.0f, 9.0f, 8.0f, 78.0f}; + float n1, n2; + + n1 = GLM(vec4_norm2)(a); + n2 = a[0] * a[0] + a[1] * a[1] + a[2] * a[2] + a[3] * a[3]; + + ASSERT(test_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_norm) { + vec4 a = {10.0f, 9.0f, 8.0f, 78.0f}; + float n1, n2; + + n1 = GLM(vec4_norm)(a); + n2 = sqrtf(a[0] * a[0] + a[1] * a[1] + a[2] * a[2] + a[3] * a[3]); + + ASSERT(test_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_norm_one) { + vec4 a = {-10.0f, 9.0f, -8.0f, 78.0f}; + float n1, n2; + + n1 = GLM(vec4_norm_one)(a); + n2 = fabsf(a[0]) + fabsf(a[1]) + fabsf(a[2]) + fabsf(a[3]); + + ASSERT(test_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_norm_inf) { + vec4 a = {-10.0f, 9.0f, -8.0f, 78.0f}; + float n1, n2; + + n1 = GLM(vec4_norm_inf)(a); + n2 = fabsf(a[0]); + + if (n2 < fabsf(a[1])) + n2 = fabsf(a[1]); + + if (n2 < fabsf(a[2])) + n2 = fabsf(a[2]); + + if (n2 < fabsf(a[3])) + n2 = fabsf(a[3]); + + ASSERT(test_eq(n1, n2)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_add) { + vec4 a = {-10.0f, 9.0f, -8.0f, 56.0f}; + vec4 b = {12.0f, 19.0f, -18.0f, 1.0f}; + vec4 c, d; + + c[0] = a[0] + b[0]; + c[1] = a[1] + b[1]; + c[2] = a[2] + b[2]; + c[3] = a[3] + b[3]; + + GLM(vec4_add)(a, b, d); + + ASSERTIFY(test_assert_vec4_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_adds) { + vec4 a = {-10.0f, 9.0f, -8.0f, 56.0f}; + vec4 c, d; + float s = 7.0f; + + c[0] = a[0] + s; + c[1] = a[1] + s; + c[2] = a[2] + s; + c[3] = a[3] + s; + + GLM(vec4_adds)(a, s, d); + + ASSERTIFY(test_assert_vec4_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_sub) { + vec4 a = {-10.0f, 9.0f, -8.0f, 56.0f}; + vec4 b = {12.0f, 19.0f, -18.0f, 1.0f}; + vec4 c, d; + + c[0] = a[0] - b[0]; + c[1] = a[1] - b[1]; + c[2] = a[2] - b[2]; + c[3] = a[3] - b[3]; + + GLM(vec4_sub)(a, b, d); + + ASSERTIFY(test_assert_vec4_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_subs) { + vec4 a = 
{-10.0f, 9.0f, -8.0f, 74.0f}; + vec4 c, d; + float s = 7.0f; + + c[0] = a[0] - s; + c[1] = a[1] - s; + c[2] = a[2] - s; + c[3] = a[3] - s; + + GLM(vec4_subs)(a, s, d); + + ASSERTIFY(test_assert_vec4_eq(c, d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_mul) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 56.0f}, + v2 = {-3.0f, 4.0f, -5.0f, 46.0f}, + v3; + + GLM(vec4_mul)(v1, v2, v3); + + ASSERT(test_eq(v1[0] * v2[0], v3[0])) + ASSERT(test_eq(v1[1] * v2[1], v3[1])) + ASSERT(test_eq(v1[2] * v2[2], v3[2])) + ASSERT(test_eq(v1[3] * v2[3], v3[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_scale) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 5.0f}, v2; + float s = 7.0f; + + GLM(vec4_scale)(v1, s, v2); + + ASSERT(test_eq(v1[0] * s, v2[0])) + ASSERT(test_eq(v1[1] * s, v2[1])) + ASSERT(test_eq(v1[2] * s, v2[2])) + ASSERT(test_eq(v1[3] * s, v2[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_scale_as) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 5.0f}, v2; + float s = 7.0f; + float norm; + + GLM(vec4_scale_as)(v1, s, v2); + + norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2] + v1[3] * v1[3]); + if (norm == 0.0f) { + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + ASSERT(test_eq(v1[3], 0.0f)) + + TEST_SUCCESS + } + + norm = s / norm; + + ASSERT(test_eq(v1[0] * norm, v2[0])) + ASSERT(test_eq(v1[1] * norm, v2[1])) + ASSERT(test_eq(v1[2] * norm, v2[2])) + ASSERT(test_eq(v1[3] * norm, v2[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_div) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 40.0f}, + v2 = {-3.0f, 4.0f, -5.0f, 2.0f}, + v3; + + GLM(vec4_div)(v1, v2, v3); + + ASSERT(test_eq(v1[0] / v2[0], v3[0])) + ASSERT(test_eq(v1[1] / v2[1], v3[1])) + ASSERT(test_eq(v1[2] / v2[2], v3[2])) + ASSERT(test_eq(v1[3] / v2[3], v3[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_divs) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 40.0f}, v2; + float s = 7.0f; + + GLM(vec4_divs)(v1, s, v2); + + ASSERT(test_eq(v1[0] / s, v2[0])) + ASSERT(test_eq(v1[1] / s, v2[1])) + ASSERT(test_eq(v1[2] / s, v2[2])) + ASSERT(test_eq(v1[3] / s, v2[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_addadd) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f, 20.0f}, + v3 = {1.0f, 2.0f, 3.0f, 130.0f}, + v4 = {1.0f, 2.0f, 3.0f, 130.0f}; + + GLM(vec4_addadd)(v1, v2, v4); + + ASSERT(test_eq(v3[0] + v1[0] + v2[0], v4[0])) + ASSERT(test_eq(v3[1] + v1[1] + v2[1], v4[1])) + ASSERT(test_eq(v3[2] + v1[2] + v2[2], v4[2])) + ASSERT(test_eq(v3[3] + v1[3] + v2[3], v4[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_subadd) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f, 20.0f}, + v3 = {1.0f, 2.0f, 3.0f, 130.0f}, + v4 = {1.0f, 2.0f, 3.0f, 130.0f}; + + GLM(vec4_subadd)(v1, v2, v4); + + ASSERT(test_eq(v3[0] + v1[0] - v2[0], v4[0])) + ASSERT(test_eq(v3[1] + v1[1] - v2[1], v4[1])) + ASSERT(test_eq(v3[2] + v1[2] - v2[2], v4[2])) + ASSERT(test_eq(v3[3] + v1[3] - v2[3], v4[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_muladd) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f, 20.0f}, + v3 = {1.0f, 2.0f, 3.0f, 130.0f}, + v4 = {1.0f, 2.0f, 3.0f, 130.0f}; + + GLM(vec4_muladd)(v1, v2, v4); + + ASSERT(test_eq(v3[0] + v1[0] * v2[0], v4[0])) + ASSERT(test_eq(v3[1] + v1[1] * v2[1], v4[1])) + ASSERT(test_eq(v3[2] + v1[2] * v2[2], v4[2])) + ASSERT(test_eq(v3[3] + v1[3] * v2[3], v4[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_muladds) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f, 20.0f}, + v3 = {-3.0f, 4.0f, -5.0f, 20.0f}; + float s = 9.0f; 
+ + GLM(vec4_muladds)(v1, s, v3); + + ASSERT(test_eq(v2[0] + v1[0] * s, v3[0])) + ASSERT(test_eq(v2[1] + v1[1] * s, v3[1])) + ASSERT(test_eq(v2[2] + v1[2] * s, v3[2])) + ASSERT(test_eq(v2[3] + v1[3] * s, v3[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_maxadd) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f, 20.0f}, + v3 = {1.0f, 2.0f, 3.0f, 130.0f}, + v4 = {1.0f, 2.0f, 3.0f, 130.0f}; + + GLM(vec4_maxadd)(v1, v2, v4); + + ASSERT(test_eq(v3[0] + glm_max(v1[0], v2[0]), v4[0])) + ASSERT(test_eq(v3[1] + glm_max(v1[1], v2[1]), v4[1])) + ASSERT(test_eq(v3[2] + glm_max(v1[2], v2[2]), v4[2])) + ASSERT(test_eq(v3[3] + glm_max(v1[3], v2[3]), v4[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_minadd) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 4.0f}, + v2 = {-3.0f, 4.0f, -5.0f, 20.0f}, + v3 = {1.0f, 2.0f, 3.0f, 130.0f}, + v4 = {1.0f, 2.0f, 3.0f, 130.0f}; + + GLM(vec4_minadd)(v1, v2, v4); + + ASSERT(test_eq(v3[0] + glm_min(v1[0], v2[0]), v4[0])) + ASSERT(test_eq(v3[1] + glm_min(v1[1], v2[1]), v4[1])) + ASSERT(test_eq(v3[2] + glm_min(v1[2], v2[2]), v4[2])) + ASSERT(test_eq(v3[3] + glm_min(v1[3], v2[3]), v4[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_negate_to) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 60.0f}, + v2 = {-3.0f, 4.0f, -5.0f, 34.0f}, + v3, v4; + + GLM(vec4_negate_to)(v1, v3); + GLM(vec4_negate_to)(v2, v4); + + ASSERT(test_eq(-v1[0], v3[0])) + ASSERT(test_eq(-v1[1], v3[1])) + ASSERT(test_eq(-v1[2], v3[2])) + ASSERT(test_eq(-v1[3], v3[3])) + + ASSERT(test_eq(-v2[0], v4[0])) + ASSERT(test_eq(-v2[1], v4[1])) + ASSERT(test_eq(-v2[2], v4[2])) + ASSERT(test_eq(-v2[3], v4[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_negate) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 60.0f}, + v2 = {-3.0f, 4.0f, -5.0f, 34.0f}, + v3 = {2.0f, -3.0f, 4.0f, 60.0f}, + v4 = {-3.0f, 4.0f, -5.0f, 34.0f}; + + GLM(vec4_negate)(v1); + GLM(vec4_negate)(v2); + + ASSERT(test_eq(-v1[0], v3[0])) + ASSERT(test_eq(-v1[1], v3[1])) + ASSERT(test_eq(-v1[2], v3[2])) + ASSERT(test_eq(-v1[3], v3[3])) + + ASSERT(test_eq(-v2[0], v4[0])) + ASSERT(test_eq(-v2[1], v4[1])) + ASSERT(test_eq(-v2[2], v4[2])) + ASSERT(test_eq(-v2[3], v4[3])) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_normalize) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 5.0f}, v2 = {2.0f, -3.0f, 4.0f, 5.0f}; + float s = 1.0f; + float norm; + + GLM(vec4_normalize)(v2); + + norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2] + v1[3] * v1[3]); + if (norm == 0.0f) { + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + ASSERT(test_eq(v1[3], 0.0f)) + + TEST_SUCCESS + } + + norm = s / norm; + + ASSERT(test_eq(v1[0] * norm, v2[0])) + ASSERT(test_eq(v1[1] * norm, v2[1])) + ASSERT(test_eq(v1[2] * norm, v2[2])) + ASSERT(test_eq(v1[3] * norm, v2[3])) + + glm_vec4_zero(v1); + GLM(vec4_normalize)(v1); + ASSERTIFY(test_assert_vec4_eq(v1, GLM_VEC4_ZERO)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_normalize_to) { + vec4 v1 = {2.0f, -3.0f, 4.0f, 5.0f}, v2; + float s = 1.0f; + float norm; + + GLM(vec4_normalize_to)(v1, v2); + + norm = sqrtf(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2] + v1[3] * v1[3]); + if (norm == 0.0f) { + ASSERT(test_eq(v1[0], 0.0f)) + ASSERT(test_eq(v1[1], 0.0f)) + ASSERT(test_eq(v1[2], 0.0f)) + ASSERT(test_eq(v1[3], 0.0f)) + + TEST_SUCCESS + } + + norm = s / norm; + + ASSERT(test_eq(v1[0] * norm, v2[0])) + ASSERT(test_eq(v1[1] * norm, v2[1])) + ASSERT(test_eq(v1[2] * norm, v2[2])) + ASSERT(test_eq(v1[3] * norm, v2[3])) + + glm_vec4_zero(v1); + GLM(vec4_normalize_to)(v1, v2); + 
ASSERTIFY(test_assert_vec4_eq(v2, GLM_VEC4_ZERO)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_distance2) { + vec4 v1 = {30.0f, 0.0f, 0.0f, 0.0f}, + v2 = {0.0f, 0.0f, 0.0f, 0.0f}, + v3 = {3.0f, 10.0f, 120.0f, 140.0f}, + v4 = {0.46f, 4.0f, 14.0f, 10.0f}; + float d; + + d = GLM(vec4_distance2)(v1, v2); + ASSERT(test_eq(d, 30.0f * 30.0f)) + + d = GLM(vec4_distance2)(v3, v4); + ASSERT(test_eq(powf(v3[0] - v4[0], 2.0f) + + powf(v3[1] - v4[1], 2.0f) + + powf(v3[2] - v4[2], 2.0f) + + powf(v3[3] - v4[3], 2.0f), d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_distance) { + vec4 v1 = {30.0f, 0.0f, 0.0f, 0.0f}, + v2 = {0.0f, 0.0f, 0.0f, 0.0f}, + v3 = {3.0f, 10.0f, 120.0f, 140.0f}, + v4 = {0.46f, 4.0f, 14.0f, 10.0f}; + float d; + + d = GLM(vec4_distance)(v1, v2); + ASSERT(test_eq(d, 30.0f)) + + d = GLM(vec4_distance)(v3, v4); + ASSERT(test_eq(sqrtf(powf(v3[0] - v4[0], 2.0f) + + powf(v3[1] - v4[1], 2.0f) + + powf(v3[2] - v4[2], 2.0f) + + powf(v3[3] - v4[3], 2.0f)), d)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_maxv) { + vec4 v1, v2, v3; + vec4 v5 = {-1.456f, -1.456f, 241.456f, 10.0f}; + vec4 v6 = {11.0f, 11.0f, 11.0f, 90.0f}; + vec4 v7 = {78.0f, -78.0f, 7.0f, 5.0f}; + + GLM(vec4_maxv)(v5, v6, v1); + GLM(vec4_maxv)(v5, v7, v2); + GLM(vec4_maxv)(v6, v7, v3); + + ASSERT(test_eq(v1[0], 11.0f)) + ASSERT(test_eq(v1[1], 11.0f)) + ASSERT(test_eq(v1[2], 241.456f)) + ASSERT(test_eq(v1[3], 90.0f)) + + ASSERT(test_eq(v2[0], 78.0f)) + ASSERT(test_eq(v2[1], -1.456f)) + ASSERT(test_eq(v2[2], 241.456f)) + ASSERT(test_eq(v2[3], 10.0f)) + + ASSERT(test_eq(v3[0], 78.0f)) + ASSERT(test_eq(v3[1], 11.0f)) + ASSERT(test_eq(v3[2], 11.0f)) + ASSERT(test_eq(v3[3], 90.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_minv) { + vec4 v1, v2, v3; + vec4 v5 = {-1.456f, -1.456f, 241.456f, 10.0f}; + vec4 v6 = {11.0f, 11.0f, 11.0f, 90.0f}; + vec4 v7 = {78.0f, -78.0f, 7.0f, 5.0f}; + + GLM(vec4_minv)(v5, v6, v1); + GLM(vec4_minv)(v5, v7, v2); + GLM(vec4_minv)(v6, v7, v3); + + ASSERT(test_eq(v1[0], -1.456f)) + ASSERT(test_eq(v1[1], -1.456f)) + ASSERT(test_eq(v1[2], 11.0f)) + ASSERT(test_eq(v1[3], 10.0f)) + + ASSERT(test_eq(v2[0], -1.456f)) + ASSERT(test_eq(v2[1], -78.0f)) + ASSERT(test_eq(v2[2], 7.0f)) + ASSERT(test_eq(v2[3], 5.0f)) + + ASSERT(test_eq(v3[0], 11.0f)) + ASSERT(test_eq(v3[1], -78.0f)) + ASSERT(test_eq(v3[2], 7.0f)) + ASSERT(test_eq(v3[3], 5.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_clamp) { + vec4 v1 = {-1.456f, -11.456f, 31.456f, 67.04f}; + vec4 v2 = {0.110f, 111.0f, 11.0f, 90.0f}; + vec4 v3 = {78.0f, 32.0f, -78.0f, 3.0f}; + + GLM(vec4_clamp)(v1, -1.03f, 30.0f); + GLM(vec4_clamp)(v2, 0.11f, 111.0f); + GLM(vec4_clamp)(v3, -88.0f, 70.0f); + + ASSERT(test_eq(v1[0], -1.03f)) + ASSERT(test_eq(v1[1], -1.03f)) + ASSERT(test_eq(v1[2], 30.0f)) + ASSERT(test_eq(v1[3], 30.0f)) + + ASSERT(test_eq(v2[0], 0.11f)) + ASSERT(test_eq(v2[1], 111.0f)) + ASSERT(test_eq(v2[2], 11.0f)) + ASSERT(test_eq(v2[3], 90.0f)) + + ASSERT(test_eq(v3[0], 70.0f)) + ASSERT(test_eq(v3[1], 32.0f)) + ASSERT(test_eq(v3[2], -78.0f)) + ASSERT(test_eq(v3[3], 3.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_lerp) { + vec4 v1 = {-100.0f, -200.0f, -10.0f, -10.0f}; + vec4 v2 = {100.0f, 200.0f, 10.0f, 10.0f}; + vec4 v3; + + GLM(vec4_lerp)(v1, v2, 0.5f, v3); + ASSERT(test_eq(v3[0], 0.0f)) + ASSERT(test_eq(v3[1], 0.0f)) + ASSERT(test_eq(v3[2], 0.0f)) + ASSERT(test_eq(v3[3], 0.0f)) + + GLM(vec4_lerp)(v1, v2, 0.75f, v3); + ASSERT(test_eq(v3[0], 50.0f)) + ASSERT(test_eq(v3[1], 100.0f)) + ASSERT(test_eq(v3[2], 5.0f)) + 
ASSERT(test_eq(v3[3], 5.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_lerpc) { + vec4 v1 = {-100.0f, -200.0f, -10.0f, -10.0f}; + vec4 v2 = {100.0f, 200.0f, 10.0f, 10.0f}; + vec4 v3; + + GLM(vec4_lerpc)(v1, v2, 0.5f, v3); + ASSERT(test_eq(v3[0], 0.0f)) + ASSERT(test_eq(v3[1], 0.0f)) + ASSERT(test_eq(v3[2], 0.0f)) + ASSERT(test_eq(v3[3], 0.0f)) + + GLM(vec4_lerpc)(v1, v2, 0.75f, v3); + ASSERT(test_eq(v3[0], 50.0f)) + ASSERT(test_eq(v3[1], 100.0f)) + ASSERT(test_eq(v3[2], 5.0f)) + ASSERT(test_eq(v3[3], 5.0f)) + + GLM(vec4_lerpc)(v1, v2, -1.75f, v3); + ASSERT(test_eq(v3[0], -100.0f)) + ASSERT(test_eq(v3[1], -200.0f)) + ASSERT(test_eq(v3[2], -10.0f)) + ASSERT(test_eq(v3[3], -10.0f)) + + GLM(vec4_lerpc)(v1, v2, 1.75f, v3); + ASSERT(test_eq(v3[0], 100.0f)) + ASSERT(test_eq(v3[1], 200.0f)) + ASSERT(test_eq(v3[2], 10.0f)) + ASSERT(test_eq(v3[3], 10.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_mix) { + vec4 v1 = {-100.0f, -200.0f, -10.0f, -10.0f}; + vec4 v2 = {100.0f, 200.0f, 10.0f, 10.0f}; + vec4 v3; + + GLM(vec4_mix)(v1, v2, 0.5f, v3); + ASSERT(test_eq(v3[0], 0.0f)) + ASSERT(test_eq(v3[1], 0.0f)) + ASSERT(test_eq(v3[2], 0.0f)) + ASSERT(test_eq(v3[3], 0.0f)) + + GLM(vec4_mix)(v1, v2, 0.75f, v3); + ASSERT(test_eq(v3[0], 50.0f)) + ASSERT(test_eq(v3[1], 100.0f)) + ASSERT(test_eq(v3[2], 5.0f)) + ASSERT(test_eq(v3[3], 5.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_mixc) { + vec4 v1 = {-100.0f, -200.0f, -10.0f, -10.0f}; + vec4 v2 = {100.0f, 200.0f, 10.0f, 10.0f}; + vec4 v3; + + GLM(vec4_mixc)(v1, v2, 0.5f, v3); + ASSERT(test_eq(v3[0], 0.0f)) + ASSERT(test_eq(v3[1], 0.0f)) + ASSERT(test_eq(v3[2], 0.0f)) + ASSERT(test_eq(v3[3], 0.0f)) + + GLM(vec4_mixc)(v1, v2, 0.75f, v3); + ASSERT(test_eq(v3[0], 50.0f)) + ASSERT(test_eq(v3[1], 100.0f)) + ASSERT(test_eq(v3[2], 5.0f)) + ASSERT(test_eq(v3[3], 5.0f)) + + GLM(vec4_mixc)(v1, v2, -1.75f, v3); + ASSERT(test_eq(v3[0], -100.0f)) + ASSERT(test_eq(v3[1], -200.0f)) + ASSERT(test_eq(v3[2], -10.0f)) + ASSERT(test_eq(v3[3], -10.0f)) + + GLM(vec4_mixc)(v1, v2, 1.75f, v3); + ASSERT(test_eq(v3[0], 100.0f)) + ASSERT(test_eq(v3[1], 200.0f)) + ASSERT(test_eq(v3[2], 10.0f)) + ASSERT(test_eq(v3[3], 10.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_step_uni) { + vec4 v1 = {-100.0f, -200.0f, -10.0f, -10.0f}; + vec4 v2; + + GLM(vec4_step_uni)(-2.5f, v1, v2); + ASSERT(test_eq(v2[0], 0.0f)) + ASSERT(test_eq(v2[1], 0.0f)) + ASSERT(test_eq(v2[2], 0.0f)) + ASSERT(test_eq(v2[3], 0.0f)) + + GLM(vec4_step_uni)(-10.0f, v1, v2); + ASSERT(test_eq(v2[0], 0.0f)) + ASSERT(test_eq(v2[1], 0.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + GLM(vec4_step_uni)(-1000.0f, v1, v2); + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_step) { + vec4 v1 = {-100.0f, -200.0f, -10.0f, -10.0f}; + vec4 s1 = {-100.0f, 0.0f, 10.0f, 10.0f}; + vec4 s2 = {100.0f, -220.0f, -10.0f, -10.0f}; + vec4 s3 = {100.0f, 200.0f, 10.0f, 10.0f}; + vec4 v2; + + GLM(vec4_step)(s1, v1, v2); + ASSERT(test_eq(v2[0], 1.0f)) + ASSERT(test_eq(v2[1], 0.0f)) + ASSERT(test_eq(v2[2], 0.0f)) + ASSERT(test_eq(v2[3], 0.0f)) + + GLM(vec4_step)(s2, v1, v2); + ASSERT(test_eq(v2[0], 0.0f)) + ASSERT(test_eq(v2[1], 1.0f)) + ASSERT(test_eq(v2[2], 1.0f)) + ASSERT(test_eq(v2[3], 1.0f)) + + GLM(vec4_step)(s3, v1, v2); + ASSERT(test_eq(v2[0], 0.0f)) + ASSERT(test_eq(v2[1], 0.0f)) + ASSERT(test_eq(v2[2], 0.0f)) + ASSERT(test_eq(v2[3], 0.0f)) + + TEST_SUCCESS +} + 
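The step assertions above, and the smoothstep tests that follow, match the usual GLSL-style definitions; a minimal scalar sketch of those definitions, for reference only (ref_step and ref_smoothstep are assumed names, not the library's functions).

static float ref_step(float edge, float x) {
  return (x < edge) ? 0.0f : 1.0f;
}

static float ref_smoothstep(float e0, float e1, float x) {
  float t = (x - e0) / (e1 - e0);
  if (t < 0.0f) t = 0.0f;
  if (t > 1.0f) t = 1.0f;
  return t * t * (3.0f - 2.0f * t);  /* Hermite ease between 0 and 1 */
}

/* e.g. ref_smoothstep(-200.0f, 200.0f, -100.0f) == 0.15625, which is why the
   corresponding assert only requires a value strictly between 0 and 0.25. */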
+TEST_IMPL(GLM_PREFIX, vec4_smoothstep_uni) { + vec4 v1 = {-100.0f, -200.0f, -10.0f, -10.0f}; + vec4 v2; + + GLM(vec4_smoothstep_uni)(-200.0f, -100.0f, v1, v2); + ASSERT(test_eq_th(v2[0], 1.0f, 1e-5f)) + ASSERT(test_eq_th(v2[1], 0.0f, 1e-5f)) + ASSERT(test_eq_th(v2[2], 1.0f, 1e-5f)) + ASSERT(test_eq_th(v2[3], 1.0f, 1e-5f)) + + GLM(vec4_smoothstep_uni)(-250.0f, -200.0f, v1, v2); + ASSERT(test_eq_th(v2[0], 1.0f, 1e-5f)) + ASSERT(test_eq_th(v2[1], 1.0f, 1e-5f)) + ASSERT(test_eq_th(v2[2], 1.0f, 1e-5f)) + ASSERT(test_eq_th(v2[3], 1.0f, 1e-5f)) + + GLM(vec4_smoothstep_uni)(-200.0f, 200.0f, v1, v2); + ASSERT(v2[0] > 0.0f && v2[0] < 0.25f) + ASSERT(test_eq(v2[1], 0.0f)) + ASSERT(v2[2] > 0.0f && v2[2] < 0.5f) + ASSERT(v2[3] > 0.0f && v2[3] < 0.5f) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_smoothstep) { + vec4 v1 = {-100.0f, -200.0f, -10.0f, -10.0f}; + vec4 e1_0 = {-100.0f, 0.0f, -11.0f, -11.0f}; + vec4 e1_1 = {50.0f, 10.0f, 20.0f, 20.0f}; + vec4 e2_0 = {-180.0f, -300.0f, -93.0f, -93.0f}; + vec4 e2_1 = {100.0f, 120.0f, -10.0f, -10.0f}; + vec4 e3_0 = {-12.0f, 100.0f, 0.0f, 0.0f}; + vec4 e3_1 = {100.0f, 200.0f, 10.0f, 10.0f}; + vec4 v2; + + GLM(vec4_smoothstep)(e1_0, e1_1, v1, v2); + ASSERT(test_eq_th(v2[0], 0.0f, 1e-5f)) + ASSERT(test_eq_th(v2[1], 0.0f, 1e-5f)) + ASSERT(v2[2] > 0.0f && v2[2] < 0.1f) + ASSERT(v2[3] > 0.0f && v2[3] < 0.1f) + + GLM(vec4_smoothstep)(e2_0, e2_1, v1, v2); + ASSERT(v2[0] > 0.0f && v2[0] < 0.25f) + ASSERT(v2[1] > 0.0f && v2[1] < 0.15f) + ASSERT(test_eq_th(v2[2], 1.0f, 1e-5f)) + ASSERT(test_eq_th(v2[3], 1.0f, 1e-5f)) + + GLM(vec4_smoothstep)(e3_0, e3_1, v1, v2); + ASSERT(test_eq_th(v2[0], 0.0f, 1e-5f)) + ASSERT(test_eq_th(v2[1], 0.0f, 1e-5f)) + ASSERT(test_eq_th(v2[2], 0.0f, 1e-5f)) + ASSERT(test_eq_th(v2[3], 0.0f, 1e-5f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_smoothinterp) { + vec4 e1_0 = {-100.0f, 0.0f, -11.0f, -11.0f}; + vec4 e1_1 = {50.0f, 10.0f, 20.0f, 20.0f}; + vec4 e2_0 = {80.0f, -220.0f, -19.0f, -19.0f}; + vec4 e2_1 = {100.0f, -200.0f, -10.0f, -10.0f}; + vec4 e3_0 = {-12.0f, 100.0f, 0.0f, 0.0f}; + vec4 e3_1 = {100.0f, 200.0f, 10.0f, 10.0f}; + vec4 v2; + + GLM(vec4_smoothinterp)(e1_0, e1_1, 0.5f, v2); + ASSERT(v2[0] >= e1_0[0] && v2[0] <= e1_1[0]) + ASSERT(v2[1] >= e1_0[1] && v2[1] <= e1_1[1]) + ASSERT(v2[2] >= e1_0[2] && v2[2] <= e1_1[2]) + ASSERT(v2[3] >= e1_0[3] && v2[3] <= e1_1[3]) + + GLM(vec4_smoothinterp)(e2_0, e2_1, 0.5f, v2); + ASSERT(v2[0] >= e2_0[0] && v2[0] <= e2_1[0]) + ASSERT(v2[1] >= e2_0[1] && v2[1] <= e2_1[1]) + ASSERT(v2[2] >= e2_0[2] && v2[2] <= e2_1[2]) + ASSERT(v2[3] >= e2_0[3] && v2[3] <= e2_1[3]) + + GLM(vec4_smoothinterp)(e3_0, e3_1, 1.0f, v2); + ASSERT(v2[0] >= e3_0[0] && v2[0] <= e3_1[0]) + ASSERT(v2[1] >= e3_0[1] && v2[1] <= e3_1[1]) + ASSERT(v2[2] >= e3_0[2] && v2[2] <= e3_1[2]) + ASSERT(v2[3] >= e3_0[3] && v2[3] <= e3_1[3]) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_smoothinterpc) { + vec4 e1_0 = {-100.0f, 0.0f, -11.0f, -11.0f}; + vec4 e1_1 = {50.0f, 10.0f, 20.0f, 20.0f}; + vec4 e2_0 = {80.0f, -220.0f, -19.0f, -19.0f}; + vec4 e2_1 = {100.0f, -200.0f, -10.0f, -10.0f}; + vec4 e3_0 = {-12.0f, 100.0f, 0.0f, 0.0f}; + vec4 e3_1 = {100.0f, 200.0f, 10.0f, 10.0f}; + vec4 v2; + + GLM(vec4_smoothinterpc)(e1_0, e1_1, -0.5f, v2); + ASSERT(v2[0] >= e1_0[0] && v2[0] <= e1_1[0]) + ASSERT(v2[1] >= e1_0[1] && v2[1] <= e1_1[1]) + ASSERT(v2[2] >= e1_0[2] && v2[2] <= e1_1[2]) + ASSERT(v2[3] >= e1_0[3] && v2[3] <= e1_1[3]) + + GLM(vec4_smoothinterpc)(e2_0, e2_1, 0.5f, v2); + ASSERT(v2[0] >= e2_0[0] && v2[0] <= e2_1[0]) + ASSERT(v2[1] 
>= e2_0[1] && v2[1] <= e2_1[1]) + ASSERT(v2[2] >= e2_0[2] && v2[2] <= e2_1[2]) + ASSERT(v2[3] >= e2_0[3] && v2[3] <= e2_1[3]) + + GLM(vec4_smoothinterpc)(e3_0, e3_1, 2.0f, v2); + ASSERT(v2[0] >= e3_0[0] && v2[0] <= e3_1[0]) + ASSERT(v2[1] >= e3_0[1] && v2[1] <= e3_1[1]) + ASSERT(v2[2] >= e3_0[2] && v2[2] <= e3_1[2]) + ASSERT(v2[3] >= e3_0[3] && v2[3] <= e3_1[3]) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_cubic) { + vec4 v1 = {125.0f, 25.0f, 5.0f, 1.0f}; + vec4 v2 = {216.0f, 36.0f, 6.0f, 1.0f}; + vec4 v3, v4; + + ASSERT(test_eq(v1[0], v1[2] * v1[2] * v1[2])) + ASSERT(test_eq(v1[1], v1[2] * v1[2])) + ASSERT(test_eq(v1[3], 1.0f)) + + ASSERT(test_eq(v2[0], v2[2] * v2[2] * v2[2])) + ASSERT(test_eq(v2[1], v2[2] * v2[2])) + ASSERT(test_eq(v2[3], 1.0f)) + + GLM(vec4_cubic)(test_rand(), v3); + ASSERT(test_eq(v3[0], v3[2] * v3[2] * v3[2])) + ASSERT(test_eq(v3[1], v3[2] * v3[2])) + ASSERT(test_eq(v3[3], 1.0f)) + + GLM(vec4_cubic)(test_rand(), v4); + ASSERT(test_eq(v4[0], v4[2] * v4[2] * v4[2])) + ASSERT(test_eq(v4[1], v4[2] * v4[2])) + ASSERT(test_eq(v4[3], 1.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_swizzle) { + vec4 v; + + /* ZYX */ + v[0] = 1; + v[1] = 2; + v[2] = 3; + v[3] = 4; + + glm_vec4_swizzle(v, GLM_WZYX, v); + ASSERTIFY(test_assert_vec4_eq(v, (vec4){4, 3, 2, 1})) + + glm_vec4_swizzle(v, GLM_XXXX, v); + ASSERTIFY(test_assert_vec4_eq(v, (vec4){4, 4, 4, 4})) + + v[0] = 1; + v[1] = 2; + v[2] = 3; + v[3] = 4; + + glm_vec4_swizzle(v, GLM_YYYY, v); + ASSERTIFY(test_assert_vec4_eq(v, (vec4){2, 2, 2, 2})) + + v[0] = 1; + v[1] = 2; + v[2] = 3; + v[3] = 4; + + glm_vec4_swizzle(v, GLM_ZZZZ, v); + ASSERTIFY(test_assert_vec4_eq(v, (vec4){3, 3, 3, 3})) + + v[0] = 1; + v[1] = 2; + v[2] = 3; + v[3] = 4; + + glm_vec4_swizzle(v, GLM_WWWW, v); + ASSERTIFY(test_assert_vec4_eq(v, (vec4){4, 4, 4, 4})) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_broadcast) { + vec4 v1, v2, v3; + vec4 v5 = {-1.456f, -1.456f, -1.456f, -1.456f}; + vec4 v6 = {11.0f, 11.0f, 11.0f, 11.0f}; + vec4 v7 = {78.0f, 78.0f, 78.0f, 78.0f}; + + GLM(vec4_broadcast)(-1.456f, v1); + GLM(vec4_broadcast)(11.0f, v2); + GLM(vec4_broadcast)(78.0f, v3); + + ASSERTIFY(test_assert_vec4_eq(v1, v5)) + ASSERTIFY(test_assert_vec4_eq(v2, v6)) + ASSERTIFY(test_assert_vec4_eq(v3, v7)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_fill) { + vec4 v1, v2, v3; + vec4 v5 = {-1.456f, -1.456f, -1.456f, -1.456f}; + vec4 v6 = {11.0f, 11.0f, 11.0f, 11.0f}; + vec4 v7 = {78.0f, 78.0f, 78.0f, 78.0f}; + + GLM(vec4_fill)(v1, -1.456f); + GLM(vec4_fill)(v2, 11.0f); + GLM(vec4_fill)(v3, 78.0f); + + ASSERTIFY(test_assert_vec4_eq(v1, v5)) + ASSERTIFY(test_assert_vec4_eq(v2, v6)) + ASSERTIFY(test_assert_vec4_eq(v3, v7)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_eq) { + vec4 v1, v2, v3; + + GLM(vec4_fill)(v1, -1.456f); + GLM(vec4_fill)(v2, 11.0f); + GLM(vec4_fill)(v3, 78.1f); + + ASSERT(GLM(vec4_eq)(v1, -1.456f)) + ASSERT(GLM(vec4_eq)(v2, 11.0f)) + ASSERT(!GLM(vec4_eq)(v3, 78.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_eq_eps) { + vec4 v1, v2, v3; + + GLM(vec4_fill)(v1, -1.456f); + GLM(vec4_fill)(v2, 11.0f); + GLM(vec4_fill)(v3, 78.1f); + + ASSERT(GLM(vec4_eq_eps)(v1, -1.456f)) + ASSERT(GLM(vec4_eq_eps)(v2, 11.0f)) + ASSERT(!GLM(vec4_eq_eps)(v3, 78.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_eq_all) { + vec4 v1, v2, v3; + vec4 v4 = {2.104f, -3.012f, -4.10f, -4.10f}; + vec4 v5 = {-12.35f, -31.140f, -43.502f, -43.502f}; + + GLM(vec4_fill)(v1, -1.456f); + GLM(vec4_fill)(v2, 11.0f); + GLM(vec4_fill)(v3, 78.0f); + + 
ASSERT(GLM(vec4_eq_all)(v1)) + ASSERT(GLM(vec4_eq_all)(v2)) + ASSERT(GLM(vec4_eq_all)(v3)) + ASSERT(!GLM(vec4_eq_all)(v4)) + ASSERT(!GLM(vec4_eq_all)(v5)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_eqv) { + vec4 v1, v2, v3, v4, v5; + vec4 v6 = {-1.456f, -1.456f, -1.456f, -1.456f}; + vec4 v7 = {11.0f, 11.0f, 11.0f, 11.0f}; + vec4 v8 = {78.0f, 78.0f, -43.502f, -43.502f}; + + GLM(vec4_fill)(v1, -1.456f); + GLM(vec4_fill)(v2, 11.0f); + GLM(vec4_fill)(v3, 78.0f); + + test_rand_vec4(v4); + test_rand_vec4(v5); + + ASSERT(GLM(vec4_eqv)(v1, v6)) + ASSERT(GLM(vec4_eqv)(v2, v7)) + ASSERT(!GLM(vec4_eqv)(v3, v8)) + ASSERT(!GLM(vec4_eqv)(v4, v5)) + ASSERT(GLM(vec4_eqv)(v5, v5)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_eqv_eps) { + vec4 v1, v2, v3, v4, v5; + vec4 v6 = {-1.456f, -1.456f, -1.456f, -1.456f}; + vec4 v7 = {11.0f, 11.0f, 11.0f, 11.0f}; + vec4 v8 = {78.0f, 78.0f, -43.502f, -43.502f}; + + GLM(vec4_fill)(v1, -1.456f); + GLM(vec4_fill)(v2, 11.0f); + GLM(vec4_fill)(v3, 78.0f); + + test_rand_vec4(v4); + test_rand_vec4(v5); + + ASSERT(GLM(vec4_eqv_eps)(v1, v6)) + ASSERT(GLM(vec4_eqv_eps)(v2, v7)) + ASSERT(!GLM(vec4_eqv_eps)(v3, v8)) + ASSERT(!GLM(vec4_eqv_eps)(v4, v5)) + ASSERT(GLM(vec4_eqv_eps)(v5, v5)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_max) { + vec4 v1 = {2.104f, -3.012f, -4.10f, -4.10f}; + vec4 v2 = {-12.35f, -31.140f, -43.502f, -43.502f}; + vec4 v3 = {INFINITY, 0.0f, 0.0f, 0.0f}; + vec4 v4 = {NAN, INFINITY, 2.0f, 2.0f}; + vec4 v5 = {NAN, -1.0f, -1.0f, -1.0f}; + vec4 v6 = {-1.0f, -11.0f, 11.0f, 11.0f}; + + ASSERT(test_eq(GLM(vec4_max)(v1), 2.104f)) + ASSERT(test_eq(GLM(vec4_max)(v2), -12.35f)) + ASSERT(isinf(GLM(vec4_max)(v3))) + ASSERT(isnan(GLM(vec4_max)(v4))) + ASSERT(isnan(GLM(vec4_max)(v5))) + ASSERT(test_eq(GLM(vec4_max)(v6), 11.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_min) { + vec4 v1 = {2.104f, -3.012f, -4.10f, -4.10f}; + vec4 v2 = {-12.35f, -31.140f, -43.502f, -43.502f}; + vec4 v3 = {INFINITY, 0.0f, 0.0f, 0.0f}; + vec4 v4 = {NAN, INFINITY, 2.0f, 2.0f}; + vec4 v5 = {NAN, -1.0f, -1.0f, -1.0f}; + vec4 v6 = {-1.0f, -11.0f, 11.0f, 11.0f}; + + ASSERT(test_eq(GLM(vec4_min)(v1), -4.10f)) + ASSERT(test_eq(GLM(vec4_min)(v2), -43.502f)) + ASSERT(test_eq(GLM(vec4_min)(v3), 0.0f)) + ASSERT(isnan(GLM(vec4_min)(v4))) + ASSERT(isnan(GLM(vec4_min)(v5))) + ASSERT(test_eq(GLM(vec4_min)(v6), -11.0f)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_isnan) { + vec4 v1 = {2.104f, -3.012f, -4.10f, -4.10f}; + vec4 v2 = {-12.35f, -31.140f, -43.502f, -43.502f}; + vec4 v3 = {INFINITY, 0.0f, 0.0f, 0.0f}; + vec4 v4 = {NAN, INFINITY, 2.0f, 2.0f}; + vec4 v5 = {NAN, -1.0f, -1.0f, -1.0f}; + vec4 v6 = {-1.0f, -1.0f, 11.0f, 11.0f}; + + ASSERT(!GLM(vec4_isnan)(v1)) + ASSERT(!GLM(vec4_isnan)(v2)) + ASSERT(!GLM(vec4_isnan)(v3)) + ASSERT(GLM(vec4_isnan)(v4)) + ASSERT(GLM(vec4_isnan)(v5)) + ASSERT(!GLM(vec4_isnan)(v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_isinf) { + vec4 v1 = {2.104f, -3.012f, -4.10f, -4.10f}; + vec4 v2 = {-12.35f, -31.140f, -43.502f, -43.502f}; + vec4 v3 = {INFINITY, 0.0f, 0.0f, 0.0f}; + vec4 v4 = {NAN, INFINITY, 2.0f, 2.0f}; + vec4 v5 = {NAN, -1.0f, -1.0f, -1.0f}; + vec4 v6 = {-1.0f, -1.0f, 11.0f, 11.0f}; + + ASSERT(!GLM(vec4_isinf)(v1)) + ASSERT(!GLM(vec4_isinf)(v2)) + ASSERT(GLM(vec4_isinf)(v3)) + ASSERT(GLM(vec4_isinf)(v4)) + ASSERT(!GLM(vec4_isinf)(v5)) + ASSERT(!GLM(vec4_isinf)(v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_isvalid) { + vec4 v1 = {2.104f, -3.012f, -4.10f, -4.10f}; + vec4 v2 = {-12.35f, -31.140f, -43.502f, -43.502f}; + 
vec4 v3 = {INFINITY, 0.0f, 0.0f, 0.0f}; + vec4 v4 = {NAN, INFINITY, 2.0f, 2.0f}; + vec4 v5 = {NAN, -1.0f, -1.0f, -1.0f}; + vec4 v6 = {-1.0f, -1.0f, 11.0f, 11.0f}; + + ASSERT(GLM(vec4_isvalid)(v1)) + ASSERT(GLM(vec4_isvalid)(v2)) + ASSERT(!GLM(vec4_isvalid)(v3)) + ASSERT(!GLM(vec4_isvalid)(v4)) + ASSERT(!GLM(vec4_isvalid)(v5)) + ASSERT(GLM(vec4_isvalid)(v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_sign) { + vec4 v1 = {2.104f, -3.012f, -4.10f, -4.10f}; + vec4 v2 = {-12.35f, -31.140f, -43.502f, -43.502f}; + vec4 v3, v4; + vec4 v5 = {1.0f, -1.0f, -1.0f, -1.0f}; + vec4 v6 = {-1.0f, -1.0f, -1.0f, -1.0f}; + + GLM(vec4_sign)(v1, v3); + GLM(vec4_sign)(v2, v4); + + ASSERTIFY(test_assert_vec4_eq(v3, v5)) + ASSERTIFY(test_assert_vec4_eq(v4, v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_abs) { + vec4 v1 = {2.104f, -3.012f, -4.10f, -4.10f}; + vec4 v2 = {-12.35f, -31.140f, -43.502f, -43.502f}; + vec4 v3, v4; + vec4 v5 = {2.104f, 3.012f, 4.10f, 4.10f}; + vec4 v6 = {12.35f, 31.140f, 43.502f, 43.502f}; + + GLM(vec4_abs)(v1, v3); + GLM(vec4_abs)(v2, v4); + + ASSERTIFY(test_assert_vec4_eq(v3, v5)) + ASSERTIFY(test_assert_vec4_eq(v4, v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_fract) { + vec4 v1 = {2.104f, 3.012f, 4.10f, 4.10f}; + vec4 v2 = {12.35f, 31.140f, 43.502f, 43.502f}; + vec4 v3, v4; + vec4 v5 = {0.104f, 0.012f, 0.10f, 0.10f}; + vec4 v6 = {0.35f, 0.140f, 0.502f, 0.502f}; + + GLM(vec4_fract)(v1, v3); + GLM(vec4_fract)(v2, v4); + + ASSERTIFY(test_assert_vec4_eq(v3, v5)) + ASSERTIFY(test_assert_vec4_eq(v4, v6)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_hadd) { + vec4 v1 = {2.0f, 3.0f, 4.0f, 4.0f}, v2 = {12.0f, 31.0f, 43.0f, 43.0f}; + float r1, r2, r3, r4; + + r1 = GLM(vec4_hadd)(v1); + r2 = GLM(vec4_hadd)(v2); + + r3 = v1[0] + v1[1] + v1[2] + v1[3]; + r4 = v2[0] + v2[1] + v2[2] + v2[3]; + + ASSERT(test_eq(r1, r3)) + ASSERT(test_eq(r2, r4)) + + TEST_SUCCESS +} + +TEST_IMPL(GLM_PREFIX, vec4_sqrt) { + vec4 v1 = {2.0f, 3.0f, 4.0f, 4.0f}, v2 = {12.0f, 31.0f, 43.0f, 43.0f}; + vec4 v3, v4; + + GLM(vec4_sqrt)(v1, v3); + GLM(vec4_sqrt)(v2, v4); + + ASSERT(test_eq(sqrtf(v1[0]), v3[0])) + ASSERT(test_eq(sqrtf(v1[1]), v3[1])) + ASSERT(test_eq(sqrtf(v1[2]), v3[2])) + ASSERT(test_eq(sqrtf(v1[3]), v3[3])) + + ASSERT(test_eq(sqrtf(v2[0]), v4[0])) + ASSERT(test_eq(sqrtf(v2[1]), v4[1])) + ASSERT(test_eq(sqrtf(v2[2]), v4[2])) + ASSERT(test_eq(sqrtf(v2[3]), v4[3])) + + TEST_SUCCESS +} diff --git a/test/src/tests.c b/test/src/tests.c new file mode 100644 index 0000000..ee7c488 --- /dev/null +++ b/test/src/tests.c @@ -0,0 +1,47 @@ +/* + * Copyright (c), Recep Aslantas. 
+ * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +/* test inline */ +/*---------------------------------------------------------------------------*/ + + +#define GLM_PREFIX glm_ +#define GLM(X) (glm_ ## X) + +#include "test_vec3.h" +#include "test_vec4.h" +#include "test_mat3.h" +#include "test_mat4.h" +#include "test_quat.h" +#include "test_project.h" +#include "test_plane.h" +#include "test_affine.h" +#include "test_affine_mat.h" + +#undef GLM +#undef GLM_PREFIX + +/* test pre-compiled */ +/*---------------------------------------------------------------------------*/ + +#define GLM_PREFIX glmc_ +#define GLM(X) (glmc_ ## X) + +#include "test_vec3.h" +#include "test_vec4.h" +#include "test_mat3.h" +#include "test_mat4.h" +#include "test_quat.h" +#include "test_project.h" +#include "test_plane.h" +#include "test_affine.h" +#include "test_affine_mat.h" + +#undef GLM +#undef GLM_PREFIX + +/*---------------------------------------------------------------------------*/ diff --git a/test/tests.h b/test/tests.h new file mode 100644 index 0000000..e4933e9 --- /dev/null +++ b/test/tests.h @@ -0,0 +1,1163 @@ +/* + * Copyright (c), Recep Aslantas. + * + * MIT License (MIT), http://opensource.org/licenses/MIT + * Full license can be found in the LICENSE file + */ + +#ifndef tests_h +#define tests_h + +#include "include/common.h" + +/* + * To register a test: + * 1. use TEST_DECLARE() to forward declare test + * 2. use TEST_ENTRY() to add test to list + */ + +/* affine mat */ +TEST_DECLARE(glm_mul) +TEST_DECLARE(glm_mul) +TEST_DECLARE(glm_inv_tr) + +TEST_DECLARE(glmc_mul) +TEST_DECLARE(glmc_mul_rot) +TEST_DECLARE(glmc_inv_tr) + +/* affine */ +TEST_DECLARE(glm_translate) +TEST_DECLARE(glm_translate_to) +TEST_DECLARE(glm_translate_x) +TEST_DECLARE(glm_translate_y) +TEST_DECLARE(glm_translate_z) +TEST_DECLARE(glm_translate_make) +TEST_DECLARE(glm_scale_to) +TEST_DECLARE(glm_scale_make) +TEST_DECLARE(glm_scale) +TEST_DECLARE(glm_scale_uni) +TEST_DECLARE(glm_rotate_x) +TEST_DECLARE(glm_rotate_y) +TEST_DECLARE(glm_rotate_z) +TEST_DECLARE(glm_rotate_make) +TEST_DECLARE(glm_rotate) +TEST_DECLARE(glm_rotate_at) +TEST_DECLARE(glm_rotate_atm) +TEST_DECLARE(glm_decompose_scalev) +TEST_DECLARE(glm_uniscaled) +TEST_DECLARE(glm_decompose_rs) +TEST_DECLARE(glm_decompose) + +TEST_DECLARE(glmc_translate) +TEST_DECLARE(glmc_translate_to) +TEST_DECLARE(glmc_translate_x) +TEST_DECLARE(glmc_translate_y) +TEST_DECLARE(glmc_translate_z) +TEST_DECLARE(glmc_translate_make) +TEST_DECLARE(glmc_scale_to) +TEST_DECLARE(glmc_scale_make) +TEST_DECLARE(glmc_scale) +TEST_DECLARE(glmc_scale_uni) +TEST_DECLARE(glmc_rotate_x) +TEST_DECLARE(glmc_rotate_y) +TEST_DECLARE(glmc_rotate_z) +TEST_DECLARE(glmc_rotate_make) +TEST_DECLARE(glmc_rotate) +TEST_DECLARE(glmc_rotate_at) +TEST_DECLARE(glmc_rotate_atm) +TEST_DECLARE(glmc_decompose_scalev) +TEST_DECLARE(glmc_uniscaled) +TEST_DECLARE(glmc_decompose_rs) +TEST_DECLARE(glmc_decompose) + +/* mat4 */ +TEST_DECLARE(glm_mat4_ucopy) +TEST_DECLARE(glm_mat4_copy) +TEST_DECLARE(glm_mat4_identity) +TEST_DECLARE(glm_mat4_identity_array) +TEST_DECLARE(glm_mat4_zero) +TEST_DECLARE(glm_mat4_pick3) +TEST_DECLARE(glm_mat4_pick3t) +TEST_DECLARE(glm_mat4_ins3) +TEST_DECLARE(glm_mat4_mul) +TEST_DECLARE(glm_mat4_mulN) +TEST_DECLARE(glm_mat4_mulv) +TEST_DECLARE(glm_mat4_mulv3) +TEST_DECLARE(glm_mat4_trace) +TEST_DECLARE(glm_mat4_trace3) +TEST_DECLARE(glm_mat4_quat) +TEST_DECLARE(glm_mat4_transpose_to) +TEST_DECLARE(glm_mat4_transpose) 
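tests.c above compiles the same TEST_IMPL bodies twice, once with GLM_PREFIX glm_ (inline API) and once with glmc_ (pre-compiled call API), while this header forward-declares every case with TEST_DECLARE and lists it with TEST_ENTRY. The real macro definitions live in test/include/common.h, which is not part of this hunk; the sketch below only illustrates the pattern, and test_status_t, test_entry_t and TEST_FN are assumed names.

/* Illustrative sketch only -- not the project's real macros; its TEST_IMPL
   also has a one-argument form used by the MACRO_* cases above. */
typedef int test_status_t;
typedef struct { const char *name; test_status_t (*fn)(void); } test_entry_t;

#define TEST_FN_(p, n)           test_ ## p ## n
#define TEST_FN(p, n)            TEST_FN_(p, n)         /* expand, then paste */
#define TEST_IMPL(PREFIX, NAME)  test_status_t TEST_FN(PREFIX, NAME)(void)
#define TEST_DECLARE(NAME)       extern test_status_t test_ ## NAME(void);
#define TEST_ENTRY(NAME)         { #NAME, test_ ## NAME },
#define TEST_LIST                static test_entry_t tests[] =

/* With GLM_PREFIX defined as glm_ and GLM(X) as (glm_ ## X),
   TEST_IMPL(GLM_PREFIX, vec4_dot) becomes test_glm_vec4_dot() and every
   GLM(vec4_dot)(a, b) call inside it resolves to glm_vec4_dot(a, b);
   re-including the same header with GLM_PREFIX glmc_ emits the
   test_glmc_vec4_dot() variant against the pre-compiled library. */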
+TEST_DECLARE(glm_mat4_scale_p) +TEST_DECLARE(glm_mat4_scale) +TEST_DECLARE(glm_mat4_det) +TEST_DECLARE(glm_mat4_inv) +TEST_DECLARE(glm_mat4_inv_fast) +TEST_DECLARE(glm_mat4_inv_precise) +TEST_DECLARE(glm_mat4_swap_col) +TEST_DECLARE(glm_mat4_swap_row) +TEST_DECLARE(glm_mat4_rmc) + +TEST_DECLARE(glmc_mat4_ucopy) +TEST_DECLARE(glmc_mat4_copy) +TEST_DECLARE(glmc_mat4_identity) +TEST_DECLARE(glmc_mat4_identity_array) +TEST_DECLARE(glmc_mat4_zero) +TEST_DECLARE(glmc_mat4_pick3) +TEST_DECLARE(glmc_mat4_pick3t) +TEST_DECLARE(glmc_mat4_ins3) +TEST_DECLARE(glmc_mat4_mul) +TEST_DECLARE(glmc_mat4_mulN) +TEST_DECLARE(glmc_mat4_mulv) +TEST_DECLARE(glmc_mat4_mulv3) +TEST_DECLARE(glmc_mat4_trace) +TEST_DECLARE(glmc_mat4_trace3) +TEST_DECLARE(glmc_mat4_quat) +TEST_DECLARE(glmc_mat4_transpose_to) +TEST_DECLARE(glmc_mat4_transpose) +TEST_DECLARE(glmc_mat4_scale_p) +TEST_DECLARE(glmc_mat4_scale) +TEST_DECLARE(glmc_mat4_det) +TEST_DECLARE(glmc_mat4_inv) +TEST_DECLARE(glmc_mat4_inv_fast) +TEST_DECLARE(glmc_mat4_swap_col) +TEST_DECLARE(glmc_mat4_swap_row) +TEST_DECLARE(glmc_mat4_rmc) + +/* mat3 */ +TEST_DECLARE(glm_mat3_copy) +TEST_DECLARE(glm_mat3_identity) +TEST_DECLARE(glm_mat3_identity_array) +TEST_DECLARE(glm_mat3_zero) +TEST_DECLARE(glm_mat3_mul) +TEST_DECLARE(glm_mat3_mulv) +TEST_DECLARE(glm_mat3_trace) +TEST_DECLARE(glm_mat3_quat) +TEST_DECLARE(glm_mat3_transpose_to) +TEST_DECLARE(glm_mat3_transpose) +TEST_DECLARE(glm_mat3_scale) +TEST_DECLARE(glm_mat3_det) +TEST_DECLARE(glm_mat3_inv) +TEST_DECLARE(glm_mat3_swap_col) +TEST_DECLARE(glm_mat3_swap_row) +TEST_DECLARE(glm_mat3_rmc) + +TEST_DECLARE(glmc_mat3_copy) +TEST_DECLARE(glmc_mat3_identity) +TEST_DECLARE(glmc_mat3_identity_array) +TEST_DECLARE(glmc_mat3_zero) +TEST_DECLARE(glmc_mat3_mul) +TEST_DECLARE(glmc_mat3_mulv) +TEST_DECLARE(glmc_mat3_trace) +TEST_DECLARE(glmc_mat3_quat) +TEST_DECLARE(glmc_mat3_transpose_to) +TEST_DECLARE(glmc_mat3_transpose) +TEST_DECLARE(glmc_mat3_scale) +TEST_DECLARE(glmc_mat3_det) +TEST_DECLARE(glmc_mat3_inv) +TEST_DECLARE(glmc_mat3_swap_col) +TEST_DECLARE(glmc_mat3_swap_row) +TEST_DECLARE(glmc_mat3_rmc) + +/* camera */ +TEST_DECLARE(camera_lookat) +TEST_DECLARE(camera_decomp) + +/* project */ +TEST_DECLARE(glm_unprojecti) +TEST_DECLARE(glm_unproject) +TEST_DECLARE(glm_project) + +TEST_DECLARE(glmc_unprojecti) +TEST_DECLARE(glmc_unproject) +TEST_DECLARE(glmc_project) + +/* plane */ +TEST_DECLARE(glm_plane_normalize) +TEST_DECLARE(glmc_plane_normalize) + +/* utils */ +TEST_DECLARE(clamp) + +/* euler */ +TEST_DECLARE(euler) + +/* quat */ +TEST_DECLARE(MACRO_GLM_QUAT_IDENTITY_INIT) +TEST_DECLARE(MACRO_GLM_QUAT_IDENTITY) + +TEST_DECLARE(glm_quat_identity) +TEST_DECLARE(glm_quat_identity_array) +TEST_DECLARE(glm_quat_init) +TEST_DECLARE(glm_quatv) +TEST_DECLARE(glm_quat) +TEST_DECLARE(glm_quat_copy) +TEST_DECLARE(glm_quat_norm) +TEST_DECLARE(glm_quat_normalize_to) +TEST_DECLARE(glm_quat_normalize) +TEST_DECLARE(glm_quat_dot) +TEST_DECLARE(glm_quat_conjugate) +TEST_DECLARE(glm_quat_inv) +TEST_DECLARE(glm_quat_add) +TEST_DECLARE(glm_quat_sub) +TEST_DECLARE(glm_quat_real) +TEST_DECLARE(glm_quat_imag) +TEST_DECLARE(glm_quat_imagn) +TEST_DECLARE(glm_quat_imaglen) +TEST_DECLARE(glm_quat_angle) +TEST_DECLARE(glm_quat_axis) +TEST_DECLARE(glm_quat_mul) +TEST_DECLARE(glm_quat_mat4) +TEST_DECLARE(glm_quat_mat4t) +TEST_DECLARE(glm_quat_mat3) +TEST_DECLARE(glm_quat_mat3t) +TEST_DECLARE(glm_quat_lerp) +TEST_DECLARE(glm_quat_lerpc) +TEST_DECLARE(glm_quat_slerp) +TEST_DECLARE(glm_quat_look) +TEST_DECLARE(glm_quat_for) 
+TEST_DECLARE(glm_quat_forp) +TEST_DECLARE(glm_quat_rotatev) +TEST_DECLARE(glm_quat_rotate) +TEST_DECLARE(glm_quat_rotate_at) +TEST_DECLARE(glm_quat_rotate_atm) + +TEST_DECLARE(glmc_quat_identity) +TEST_DECLARE(glmc_quat_identity_array) +TEST_DECLARE(glmc_quat_init) +TEST_DECLARE(glmc_quatv) +TEST_DECLARE(glmc_quat) +TEST_DECLARE(glmc_quat_copy) +TEST_DECLARE(glmc_quat_norm) +TEST_DECLARE(glmc_quat_normalize_to) +TEST_DECLARE(glmc_quat_normalize) +TEST_DECLARE(glmc_quat_dot) +TEST_DECLARE(glmc_quat_conjugate) +TEST_DECLARE(glmc_quat_inv) +TEST_DECLARE(glmc_quat_add) +TEST_DECLARE(glmc_quat_sub) +TEST_DECLARE(glmc_quat_real) +TEST_DECLARE(glmc_quat_imag) +TEST_DECLARE(glmc_quat_imagn) +TEST_DECLARE(glmc_quat_imaglen) +TEST_DECLARE(glmc_quat_angle) +TEST_DECLARE(glmc_quat_axis) +TEST_DECLARE(glmc_quat_mul) +TEST_DECLARE(glmc_quat_mat4) +TEST_DECLARE(glmc_quat_mat4t) +TEST_DECLARE(glmc_quat_mat3) +TEST_DECLARE(glmc_quat_mat3t) +TEST_DECLARE(glmc_quat_lerp) +TEST_DECLARE(glmc_quat_lerpc) +TEST_DECLARE(glmc_quat_slerp) +TEST_DECLARE(glmc_quat_look) +TEST_DECLARE(glmc_quat_for) +TEST_DECLARE(glmc_quat_forp) +TEST_DECLARE(glmc_quat_rotatev) +TEST_DECLARE(glmc_quat_rotate) +TEST_DECLARE(glmc_quat_rotate_at) +TEST_DECLARE(glmc_quat_rotate_atm) + +/* bezier */ +TEST_DECLARE(bezier) + +/* vec3 */ +TEST_DECLARE(MACRO_GLM_VEC3_ONE_INIT) +TEST_DECLARE(MACRO_GLM_VEC3_ZERO_INIT) +TEST_DECLARE(MACRO_GLM_VEC3_ONE) +TEST_DECLARE(MACRO_GLM_VEC3_ZERO) +TEST_DECLARE(MACRO_GLM_YUP) +TEST_DECLARE(MACRO_GLM_ZUP) +TEST_DECLARE(MACRO_GLM_XUP) +TEST_DECLARE(MACRO_GLM_FORWARD_RH) +TEST_DECLARE(MACRO_GLM_SHUFFLE3) +TEST_DECLARE(MACRO_GLM_XXX) +TEST_DECLARE(MACRO_GLM_YYY) +TEST_DECLARE(MACRO_GLM_ZZZ) +TEST_DECLARE(MACRO_GLM_ZYX) + +TEST_DECLARE(MACRO_glm_vec3_dup) +TEST_DECLARE(MACRO_glm_vec3_flipsign) +TEST_DECLARE(MACRO_glm_vec3_flipsign_to) +TEST_DECLARE(MACRO_glm_vec3_inv) +TEST_DECLARE(MACRO_glm_vec3_inv_to) +TEST_DECLARE(MACRO_glm_vec3_mulv) + +TEST_DECLARE(glm_vec3) +TEST_DECLARE(glm_vec3_copy) +TEST_DECLARE(glm_vec3_zero) +TEST_DECLARE(glm_vec3_one) +TEST_DECLARE(glm_vec3_dot) +TEST_DECLARE(glm_dot) +TEST_DECLARE(glm_vec3_norm2) +TEST_DECLARE(glm_vec3_norm) +TEST_DECLARE(glm_vec3_norm_one) +TEST_DECLARE(glm_vec3_norm_inf) +TEST_DECLARE(glm_vec3_add) +TEST_DECLARE(glm_vec3_adds) +TEST_DECLARE(glm_vec3_sub) +TEST_DECLARE(glm_vec3_subs) +TEST_DECLARE(glm_vec3_mul) +TEST_DECLARE(glm_vec3_scale) +TEST_DECLARE(glm_vec3_scale_as) +TEST_DECLARE(glm_vec3_div) +TEST_DECLARE(glm_vec3_divs) +TEST_DECLARE(glm_vec3_addadd) +TEST_DECLARE(glm_vec3_subadd) +TEST_DECLARE(glm_vec3_muladd) +TEST_DECLARE(glm_vec3_muladds) +TEST_DECLARE(glm_vec3_maxadd) +TEST_DECLARE(glm_vec3_minadd) +TEST_DECLARE(glm_vec3_negate_to) +TEST_DECLARE(glm_vec3_negate) +TEST_DECLARE(glm_vec3_normalize) +TEST_DECLARE(glm_vec3_normalize_to) +TEST_DECLARE(glm_normalize) +TEST_DECLARE(glm_normalize_to) +TEST_DECLARE(glm_vec3_cross) +TEST_DECLARE(glm_vec3_crossn) +TEST_DECLARE(glm_cross) +TEST_DECLARE(glm_vec3_angle) +TEST_DECLARE(glm_vec3_rotate) +TEST_DECLARE(glm_vec3_rotate_m4) +TEST_DECLARE(glm_vec3_rotate_m3) +TEST_DECLARE(glm_vec3_proj) +TEST_DECLARE(glm_vec3_center) +TEST_DECLARE(glm_vec3_distance2) +TEST_DECLARE(glm_vec3_distance) +TEST_DECLARE(glm_vec3_maxv) +TEST_DECLARE(glm_vec3_minv) +TEST_DECLARE(glm_vec3_ortho) +TEST_DECLARE(glm_vec3_clamp) +TEST_DECLARE(glm_vec3_mix) +TEST_DECLARE(glm_vec3_mixc) +TEST_DECLARE(glm_vec3_step_uni) +TEST_DECLARE(glm_vec3_step) +TEST_DECLARE(glm_vec3_smoothstep_uni) +TEST_DECLARE(glm_vec3_smoothstep) 
+TEST_DECLARE(glm_vec3_smoothinterp) +TEST_DECLARE(glm_vec3_smoothinterpc) +TEST_DECLARE(glm_vec3_swizzle) +TEST_DECLARE(glm_vec3_broadcast) +TEST_DECLARE(glm_vec3_fill) +TEST_DECLARE(glm_vec3_eq) +TEST_DECLARE(glm_vec3_eq_eps) +TEST_DECLARE(glm_vec3_eq_all) +TEST_DECLARE(glm_vec3_eqv) +TEST_DECLARE(glm_vec3_eqv_eps) +TEST_DECLARE(glm_vec3_max) +TEST_DECLARE(glm_vec3_min) +TEST_DECLARE(glm_vec3_isnan) +TEST_DECLARE(glm_vec3_isinf) +TEST_DECLARE(glm_vec3_isvalid) +TEST_DECLARE(glm_vec3_sign) +TEST_DECLARE(glm_vec3_abs) +TEST_DECLARE(glm_vec3_fract) +TEST_DECLARE(glm_vec3_hadd) +TEST_DECLARE(glm_vec3_sqrt) + +TEST_DECLARE(glmc_vec3) +TEST_DECLARE(glmc_vec3_copy) +TEST_DECLARE(glmc_vec3_zero) +TEST_DECLARE(glmc_vec3_one) +TEST_DECLARE(glmc_vec3_dot) +TEST_DECLARE(glmc_vec3_norm2) +TEST_DECLARE(glmc_vec3_norm) +TEST_DECLARE(glmc_vec3_norm_one) +TEST_DECLARE(glmc_vec3_norm_inf) +TEST_DECLARE(glmc_vec3_add) +TEST_DECLARE(glmc_vec3_adds) +TEST_DECLARE(glmc_vec3_sub) +TEST_DECLARE(glmc_vec3_subs) +TEST_DECLARE(glmc_vec3_mul) +TEST_DECLARE(glmc_vec3_scale) +TEST_DECLARE(glmc_vec3_scale_as) +TEST_DECLARE(glmc_vec3_div) +TEST_DECLARE(glmc_vec3_divs) +TEST_DECLARE(glmc_vec3_addadd) +TEST_DECLARE(glmc_vec3_subadd) +TEST_DECLARE(glmc_vec3_muladd) +TEST_DECLARE(glmc_vec3_muladds) +TEST_DECLARE(glmc_vec3_maxadd) +TEST_DECLARE(glmc_vec3_minadd) +TEST_DECLARE(glmc_vec3_negate_to) +TEST_DECLARE(glmc_vec3_negate) +TEST_DECLARE(glmc_vec3_normalize) +TEST_DECLARE(glmc_vec3_normalize_to) +TEST_DECLARE(glmc_vec3_cross) +TEST_DECLARE(glmc_vec3_crossn) +TEST_DECLARE(glmc_vec3_angle) +TEST_DECLARE(glmc_vec3_rotate) +TEST_DECLARE(glmc_vec3_rotate_m4) +TEST_DECLARE(glmc_vec3_rotate_m3) +TEST_DECLARE(glmc_vec3_proj) +TEST_DECLARE(glmc_vec3_center) +TEST_DECLARE(glmc_vec3_distance2) +TEST_DECLARE(glmc_vec3_distance) +TEST_DECLARE(glmc_vec3_maxv) +TEST_DECLARE(glmc_vec3_minv) +TEST_DECLARE(glmc_vec3_ortho) +TEST_DECLARE(glmc_vec3_clamp) +TEST_DECLARE(glmc_vec3_mix) +TEST_DECLARE(glmc_vec3_mixc) +TEST_DECLARE(glmc_vec3_step_uni) +TEST_DECLARE(glmc_vec3_step) +TEST_DECLARE(glmc_vec3_smoothstep_uni) +TEST_DECLARE(glmc_vec3_smoothstep) +TEST_DECLARE(glmc_vec3_smoothinterp) +TEST_DECLARE(glmc_vec3_smoothinterpc) +TEST_DECLARE(glmc_vec3_swizzle) +TEST_DECLARE(glmc_vec3_broadcast) +TEST_DECLARE(glmc_vec3_fill) +TEST_DECLARE(glmc_vec3_eq) +TEST_DECLARE(glmc_vec3_eq_eps) +TEST_DECLARE(glmc_vec3_eq_all) +TEST_DECLARE(glmc_vec3_eqv) +TEST_DECLARE(glmc_vec3_eqv_eps) +TEST_DECLARE(glmc_vec3_max) +TEST_DECLARE(glmc_vec3_min) +TEST_DECLARE(glmc_vec3_isnan) +TEST_DECLARE(glmc_vec3_isinf) +TEST_DECLARE(glmc_vec3_isvalid) +TEST_DECLARE(glmc_vec3_sign) +TEST_DECLARE(glmc_vec3_abs) +TEST_DECLARE(glmc_vec3_fract) +TEST_DECLARE(glmc_vec3_hadd) +TEST_DECLARE(glmc_vec3_sqrt) + +/* vec4 */ + +TEST_DECLARE(MACRO_GLM_VEC4_ONE_INIT) +TEST_DECLARE(MACRO_GLM_VEC4_ZERO_INIT) +TEST_DECLARE(MACRO_GLM_VEC4_ONE) +TEST_DECLARE(MACRO_GLM_VEC4_ZERO) +TEST_DECLARE(MACRO_GLM_XXXX) +TEST_DECLARE(MACRO_GLM_YYYY) +TEST_DECLARE(MACRO_GLM_ZZZZ) +TEST_DECLARE(MACRO_GLM_WZYX) +TEST_DECLARE(MACRO_glm_vec4_dup) +TEST_DECLARE(MACRO_glm_vec4_flipsign) +TEST_DECLARE(MACRO_glm_vec4_flipsign_to) +TEST_DECLARE(MACRO_glm_vec4_inv) +TEST_DECLARE(MACRO_glm_vec4_inv_to) +TEST_DECLARE(MACRO_glm_vec4_mulv) + +TEST_DECLARE(glm_vec4) +TEST_DECLARE(glm_vec4_copy3) +TEST_DECLARE(glm_vec4_copy) +TEST_DECLARE(glm_vec4_ucopy) +TEST_DECLARE(glm_vec4_zero) +TEST_DECLARE(glm_vec4_one) +TEST_DECLARE(glm_vec4_dot) +TEST_DECLARE(glm_vec4_norm2) +TEST_DECLARE(glm_vec4_norm) 
+TEST_DECLARE(glm_vec4_norm_one) +TEST_DECLARE(glm_vec4_norm_inf) +TEST_DECLARE(glm_vec4_add) +TEST_DECLARE(glm_vec4_adds) +TEST_DECLARE(glm_vec4_sub) +TEST_DECLARE(glm_vec4_subs) +TEST_DECLARE(glm_vec4_mul) +TEST_DECLARE(glm_vec4_scale) +TEST_DECLARE(glm_vec4_scale_as) +TEST_DECLARE(glm_vec4_div) +TEST_DECLARE(glm_vec4_divs) +TEST_DECLARE(glm_vec4_addadd) +TEST_DECLARE(glm_vec4_subadd) +TEST_DECLARE(glm_vec4_muladd) +TEST_DECLARE(glm_vec4_muladds) +TEST_DECLARE(glm_vec4_maxadd) +TEST_DECLARE(glm_vec4_minadd) +TEST_DECLARE(glm_vec4_negate_to) +TEST_DECLARE(glm_vec4_negate) +TEST_DECLARE(glm_vec4_normalize) +TEST_DECLARE(glm_vec4_normalize_to) +TEST_DECLARE(glm_vec4_distance2) +TEST_DECLARE(glm_vec4_distance) +TEST_DECLARE(glm_vec4_maxv) +TEST_DECLARE(glm_vec4_minv) +TEST_DECLARE(glm_vec4_clamp) +TEST_DECLARE(glm_vec4_lerp) +TEST_DECLARE(glm_vec4_lerpc) +TEST_DECLARE(glm_vec4_mix) +TEST_DECLARE(glm_vec4_mixc) +TEST_DECLARE(glm_vec4_step_uni) +TEST_DECLARE(glm_vec4_step) +TEST_DECLARE(glm_vec4_smoothstep_uni) +TEST_DECLARE(glm_vec4_smoothstep) +TEST_DECLARE(glm_vec4_smoothinterp) +TEST_DECLARE(glm_vec4_smoothinterpc) +TEST_DECLARE(glm_vec4_cubic) +TEST_DECLARE(glm_vec4_swizzle) +TEST_DECLARE(glm_vec4_broadcast) +TEST_DECLARE(glm_vec4_fill) +TEST_DECLARE(glm_vec4_eq) +TEST_DECLARE(glm_vec4_eq_eps) +TEST_DECLARE(glm_vec4_eq_all) +TEST_DECLARE(glm_vec4_eqv) +TEST_DECLARE(glm_vec4_eqv_eps) +TEST_DECLARE(glm_vec4_max) +TEST_DECLARE(glm_vec4_min) +TEST_DECLARE(glm_vec4_isnan) +TEST_DECLARE(glm_vec4_isinf) +TEST_DECLARE(glm_vec4_isvalid) +TEST_DECLARE(glm_vec4_sign) +TEST_DECLARE(glm_vec4_abs) +TEST_DECLARE(glm_vec4_fract) +TEST_DECLARE(glm_vec4_hadd) +TEST_DECLARE(glm_vec4_sqrt) + +TEST_DECLARE(glmc_vec4) +TEST_DECLARE(glmc_vec4_copy3) +TEST_DECLARE(glmc_vec4_copy) +TEST_DECLARE(glmc_vec4_ucopy) +TEST_DECLARE(glmc_vec4_zero) +TEST_DECLARE(glmc_vec4_one) +TEST_DECLARE(glmc_vec4_dot) +TEST_DECLARE(glmc_vec4_norm2) +TEST_DECLARE(glmc_vec4_norm) +TEST_DECLARE(glmc_vec4_norm_one) +TEST_DECLARE(glmc_vec4_norm_inf) +TEST_DECLARE(glmc_vec4_add) +TEST_DECLARE(glmc_vec4_adds) +TEST_DECLARE(glmc_vec4_sub) +TEST_DECLARE(glmc_vec4_subs) +TEST_DECLARE(glmc_vec4_mul) +TEST_DECLARE(glmc_vec4_scale) +TEST_DECLARE(glmc_vec4_scale_as) +TEST_DECLARE(glmc_vec4_div) +TEST_DECLARE(glmc_vec4_divs) +TEST_DECLARE(glmc_vec4_addadd) +TEST_DECLARE(glmc_vec4_subadd) +TEST_DECLARE(glmc_vec4_muladd) +TEST_DECLARE(glmc_vec4_muladds) +TEST_DECLARE(glmc_vec4_maxadd) +TEST_DECLARE(glmc_vec4_minadd) +TEST_DECLARE(glmc_vec4_negate_to) +TEST_DECLARE(glmc_vec4_negate) +TEST_DECLARE(glmc_vec4_normalize) +TEST_DECLARE(glmc_vec4_normalize_to) +TEST_DECLARE(glmc_vec4_distance2) +TEST_DECLARE(glmc_vec4_distance) +TEST_DECLARE(glmc_vec4_maxv) +TEST_DECLARE(glmc_vec4_minv) +TEST_DECLARE(glmc_vec4_clamp) +TEST_DECLARE(glmc_vec4_lerp) +TEST_DECLARE(glmc_vec4_lerpc) +TEST_DECLARE(glmc_vec4_mix) +TEST_DECLARE(glmc_vec4_mixc) +TEST_DECLARE(glmc_vec4_step_uni) +TEST_DECLARE(glmc_vec4_step) +TEST_DECLARE(glmc_vec4_smoothstep_uni) +TEST_DECLARE(glmc_vec4_smoothstep) +TEST_DECLARE(glmc_vec4_smoothinterp) +TEST_DECLARE(glmc_vec4_smoothinterpc) +TEST_DECLARE(glmc_vec4_cubic) +TEST_DECLARE(glmc_vec4_swizzle) +TEST_DECLARE(glmc_vec4_broadcast) +TEST_DECLARE(glmc_vec4_fill) +TEST_DECLARE(glmc_vec4_eq) +TEST_DECLARE(glmc_vec4_eq_eps) +TEST_DECLARE(glmc_vec4_eq_all) +TEST_DECLARE(glmc_vec4_eqv) +TEST_DECLARE(glmc_vec4_eqv_eps) +TEST_DECLARE(glmc_vec4_max) +TEST_DECLARE(glmc_vec4_min) +TEST_DECLARE(glmc_vec4_isnan) +TEST_DECLARE(glmc_vec4_isinf) 
+TEST_DECLARE(glmc_vec4_isvalid) +TEST_DECLARE(glmc_vec4_sign) +TEST_DECLARE(glmc_vec4_abs) +TEST_DECLARE(glmc_vec4_fract) +TEST_DECLARE(glmc_vec4_hadd) +TEST_DECLARE(glmc_vec4_sqrt) + +/* structs */ + +TEST_DECLARE(mat3s_identity_init) +TEST_DECLARE(mat3s_zero_init) +TEST_DECLARE(mat4s_identity_init) +TEST_DECLARE(mat4s_zero_init) +TEST_DECLARE(quats_zero_init) +TEST_DECLARE(vec3s_one_init) +TEST_DECLARE(vec3s_zero_init) +TEST_DECLARE(vec4s_black_init) +TEST_DECLARE(vec4s_one_init) +TEST_DECLARE(vec4s_zero_init) + +/*****************************************************************************/ + +TEST_LIST { + /* affine mat */ + TEST_ENTRY(glm_mul) + TEST_ENTRY(glm_mul_rot) + TEST_ENTRY(glm_inv_tr) + + TEST_ENTRY(glmc_mul) + TEST_ENTRY(glmc_mul_rot) + TEST_ENTRY(glmc_inv_tr) + + /* affine */ + TEST_ENTRY(glm_translate) + TEST_ENTRY(glm_translate_to) + TEST_ENTRY(glm_translate_x) + TEST_ENTRY(glm_translate_y) + TEST_ENTRY(glm_translate_z) + TEST_ENTRY(glm_translate_make) + TEST_ENTRY(glm_scale_to) + TEST_ENTRY(glm_scale_make) + TEST_ENTRY(glm_scale) + TEST_ENTRY(glm_scale_uni) + TEST_ENTRY(glm_rotate_x) + TEST_ENTRY(glm_rotate_y) + TEST_ENTRY(glm_rotate_z) + TEST_ENTRY(glm_rotate_make) + TEST_ENTRY(glm_rotate) + TEST_ENTRY(glm_rotate_at) + TEST_ENTRY(glm_rotate_atm) + TEST_ENTRY(glm_decompose_scalev) + TEST_ENTRY(glm_uniscaled) + TEST_ENTRY(glm_decompose_rs) + TEST_ENTRY(glm_decompose) + + TEST_ENTRY(glmc_translate) + TEST_ENTRY(glmc_translate_to) + TEST_ENTRY(glmc_translate_x) + TEST_ENTRY(glmc_translate_y) + TEST_ENTRY(glmc_translate_z) + TEST_ENTRY(glmc_translate_make) + TEST_ENTRY(glmc_scale_to) + TEST_ENTRY(glmc_scale_make) + TEST_ENTRY(glmc_scale) + TEST_ENTRY(glmc_scale_uni) + TEST_ENTRY(glmc_rotate_x) + TEST_ENTRY(glmc_rotate_y) + TEST_ENTRY(glmc_rotate_z) + TEST_ENTRY(glmc_rotate_make) + TEST_ENTRY(glmc_rotate) + TEST_ENTRY(glmc_rotate_at) + TEST_ENTRY(glmc_rotate_atm) + TEST_ENTRY(glmc_decompose_scalev) + TEST_ENTRY(glmc_uniscaled) + TEST_ENTRY(glmc_decompose_rs) + TEST_ENTRY(glmc_decompose) + + /* mat4 */ + TEST_ENTRY(glm_mat4_ucopy) + TEST_ENTRY(glm_mat4_copy) + TEST_ENTRY(glm_mat4_identity) + TEST_ENTRY(glm_mat4_identity_array) + TEST_ENTRY(glm_mat4_zero) + TEST_ENTRY(glm_mat4_pick3) + TEST_ENTRY(glm_mat4_pick3t) + TEST_ENTRY(glm_mat4_ins3) + TEST_ENTRY(glm_mat4_mul) + TEST_ENTRY(glm_mat4_mulN) + TEST_ENTRY(glm_mat4_mulv) + TEST_ENTRY(glm_mat4_mulv3) + TEST_ENTRY(glm_mat4_trace) + TEST_ENTRY(glm_mat4_trace3) + TEST_ENTRY(glm_mat4_quat) + TEST_ENTRY(glm_mat4_transpose_to) + TEST_ENTRY(glm_mat4_transpose) + TEST_ENTRY(glm_mat4_scale_p) + TEST_ENTRY(glm_mat4_scale) + TEST_ENTRY(glm_mat4_det) + TEST_ENTRY(glm_mat4_inv) + TEST_ENTRY(glm_mat4_inv_fast) + TEST_ENTRY(glm_mat4_inv_precise) + TEST_ENTRY(glm_mat4_swap_col) + TEST_ENTRY(glm_mat4_swap_row) + TEST_ENTRY(glm_mat4_rmc) + + TEST_ENTRY(glmc_mat4_ucopy) + TEST_ENTRY(glmc_mat4_copy) + TEST_ENTRY(glmc_mat4_identity) + TEST_ENTRY(glmc_mat4_identity_array) + TEST_ENTRY(glmc_mat4_zero) + TEST_ENTRY(glmc_mat4_pick3) + TEST_ENTRY(glmc_mat4_pick3t) + TEST_ENTRY(glmc_mat4_ins3) + TEST_ENTRY(glmc_mat4_mul) + TEST_ENTRY(glmc_mat4_mulN) + TEST_ENTRY(glmc_mat4_mulv) + TEST_ENTRY(glmc_mat4_mulv3) + TEST_ENTRY(glmc_mat4_trace) + TEST_ENTRY(glmc_mat4_trace3) + TEST_ENTRY(glmc_mat4_quat) + TEST_ENTRY(glmc_mat4_transpose_to) + TEST_ENTRY(glmc_mat4_transpose) + TEST_ENTRY(glmc_mat4_scale_p) + TEST_ENTRY(glmc_mat4_scale) + TEST_ENTRY(glmc_mat4_det) + TEST_ENTRY(glmc_mat4_inv) + TEST_ENTRY(glmc_mat4_inv_fast) + TEST_ENTRY(glmc_mat4_swap_col) +
TEST_ENTRY(glmc_mat4_swap_row) + TEST_ENTRY(glmc_mat4_rmc) + + + /* mat3 */ + TEST_ENTRY(glm_mat3_copy) + TEST_ENTRY(glm_mat3_identity) + TEST_ENTRY(glm_mat3_identity_array) + TEST_ENTRY(glm_mat3_zero) + TEST_ENTRY(glm_mat3_mul) + TEST_ENTRY(glm_mat3_mulv) + TEST_ENTRY(glm_mat3_trace) + TEST_ENTRY(glm_mat3_quat) + TEST_ENTRY(glm_mat3_transpose_to) + TEST_ENTRY(glm_mat3_transpose) + TEST_ENTRY(glm_mat3_scale) + TEST_ENTRY(glm_mat3_det) + TEST_ENTRY(glm_mat3_inv) + TEST_ENTRY(glm_mat3_swap_col) + TEST_ENTRY(glm_mat3_swap_row) + TEST_ENTRY(glm_mat3_rmc) + + TEST_ENTRY(glmc_mat3_copy) + TEST_ENTRY(glmc_mat3_identity) + TEST_ENTRY(glmc_mat3_identity_array) + TEST_ENTRY(glmc_mat3_zero) + TEST_ENTRY(glmc_mat3_mul) + TEST_ENTRY(glmc_mat3_mulv) + TEST_ENTRY(glmc_mat3_trace) + TEST_ENTRY(glmc_mat3_quat) + TEST_ENTRY(glmc_mat3_transpose_to) + TEST_ENTRY(glmc_mat3_transpose) + TEST_ENTRY(glmc_mat3_scale) + TEST_ENTRY(glmc_mat3_det) + TEST_ENTRY(glmc_mat3_inv) + TEST_ENTRY(glmc_mat3_swap_col) + TEST_ENTRY(glmc_mat3_swap_row) + TEST_ENTRY(glmc_mat3_rmc) + + /* camera */ + TEST_ENTRY(camera_lookat) + TEST_ENTRY(camera_decomp) + + /* project */ + TEST_ENTRY(glm_unprojecti) + TEST_ENTRY(glm_unproject) + TEST_ENTRY(glm_project) + + TEST_ENTRY(glmc_unprojecti) + TEST_ENTRY(glmc_unproject) + TEST_ENTRY(glmc_project) + + /* plane */ + TEST_ENTRY(glm_plane_normalize) + TEST_ENTRY(glmc_plane_normalize) + + /* utils */ + TEST_ENTRY(clamp) + + /* euler */ + TEST_ENTRY(euler) + + /* quat */ + TEST_ENTRY(MACRO_GLM_QUAT_IDENTITY_INIT) + TEST_ENTRY(MACRO_GLM_QUAT_IDENTITY) + + TEST_ENTRY(glm_quat_identity) + TEST_ENTRY(glm_quat_identity_array) + TEST_ENTRY(glm_quat_init) + TEST_ENTRY(glm_quatv) + TEST_ENTRY(glm_quat) + TEST_ENTRY(glm_quat_copy) + TEST_ENTRY(glm_quat_norm) + TEST_ENTRY(glm_quat_normalize_to) + TEST_ENTRY(glm_quat_normalize) + TEST_ENTRY(glm_quat_dot) + TEST_ENTRY(glm_quat_conjugate) + TEST_ENTRY(glm_quat_inv) + TEST_ENTRY(glm_quat_add) + TEST_ENTRY(glm_quat_sub) + TEST_ENTRY(glm_quat_real) + TEST_ENTRY(glm_quat_imag) + TEST_ENTRY(glm_quat_imagn) + TEST_ENTRY(glm_quat_imaglen) + TEST_ENTRY(glm_quat_angle) + TEST_ENTRY(glm_quat_axis) + TEST_ENTRY(glm_quat_mul) + TEST_ENTRY(glm_quat_mat4) + TEST_ENTRY(glm_quat_mat4t) + TEST_ENTRY(glm_quat_mat3) + TEST_ENTRY(glm_quat_mat3t) + TEST_ENTRY(glm_quat_lerp) + TEST_ENTRY(glm_quat_lerpc) + TEST_ENTRY(glm_quat_slerp) + TEST_ENTRY(glm_quat_look) + TEST_ENTRY(glm_quat_for) + TEST_ENTRY(glm_quat_forp) + TEST_ENTRY(glm_quat_rotatev) + TEST_ENTRY(glm_quat_rotate) + TEST_ENTRY(glm_quat_rotate_at) + TEST_ENTRY(glm_quat_rotate_atm) + + TEST_ENTRY(glmc_quat_identity) + TEST_ENTRY(glmc_quat_identity_array) + TEST_ENTRY(glmc_quat_init) + TEST_ENTRY(glmc_quatv) + TEST_ENTRY(glmc_quat) + TEST_ENTRY(glmc_quat_copy) + TEST_ENTRY(glmc_quat_norm) + TEST_ENTRY(glmc_quat_normalize_to) + TEST_ENTRY(glmc_quat_normalize) + TEST_ENTRY(glmc_quat_dot) + TEST_ENTRY(glmc_quat_conjugate) + TEST_ENTRY(glmc_quat_inv) + TEST_ENTRY(glmc_quat_add) + TEST_ENTRY(glmc_quat_sub) + TEST_ENTRY(glmc_quat_real) + TEST_ENTRY(glmc_quat_imag) + TEST_ENTRY(glmc_quat_imagn) + TEST_ENTRY(glmc_quat_imaglen) + TEST_ENTRY(glmc_quat_angle) + TEST_ENTRY(glmc_quat_axis) + TEST_ENTRY(glmc_quat_mul) + TEST_ENTRY(glmc_quat_mat4) + TEST_ENTRY(glmc_quat_mat4t) + TEST_ENTRY(glmc_quat_mat3) + TEST_ENTRY(glmc_quat_mat3t) + TEST_ENTRY(glmc_quat_lerp) + TEST_ENTRY(glmc_quat_lerpc) + TEST_ENTRY(glmc_quat_slerp) + TEST_ENTRY(glmc_quat_look) + TEST_ENTRY(glmc_quat_for) + TEST_ENTRY(glmc_quat_forp) + TEST_ENTRY(glmc_quat_rotatev) 
+ TEST_ENTRY(glmc_quat_rotate) + TEST_ENTRY(glmc_quat_rotate_at) + TEST_ENTRY(glmc_quat_rotate_atm) + + /* bezier */ + TEST_ENTRY(bezier) + + /* vec3 */ + + /* Macros */ + + TEST_ENTRY(MACRO_GLM_VEC3_ONE_INIT) + TEST_ENTRY(MACRO_GLM_VEC3_ZERO_INIT) + TEST_ENTRY(MACRO_GLM_VEC3_ONE) + TEST_ENTRY(MACRO_GLM_VEC3_ZERO) + TEST_ENTRY(MACRO_GLM_YUP) + TEST_ENTRY(MACRO_GLM_ZUP) + TEST_ENTRY(MACRO_GLM_XUP) + TEST_ENTRY(MACRO_GLM_FORWARD_RH) + TEST_ENTRY(MACRO_GLM_SHUFFLE3) + TEST_ENTRY(MACRO_GLM_XXX) + TEST_ENTRY(MACRO_GLM_YYY) + TEST_ENTRY(MACRO_GLM_ZZZ) + TEST_ENTRY(MACRO_GLM_ZYX) + TEST_ENTRY(MACRO_glm_vec3_dup) + TEST_ENTRY(MACRO_glm_vec3_flipsign) + TEST_ENTRY(MACRO_glm_vec3_flipsign_to) + TEST_ENTRY(MACRO_glm_vec3_inv) + TEST_ENTRY(MACRO_glm_vec3_inv_to) + TEST_ENTRY(MACRO_glm_vec3_mulv) + + TEST_ENTRY(glm_vec3) + TEST_ENTRY(glm_vec3_copy) + TEST_ENTRY(glm_vec3_zero) + TEST_ENTRY(glm_vec3_one) + TEST_ENTRY(glm_vec3_dot) + TEST_ENTRY(glm_dot) + TEST_ENTRY(glm_vec3_norm2) + TEST_ENTRY(glm_vec3_norm) + TEST_ENTRY(glm_vec3_norm_one) + TEST_ENTRY(glm_vec3_norm_inf) + TEST_ENTRY(glm_vec3_add) + TEST_ENTRY(glm_vec3_adds) + TEST_ENTRY(glm_vec3_sub) + TEST_ENTRY(glm_vec3_subs) + TEST_ENTRY(glm_vec3_mul) + TEST_ENTRY(glm_vec3_scale) + TEST_ENTRY(glm_vec3_scale_as) + TEST_ENTRY(glm_vec3_div) + TEST_ENTRY(glm_vec3_divs) + TEST_ENTRY(glm_vec3_addadd) + TEST_ENTRY(glm_vec3_subadd) + TEST_ENTRY(glm_vec3_muladd) + TEST_ENTRY(glm_vec3_muladds) + TEST_ENTRY(glm_vec3_maxadd) + TEST_ENTRY(glm_vec3_minadd) + TEST_ENTRY(glm_vec3_negate_to) + TEST_ENTRY(glm_vec3_negate) + TEST_ENTRY(glm_vec3_normalize) + TEST_ENTRY(glm_vec3_normalize_to) + TEST_ENTRY(glm_normalize) + TEST_ENTRY(glm_normalize_to) + TEST_ENTRY(glm_vec3_cross) + TEST_ENTRY(glm_vec3_crossn) + TEST_ENTRY(glm_cross) + TEST_ENTRY(glm_vec3_angle) + TEST_ENTRY(glm_vec3_rotate) + TEST_ENTRY(glm_vec3_rotate_m4) + TEST_ENTRY(glm_vec3_rotate_m3) + TEST_ENTRY(glm_vec3_proj) + TEST_ENTRY(glm_vec3_center) + TEST_ENTRY(glm_vec3_distance2) + TEST_ENTRY(glm_vec3_distance) + TEST_ENTRY(glm_vec3_maxv) + TEST_ENTRY(glm_vec3_minv) + TEST_ENTRY(glm_vec3_ortho) + TEST_ENTRY(glm_vec3_clamp) + TEST_ENTRY(glm_vec3_mix) + TEST_ENTRY(glm_vec3_mixc) + TEST_ENTRY(glm_vec3_step_uni) + TEST_ENTRY(glm_vec3_step) + TEST_ENTRY(glm_vec3_smoothstep_uni) + TEST_ENTRY(glm_vec3_smoothstep) + TEST_ENTRY(glm_vec3_smoothinterp) + TEST_ENTRY(glm_vec3_smoothinterpc) + TEST_ENTRY(glm_vec3_swizzle) + TEST_ENTRY(glm_vec3_broadcast) + TEST_ENTRY(glm_vec3_fill) + TEST_ENTRY(glm_vec3_eq) + TEST_ENTRY(glm_vec3_eq_eps) + TEST_ENTRY(glm_vec3_eq_all) + TEST_ENTRY(glm_vec3_eqv) + TEST_ENTRY(glm_vec3_eqv_eps) + TEST_ENTRY(glm_vec3_max) + TEST_ENTRY(glm_vec3_min) + TEST_ENTRY(glm_vec3_isnan) + TEST_ENTRY(glm_vec3_isinf) + TEST_ENTRY(glm_vec3_isvalid) + TEST_ENTRY(glm_vec3_sign) + TEST_ENTRY(glm_vec3_abs) + TEST_ENTRY(glm_vec3_fract) + TEST_ENTRY(glm_vec3_hadd) + TEST_ENTRY(glm_vec3_sqrt) + + TEST_ENTRY(glmc_vec3) + TEST_ENTRY(glmc_vec3_copy) + TEST_ENTRY(glmc_vec3_zero) + TEST_ENTRY(glmc_vec3_one) + TEST_ENTRY(glmc_vec3_dot) + TEST_ENTRY(glmc_vec3_norm2) + TEST_ENTRY(glmc_vec3_norm) + TEST_ENTRY(glmc_vec3_norm_one) + TEST_ENTRY(glmc_vec3_norm_inf) + TEST_ENTRY(glmc_vec3_add) + TEST_ENTRY(glmc_vec3_adds) + TEST_ENTRY(glmc_vec3_sub) + TEST_ENTRY(glmc_vec3_subs) + TEST_ENTRY(glmc_vec3_mul) + TEST_ENTRY(glmc_vec3_scale) + TEST_ENTRY(glmc_vec3_scale_as) + TEST_ENTRY(glmc_vec3_div) + TEST_ENTRY(glmc_vec3_divs) + TEST_ENTRY(glmc_vec3_addadd) + TEST_ENTRY(glmc_vec3_subadd) + TEST_ENTRY(glmc_vec3_muladd) +
TEST_ENTRY(glmc_vec3_muladds) + TEST_ENTRY(glmc_vec3_maxadd) + TEST_ENTRY(glmc_vec3_minadd) + TEST_ENTRY(glmc_vec3_negate_to) + TEST_ENTRY(glmc_vec3_negate) + TEST_ENTRY(glmc_vec3_normalize) + TEST_ENTRY(glmc_vec3_normalize_to) + TEST_ENTRY(glmc_vec3_cross) + TEST_ENTRY(glmc_vec3_crossn) + TEST_ENTRY(glmc_vec3_angle) + TEST_ENTRY(glmc_vec3_rotate) + TEST_ENTRY(glmc_vec3_rotate_m4) + TEST_ENTRY(glmc_vec3_rotate_m3) + TEST_ENTRY(glmc_vec3_proj) + TEST_ENTRY(glmc_vec3_center) + TEST_ENTRY(glmc_vec3_distance2) + TEST_ENTRY(glmc_vec3_distance) + TEST_ENTRY(glmc_vec3_maxv) + TEST_ENTRY(glmc_vec3_minv) + TEST_ENTRY(glmc_vec3_ortho) + TEST_ENTRY(glmc_vec3_clamp) + TEST_ENTRY(glmc_vec3_mix) + TEST_ENTRY(glmc_vec3_mixc) + TEST_ENTRY(glmc_vec3_step_uni) + TEST_ENTRY(glmc_vec3_step) + TEST_ENTRY(glmc_vec3_smoothstep_uni) + TEST_ENTRY(glmc_vec3_smoothstep) + TEST_ENTRY(glmc_vec3_smoothinterp) + TEST_ENTRY(glmc_vec3_smoothinterpc) + TEST_ENTRY(glmc_vec3_swizzle) + TEST_ENTRY(glmc_vec3_broadcast) + TEST_ENTRY(glmc_vec3_fill) + TEST_ENTRY(glmc_vec3_eq) + TEST_ENTRY(glmc_vec3_eq_eps) + TEST_ENTRY(glmc_vec3_eq_all) + TEST_ENTRY(glmc_vec3_eqv) + TEST_ENTRY(glmc_vec3_eqv_eps) + TEST_ENTRY(glmc_vec3_max) + TEST_ENTRY(glmc_vec3_min) + TEST_ENTRY(glmc_vec3_isnan) + TEST_ENTRY(glmc_vec3_isinf) + TEST_ENTRY(glmc_vec3_isvalid) + TEST_ENTRY(glmc_vec3_sign) + TEST_ENTRY(glmc_vec3_abs) + TEST_ENTRY(glmc_vec3_fract) + TEST_ENTRY(glmc_vec3_hadd) + TEST_ENTRY(glmc_vec3_sqrt) + + /* vec4 */ + + TEST_ENTRY(MACRO_GLM_VEC4_ONE_INIT) + TEST_ENTRY(MACRO_GLM_VEC4_ZERO_INIT) + TEST_ENTRY(MACRO_GLM_VEC4_ONE) + TEST_ENTRY(MACRO_GLM_VEC4_ZERO) + TEST_ENTRY(MACRO_GLM_XXXX) + TEST_ENTRY(MACRO_GLM_YYYY) + TEST_ENTRY(MACRO_GLM_ZZZZ) + TEST_ENTRY(MACRO_GLM_WZYX) + TEST_ENTRY(MACRO_glm_vec4_dup) + TEST_ENTRY(MACRO_glm_vec4_flipsign) + TEST_ENTRY(MACRO_glm_vec4_flipsign_to) + TEST_ENTRY(MACRO_glm_vec4_inv) + TEST_ENTRY(MACRO_glm_vec4_inv_to) + TEST_ENTRY(MACRO_glm_vec4_mulv) + + TEST_ENTRY(glm_vec4) + TEST_ENTRY(glm_vec4_copy3) + TEST_ENTRY(glm_vec4_copy) + TEST_ENTRY(glm_vec4_ucopy) + TEST_ENTRY(glm_vec4_zero) + TEST_ENTRY(glm_vec4_one) + TEST_ENTRY(glm_vec4_dot) + TEST_ENTRY(glm_vec4_norm2) + TEST_ENTRY(glm_vec4_norm) + TEST_ENTRY(glm_vec4_norm_one) + TEST_ENTRY(glm_vec4_norm_inf) + TEST_ENTRY(glm_vec4_add) + TEST_ENTRY(glm_vec4_adds) + TEST_ENTRY(glm_vec4_sub) + TEST_ENTRY(glm_vec4_subs) + TEST_ENTRY(glm_vec4_mul) + TEST_ENTRY(glm_vec4_scale) + TEST_ENTRY(glm_vec4_scale_as) + TEST_ENTRY(glm_vec4_div) + TEST_ENTRY(glm_vec4_divs) + TEST_ENTRY(glm_vec4_addadd) + TEST_ENTRY(glm_vec4_subadd) + TEST_ENTRY(glm_vec4_muladd) + TEST_ENTRY(glm_vec4_muladds) + TEST_ENTRY(glm_vec4_maxadd) + TEST_ENTRY(glm_vec4_minadd) + TEST_ENTRY(glm_vec4_negate_to) + TEST_ENTRY(glm_vec4_negate) + TEST_ENTRY(glm_vec4_normalize) + TEST_ENTRY(glm_vec4_normalize_to) + TEST_ENTRY(glm_vec4_distance2) + TEST_ENTRY(glm_vec4_distance) + TEST_ENTRY(glm_vec4_maxv) + TEST_ENTRY(glm_vec4_minv) + TEST_ENTRY(glm_vec4_clamp) + TEST_ENTRY(glm_vec4_lerp) + TEST_ENTRY(glm_vec4_lerpc) + TEST_ENTRY(glm_vec4_mix) + TEST_ENTRY(glm_vec4_mixc) + TEST_ENTRY(glm_vec4_step_uni) + TEST_ENTRY(glm_vec4_step) + TEST_ENTRY(glm_vec4_smoothstep_uni) + TEST_ENTRY(glm_vec4_smoothstep) + TEST_ENTRY(glm_vec4_smoothinterp) + TEST_ENTRY(glm_vec4_smoothinterpc) + TEST_ENTRY(glm_vec4_cubic) + TEST_ENTRY(glm_vec4_swizzle) + TEST_ENTRY(glm_vec4_broadcast) + TEST_ENTRY(glm_vec4_fill) + TEST_ENTRY(glm_vec4_eq) + TEST_ENTRY(glm_vec4_eq_eps) + TEST_ENTRY(glm_vec4_eq_all) + TEST_ENTRY(glm_vec4_eqv) + 
TEST_ENTRY(glm_vec4_eqv_eps) + TEST_ENTRY(glm_vec4_max) + TEST_ENTRY(glm_vec4_min) + TEST_ENTRY(glm_vec4_isnan) + TEST_ENTRY(glm_vec4_isinf) + TEST_ENTRY(glm_vec4_isvalid) + TEST_ENTRY(glm_vec4_sign) + TEST_ENTRY(glm_vec4_abs) + TEST_ENTRY(glm_vec4_fract) + TEST_ENTRY(glm_vec4_hadd) + TEST_ENTRY(glm_vec4_sqrt) + + TEST_ENTRY(glmc_vec4) + TEST_ENTRY(glmc_vec4_copy3) + TEST_ENTRY(glmc_vec4_copy) + TEST_ENTRY(glmc_vec4_ucopy) + TEST_ENTRY(glmc_vec4_zero) + TEST_ENTRY(glmc_vec4_one) + TEST_ENTRY(glmc_vec4_dot) + TEST_ENTRY(glmc_vec4_norm2) + TEST_ENTRY(glmc_vec4_norm) + TEST_ENTRY(glmc_vec4_norm_one) + TEST_ENTRY(glmc_vec4_norm_inf) + TEST_ENTRY(glmc_vec4_add) + TEST_ENTRY(glmc_vec4_adds) + TEST_ENTRY(glmc_vec4_sub) + TEST_ENTRY(glmc_vec4_subs) + TEST_ENTRY(glmc_vec4_mul) + TEST_ENTRY(glmc_vec4_scale) + TEST_ENTRY(glmc_vec4_scale_as) + TEST_ENTRY(glmc_vec4_div) + TEST_ENTRY(glmc_vec4_divs) + TEST_ENTRY(glmc_vec4_addadd) + TEST_ENTRY(glmc_vec4_subadd) + TEST_ENTRY(glmc_vec4_muladd) + TEST_ENTRY(glmc_vec4_muladds) + TEST_ENTRY(glmc_vec4_maxadd) + TEST_ENTRY(glmc_vec4_minadd) + TEST_ENTRY(glmc_vec4_negate_to) + TEST_ENTRY(glmc_vec4_negate) + TEST_ENTRY(glmc_vec4_normalize) + TEST_ENTRY(glmc_vec4_normalize_to) + TEST_ENTRY(glmc_vec4_distance2) + TEST_ENTRY(glmc_vec4_distance) + TEST_ENTRY(glmc_vec4_maxv) + TEST_ENTRY(glmc_vec4_minv) + TEST_ENTRY(glmc_vec4_clamp) + TEST_ENTRY(glmc_vec4_lerp) + TEST_ENTRY(glmc_vec4_lerpc) + TEST_ENTRY(glmc_vec4_mix) + TEST_ENTRY(glmc_vec4_mixc) + TEST_ENTRY(glmc_vec4_step_uni) + TEST_ENTRY(glmc_vec4_step) + TEST_ENTRY(glmc_vec4_smoothstep_uni) + TEST_ENTRY(glmc_vec4_smoothstep) + TEST_ENTRY(glmc_vec4_smoothinterp) + TEST_ENTRY(glmc_vec4_smoothinterpc) + TEST_ENTRY(glmc_vec4_cubic) + TEST_ENTRY(glmc_vec4_swizzle) + TEST_ENTRY(glmc_vec4_broadcast) + TEST_ENTRY(glmc_vec4_fill) + TEST_ENTRY(glmc_vec4_eq) + TEST_ENTRY(glmc_vec4_eq_eps) + TEST_ENTRY(glmc_vec4_eq_all) + TEST_ENTRY(glmc_vec4_eqv) + TEST_ENTRY(glmc_vec4_eqv_eps) + TEST_ENTRY(glmc_vec4_max) + TEST_ENTRY(glmc_vec4_min) + TEST_ENTRY(glmc_vec4_isnan) + TEST_ENTRY(glmc_vec4_isinf) + TEST_ENTRY(glmc_vec4_isvalid) + TEST_ENTRY(glmc_vec4_sign) + TEST_ENTRY(glmc_vec4_abs) + TEST_ENTRY(glmc_vec4_fract) + TEST_ENTRY(glmc_vec4_hadd) + TEST_ENTRY(glmc_vec4_sqrt) + + /* structs */ + TEST_ENTRY(mat3s_identity_init) + TEST_ENTRY(mat3s_zero_init) + TEST_ENTRY(mat4s_identity_init) + TEST_ENTRY(mat4s_zero_init) + TEST_ENTRY(quats_zero_init) + TEST_ENTRY(vec3s_one_init) + TEST_ENTRY(vec3s_zero_init) + TEST_ENTRY(vec4s_black_init) + TEST_ENTRY(vec4s_one_init) + TEST_ENTRY(vec4s_zero_init) +}; + +#endif /* tests_h */ diff --git a/win/.gitignore b/win/.gitignore index da6703c..96c0a51 100644 --- a/win/.gitignore +++ b/win/.gitignore @@ -1,4 +1,9 @@ !cglm.sln + !cglm.vcxproj !cglm.vcxproj.filters + +!cglm-test.vcxproj +!cglm-test.vcxproj.filters + !packages.config diff --git a/win/cglm-test.vcxproj b/win/cglm-test.vcxproj new file mode 100644 index 0000000..f7ccc91 --- /dev/null +++ b/win/cglm-test.vcxproj @@ -0,0 +1,190 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + + + + + + + + + + + + + + + + + + + + + + + {ca8bcaf9-cd25-4133-8f62-3d1449b5d2fc} + + + + 16.0 + {200E0DF1-7532-44E6-8273-84FB92C5557E} + Win32Proj + cglmtest + 10.0 + + + + Application + true + v142 + Unicode + + + Application + false + v142 + true + Unicode + + + Application + true + v142 + Unicode + + + Application + false + v142 + true + Unicode + + + + + + + + + + + + + + + + + + + + + false + + + true + + + true + 
+ + false + + + + + + Level3 + MaxSpeed + true + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../include;%(AdditionalIncludeDirectories) + + + Console + true + true + true + %(AdditionalDependencies) + + + + + + + Level3 + Disabled + true + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../include;%(AdditionalIncludeDirectories) + + + Console + true + %(AdditionalDependencies) + + + + + + + Level3 + Disabled + true + _DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../include;%(AdditionalIncludeDirectories) + + + Console + true + %(AdditionalDependencies) + + + + + + + Level3 + MaxSpeed + true + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../include;%(AdditionalIncludeDirectories) + + + Console + true + true + true + %(AdditionalDependencies) + + + + + + \ No newline at end of file diff --git a/win/cglm-test.vcxproj.filters b/win/cglm-test.vcxproj.filters new file mode 100644 index 0000000..66691ff --- /dev/null +++ b/win/cglm-test.vcxproj.filters @@ -0,0 +1,68 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;ipp;xsd + + + + + src + + + src + + + src + + + src + + + src + + + src + + + src + + + src + + + + + include + + + include + + + include + + + src + + + src + + + src + + + src + + + src + + + src + + + \ No newline at end of file diff --git a/win/cglm.sln b/win/cglm.sln index 6316235..04f08b4 100644 --- a/win/cglm.sln +++ b/win/cglm.sln @@ -1,10 +1,12 @@  Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 15 -VisualStudioVersion = 15.0.26403.7 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.29123.88 MinimumVisualStudioVersion = 10.0.40219.1 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "cglm", "cglm.vcxproj", "{CA8BCAF9-CD25-4133-8F62-3D1449B5D2FC}" EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "cglm-test", "cglm-test.vcxproj", "{200E0DF1-7532-44E6-8273-84FB92C5557E}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|x64 = Debug|x64 @@ -21,8 +23,19 @@ Global {CA8BCAF9-CD25-4133-8F62-3D1449B5D2FC}.Release|x64.Build.0 = Release|x64 {CA8BCAF9-CD25-4133-8F62-3D1449B5D2FC}.Release|x86.ActiveCfg = Release|Win32 {CA8BCAF9-CD25-4133-8F62-3D1449B5D2FC}.Release|x86.Build.0 = Release|Win32 + {200E0DF1-7532-44E6-8273-84FB92C5557E}.Debug|x64.ActiveCfg = Debug|x64 + {200E0DF1-7532-44E6-8273-84FB92C5557E}.Debug|x64.Build.0 = Debug|x64 + {200E0DF1-7532-44E6-8273-84FB92C5557E}.Debug|x86.ActiveCfg = Debug|Win32 + {200E0DF1-7532-44E6-8273-84FB92C5557E}.Debug|x86.Build.0 = Debug|Win32 + {200E0DF1-7532-44E6-8273-84FB92C5557E}.Release|x64.ActiveCfg = Release|x64 + {200E0DF1-7532-44E6-8273-84FB92C5557E}.Release|x64.Build.0 = Release|x64 + {200E0DF1-7532-44E6-8273-84FB92C5557E}.Release|x86.ActiveCfg = Release|Win32 + {200E0DF1-7532-44E6-8273-84FB92C5557E}.Release|x86.Build.0 = Release|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {2AEF23C9-433B-428B-BEBC-068BF3AC9A65} + EndGlobalSection EndGlobal diff --git a/win/cglm.vcxproj.filters b/win/cglm.vcxproj.filters index 952ace7..917f837 100644 --- a/win/cglm.vcxproj.filters +++ b/win/cglm.vcxproj.filters @@ -5,10 +5,6 @@ {93995380-89BD-4b04-88EB-625FBE52EBFB} h;hh;hpp;hxx;hm;inl;inc;xsd - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - 
rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx