- // SPDX-License-Identifier: Apache-2.0
- // ----------------------------------------------------------------------------
- // Copyright 2019-2022 Arm Limited
- //
- // Licensed under the Apache License, Version 2.0 (the "License"); you may not
- // use this file except in compliance with the License. You may obtain a copy
- // of the License at:
- //
- // http://www.apache.org/licenses/LICENSE-2.0
- //
- // Unless required by applicable law or agreed to in writing, software
- // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- // License for the specific language governing permissions and limitations
- // under the License.
- // ----------------------------------------------------------------------------
- /**
- * @brief 4x32-bit vectors, implemented using plain C++.
- *
- * This module implements 4-wide 32-bit float, int, and mask vectors as a
- * scalar fallback for VLA code. It is primarily useful for debugging VLA
- * algorithms without the complexity of handling SIMD, and provides only the
- * baseline level of functionality needed to support VLA.
- *
- * Note that the vector conditional operators implemented by this module are
- * designed to behave like SIMD conditional operators that generate lane
- * masks. Rather than returning 0/1 booleans like normal C++ code, they
- * return 0/-1 to give a full lane-width bitmask.
- *
- * Note that the documentation for this module still talks about "vectors" to
- * help developers think about the implied VLA behavior when writing optimized
- * paths.
- */
- #ifndef ASTC_VECMATHLIB_NONE_4_H_INCLUDED
- #define ASTC_VECMATHLIB_NONE_4_H_INCLUDED
- #ifndef ASTCENC_SIMD_INLINE
- #error "Include astcenc_vecmathlib.h, do not include directly"
- #endif
- #include <algorithm>
- #include <cassert>
- #include <cfenv>
- #include <cmath>
- #include <cstdio>
- #include <cstring>
- // ============================================================================
- // vfloat4 data type
- // ============================================================================
- /**
- * @brief Data type for 4-wide floats.
- */
- struct vfloat4
- {
- /**
- * @brief Construct a new vector; lane values are left uninitialized.
- */
- ASTCENC_SIMD_INLINE vfloat4() = default;
- /**
- * @brief Construct from 4 values loaded from an unaligned address.
- *
- * Consider using loada() which is better with wider VLA vectors if data is
- * aligned to vector length.
- */
- ASTCENC_SIMD_INLINE explicit vfloat4(const float* p)
- {
- m[0] = p[0];
- m[1] = p[1];
- m[2] = p[2];
- m[3] = p[3];
- }
- /**
- * @brief Construct from 1 scalar value replicated across all lanes.
- *
- * Consider using zero() for constexpr zeros.
- */
- ASTCENC_SIMD_INLINE explicit vfloat4(float a)
- {
- m[0] = a;
- m[1] = a;
- m[2] = a;
- m[3] = a;
- }
- /**
- * @brief Construct from 4 scalar values.
- *
- * The value of @c a is stored to lane 0 (LSB) in the SIMD register.
- */
- ASTCENC_SIMD_INLINE explicit vfloat4(float a, float b, float c, float d)
- {
- m[0] = a;
- m[1] = b;
- m[2] = c;
- m[3] = d;
- }
- /**
- * @brief Get the scalar value of a single lane.
- */
- template <int l> ASTCENC_SIMD_INLINE float lane() const
- {
- return m[l];
- }
- /**
- * @brief Set the scalar value of a single lane.
- */
- template <int l> ASTCENC_SIMD_INLINE void set_lane(float a)
- {
- m[l] = a;
- }
- /**
- * @brief Factory that returns a vector of zeros.
- */
- static ASTCENC_SIMD_INLINE vfloat4 zero()
- {
- return vfloat4(0.0f);
- }
- /**
- * @brief Factory that returns a replicated scalar loaded from memory.
- */
- static ASTCENC_SIMD_INLINE vfloat4 load1(const float* p)
- {
- return vfloat4(*p);
- }
- /**
- * @brief Factory that returns a vector loaded from aligned memory.
- */
- static ASTCENC_SIMD_INLINE vfloat4 loada(const float* p)
- {
- return vfloat4(p);
- }
- /**
- * @brief Factory that returns a vector containing the lane IDs.
- */
- static ASTCENC_SIMD_INLINE vfloat4 lane_id()
- {
- return vfloat4(0.0f, 1.0f, 2.0f, 3.0f);
- }
- /**
- * @brief Return a swizzled float 2.
- */
- template <int l0, int l1> ASTCENC_SIMD_INLINE vfloat4 swz() const
- {
- return vfloat4(lane<l0>(), lane<l1>(), 0.0f, 0.0f);
- }
- /**
- * @brief Return a swizzled float 3.
- */
- template <int l0, int l1, int l2> ASTCENC_SIMD_INLINE vfloat4 swz() const
- {
- return vfloat4(lane<l0>(), lane<l1>(), lane<l2>(), 0.0f);
- }
- /**
- * @brief Return a swizzled float 4.
- */
- template <int l0, int l1, int l2, int l3> ASTCENC_SIMD_INLINE vfloat4 swz() const
- {
- return vfloat4(lane<l0>(), lane<l1>(), lane<l2>(), lane<l3>());
- }
- /**
- * @brief The vector lane values.
- */
- float m[4];
- };
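- /*
- * A minimal usage sketch for vfloat4 (illustrative only, not part of the
- * library API):
- *
- *     vfloat4 v(1.0f, 2.0f, 3.0f, 4.0f);
- *     float x = v.lane<0>();       // 1.0f
- *     v.set_lane<3>(9.0f);         // v is now (1, 2, 3, 9)
- *     vfloat4 s = v.swz<1, 0>();   // (2, 1, 0, 0); 2-wide swizzle
- *     vfloat4 z = vfloat4::zero(); // (0, 0, 0, 0)
- */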
- // ============================================================================
- // vint4 data type
- // ============================================================================
- /**
- * @brief Data type for 4-wide ints.
- */
- struct vint4
- {
- /**
- * @brief Construct a new vector; lane values are left uninitialized.
- */
- ASTCENC_SIMD_INLINE vint4() = default;
- /**
- * @brief Construct from 4 values loaded from an unaligned address.
- *
- * Consider using vint4::loada() which is better with wider VLA vectors
- * if data is aligned.
- */
- ASTCENC_SIMD_INLINE explicit vint4(const int* p)
- {
- m[0] = p[0];
- m[1] = p[1];
- m[2] = p[2];
- m[3] = p[3];
- }
- /**
- * @brief Construct from 4 uint8_t loaded from an unaligned address.
- */
- ASTCENC_SIMD_INLINE explicit vint4(const uint8_t *p)
- {
- m[0] = p[0];
- m[1] = p[1];
- m[2] = p[2];
- m[3] = p[3];
- }
- /**
- * @brief Construct from 4 scalar values.
- *
- * The value of @c a is stored to lane 0 (LSB) in the SIMD register.
- */
- ASTCENC_SIMD_INLINE explicit vint4(int a, int b, int c, int d)
- {
- m[0] = a;
- m[1] = b;
- m[2] = c;
- m[3] = d;
- }
- /**
- * @brief Construct from 1 scalar value replicated across all lanes.
- *
- * Consider using vint4::zero() for constexpr zeros.
- */
- ASTCENC_SIMD_INLINE explicit vint4(int a)
- {
- m[0] = a;
- m[1] = a;
- m[2] = a;
- m[3] = a;
- }
- /**
- * @brief Get the scalar value of a single lane.
- */
- template <int l> ASTCENC_SIMD_INLINE int lane() const
- {
- return m[l];
- }
- /**
- * @brief Set the scalar value of a single lane.
- */
- template <int l> ASTCENC_SIMD_INLINE void set_lane(int a)
- {
- m[l] = a;
- }
- /**
- * @brief Factory that returns a vector of zeros.
- */
- static ASTCENC_SIMD_INLINE vint4 zero()
- {
- return vint4(0);
- }
- /**
- * @brief Factory that returns a replicated scalar loaded from memory.
- */
- static ASTCENC_SIMD_INLINE vint4 load1(const int* p)
- {
- return vint4(*p);
- }
- /**
- * @brief Factory that returns a vector loaded from 16B aligned memory.
- */
- static ASTCENC_SIMD_INLINE vint4 loada(const int* p)
- {
- return vint4(p);
- }
- /**
- * @brief Factory that returns a vector containing the lane IDs.
- */
- static ASTCENC_SIMD_INLINE vint4 lane_id()
- {
- return vint4(0, 1, 2, 3);
- }
- /**
- * @brief The vector lane values.
- */
- int m[4];
- };
- // ============================================================================
- // vmask4 data type
- // ============================================================================
- /**
- * @brief Data type for 4-wide control plane masks.
- */
- struct vmask4
- {
- /**
- * @brief Construct from an existing mask value.
- */
- ASTCENC_SIMD_INLINE explicit vmask4(int* p)
- {
- m[0] = p[0];
- m[1] = p[1];
- m[2] = p[2];
- m[3] = p[3];
- }
- /**
- * @brief Construct from 1 scalar value.
- */
- ASTCENC_SIMD_INLINE explicit vmask4(bool a)
- {
- m[0] = a ? -1 : 0;
- m[1] = a ? -1 : 0;
- m[2] = a ? -1 : 0;
- m[3] = a ? -1 : 0;
- }
- /**
- * @brief Construct from 4 scalar values.
- *
- * The value of @c a is stored to lane 0 (LSB) in the SIMD register.
- */
- ASTCENC_SIMD_INLINE explicit vmask4(bool a, bool b, bool c, bool d)
- {
- m[0] = a ? -1 : 0;
- m[1] = b ? -1 : 0;
- m[2] = c ? -1 : 0;
- m[3] = d ? -1 : 0;
- }
- /**
- * @brief The vector lane values.
- */
- int m[4];
- };
- // ============================================================================
- // vmask4 operators and functions
- // ============================================================================
- /**
- * @brief Overload: mask union (or).
- */
- ASTCENC_SIMD_INLINE vmask4 operator|(vmask4 a, vmask4 b)
- {
- return vmask4(a.m[0] | b.m[0],
- a.m[1] | b.m[1],
- a.m[2] | b.m[2],
- a.m[3] | b.m[3]);
- }
- /**
- * @brief Overload: mask intersect (and).
- */
- ASTCENC_SIMD_INLINE vmask4 operator&(vmask4 a, vmask4 b)
- {
- return vmask4(a.m[0] & b.m[0],
- a.m[1] & b.m[1],
- a.m[2] & b.m[2],
- a.m[3] & b.m[3]);
- }
- /**
- * @brief Overload: mask difference (xor).
- */
- ASTCENC_SIMD_INLINE vmask4 operator^(vmask4 a, vmask4 b)
- {
- return vmask4(a.m[0] ^ b.m[0],
- a.m[1] ^ b.m[1],
- a.m[2] ^ b.m[2],
- a.m[3] ^ b.m[3]);
- }
- /**
- * @brief Overload: mask invert (not).
- */
- ASTCENC_SIMD_INLINE vmask4 operator~(vmask4 a)
- {
- return vmask4(~a.m[0],
- ~a.m[1],
- ~a.m[2],
- ~a.m[3]);
- }
- /**
- * @brief Return a 4-bit mask code indicating mask status.
- *
- * bit0 = lane 0
- */
- ASTCENC_SIMD_INLINE unsigned int mask(vmask4 a)
- {
- return ((a.m[0] >> 31) & 0x1) |
- ((a.m[1] >> 30) & 0x2) |
- ((a.m[2] >> 29) & 0x4) |
- ((a.m[3] >> 28) & 0x8);
- }
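- /*
- * Sketch of the lane-mask convention (illustrative only): comparisons yield
- * 0/-1 lanes, and mask() compresses those into one bit per lane.
- *
- *     vint4 a(1, 5, 3, 7);
- *     vint4 b(4, 4, 4, 4);
- *     vmask4 m = a > b;            // lanes (0, -1, 0, -1)
- *     unsigned int bits = mask(m); // 0b1010 == 0xA
- */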
- // ============================================================================
- // vint4 operators and functions
- // ============================================================================
- /**
- * @brief Overload: vector by vector addition.
- */
- ASTCENC_SIMD_INLINE vint4 operator+(vint4 a, vint4 b)
- {
- return vint4(a.m[0] + b.m[0],
- a.m[1] + b.m[1],
- a.m[2] + b.m[2],
- a.m[3] + b.m[3]);
- }
- /**
- * @brief Overload: vector by vector subtraction.
- */
- ASTCENC_SIMD_INLINE vint4 operator-(vint4 a, vint4 b)
- {
- return vint4(a.m[0] - b.m[0],
- a.m[1] - b.m[1],
- a.m[2] - b.m[2],
- a.m[3] - b.m[3]);
- }
- /**
- * @brief Overload: vector by vector multiplication.
- */
- ASTCENC_SIMD_INLINE vint4 operator*(vint4 a, vint4 b)
- {
- return vint4(a.m[0] * b.m[0],
- a.m[1] * b.m[1],
- a.m[2] * b.m[2],
- a.m[3] * b.m[3]);
- }
- /**
- * @brief Overload: vector bit invert.
- */
- ASTCENC_SIMD_INLINE vint4 operator~(vint4 a)
- {
- return vint4(~a.m[0],
- ~a.m[1],
- ~a.m[2],
- ~a.m[3]);
- }
- /**
- * @brief Overload: vector by vector bitwise or.
- */
- ASTCENC_SIMD_INLINE vint4 operator|(vint4 a, vint4 b)
- {
- return vint4(a.m[0] | b.m[0],
- a.m[1] | b.m[1],
- a.m[2] | b.m[2],
- a.m[3] | b.m[3]);
- }
- /**
- * @brief Overload: vector by vector bitwise and.
- */
- ASTCENC_SIMD_INLINE vint4 operator&(vint4 a, vint4 b)
- {
- return vint4(a.m[0] & b.m[0],
- a.m[1] & b.m[1],
- a.m[2] & b.m[2],
- a.m[3] & b.m[3]);
- }
- /**
- * @brief Overload: vector by vector bitwise xor.
- */
- ASTCENC_SIMD_INLINE vint4 operator^(vint4 a, vint4 b)
- {
- return vint4(a.m[0] ^ b.m[0],
- a.m[1] ^ b.m[1],
- a.m[2] ^ b.m[2],
- a.m[3] ^ b.m[3]);
- }
- /**
- * @brief Overload: vector by vector equality.
- */
- ASTCENC_SIMD_INLINE vmask4 operator==(vint4 a, vint4 b)
- {
- return vmask4(a.m[0] == b.m[0],
- a.m[1] == b.m[1],
- a.m[2] == b.m[2],
- a.m[3] == b.m[3]);
- }
- /**
- * @brief Overload: vector by vector inequality.
- */
- ASTCENC_SIMD_INLINE vmask4 operator!=(vint4 a, vint4 b)
- {
- return vmask4(a.m[0] != b.m[0],
- a.m[1] != b.m[1],
- a.m[2] != b.m[2],
- a.m[3] != b.m[3]);
- }
- /**
- * @brief Overload: vector by vector less than.
- */
- ASTCENC_SIMD_INLINE vmask4 operator<(vint4 a, vint4 b)
- {
- return vmask4(a.m[0] < b.m[0],
- a.m[1] < b.m[1],
- a.m[2] < b.m[2],
- a.m[3] < b.m[3]);
- }
- /**
- * @brief Overload: vector by vector greater than.
- */
- ASTCENC_SIMD_INLINE vmask4 operator>(vint4 a, vint4 b)
- {
- return vmask4(a.m[0] > b.m[0],
- a.m[1] > b.m[1],
- a.m[2] > b.m[2],
- a.m[3] > b.m[3]);
- }
- /**
- * @brief Logical shift left.
- */
- template <int s> ASTCENC_SIMD_INLINE vint4 lsl(vint4 a)
- {
- // Shift as unsigned to avoid undefined behavior on negative lane values
- unsigned int as0 = static_cast<unsigned int>(a.m[0]) << s;
- unsigned int as1 = static_cast<unsigned int>(a.m[1]) << s;
- unsigned int as2 = static_cast<unsigned int>(a.m[2]) << s;
- unsigned int as3 = static_cast<unsigned int>(a.m[3]) << s;
- return vint4(static_cast<int>(as0),
- static_cast<int>(as1),
- static_cast<int>(as2),
- static_cast<int>(as3));
- }
- /**
- * @brief Logical shift right.
- */
- template <int s> ASTCENC_SIMD_INLINE vint4 lsr(vint4 a)
- {
- unsigned int as0 = static_cast<unsigned int>(a.m[0]) >> s;
- unsigned int as1 = static_cast<unsigned int>(a.m[1]) >> s;
- unsigned int as2 = static_cast<unsigned int>(a.m[2]) >> s;
- unsigned int as3 = static_cast<unsigned int>(a.m[3]) >> s;
- return vint4(static_cast<int>(as0),
- static_cast<int>(as1),
- static_cast<int>(as2),
- static_cast<int>(as3));
- }
- /**
- * @brief Arithmetic shift right.
- */
- template <int s> ASTCENC_SIMD_INLINE vint4 asr(vint4 a)
- {
- return vint4(a.m[0] >> s,
- a.m[1] >> s,
- a.m[2] >> s,
- a.m[3] >> s);
- }
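- /*
- * Sketch contrasting the two right-shift flavors on a negative lane
- * (illustrative only): lsr() shifts in zero bits, asr() replicates the
- * sign bit.
- *
- *     vint4 v(-8, 8, -1, 1);
- *     vint4 l = lsr<1>(v); // lane 0 becomes 0x7FFFFFFC (zero-filled)
- *     vint4 s = asr<1>(v); // lane 0 becomes -4 (sign-extended)
- */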
- /**
- * @brief Return the min vector of two vectors.
- */
- ASTCENC_SIMD_INLINE vint4 min(vint4 a, vint4 b)
- {
- return vint4(a.m[0] < b.m[0] ? a.m[0] : b.m[0],
- a.m[1] < b.m[1] ? a.m[1] : b.m[1],
- a.m[2] < b.m[2] ? a.m[2] : b.m[2],
- a.m[3] < b.m[3] ? a.m[3] : b.m[3]);
- }
- /**
- * @brief Return the max vector of two vectors.
- */
- ASTCENC_SIMD_INLINE vint4 max(vint4 a, vint4 b)
- {
- return vint4(a.m[0] > b.m[0] ? a.m[0] : b.m[0],
- a.m[1] > b.m[1] ? a.m[1] : b.m[1],
- a.m[2] > b.m[2] ? a.m[2] : b.m[2],
- a.m[3] > b.m[3] ? a.m[3] : b.m[3]);
- }
- /**
- * @brief Return the horizontal minimum of a single vector.
- */
- ASTCENC_SIMD_INLINE vint4 hmin(vint4 a)
- {
- int b = std::min(a.m[0], a.m[1]);
- int c = std::min(a.m[2], a.m[3]);
- return vint4(std::min(b, c));
- }
- /**
- * @brief Return the horizontal maximum of a single vector.
- */
- ASTCENC_SIMD_INLINE vint4 hmax(vint4 a)
- {
- int b = std::max(a.m[0], a.m[1]);
- int c = std::max(a.m[2], a.m[3]);
- return vint4(std::max(b, c));
- }
- /**
- * @brief Return the horizontal sum of vector lanes as a scalar.
- */
- ASTCENC_SIMD_INLINE int hadd_s(vint4 a)
- {
- return a.m[0] + a.m[1] + a.m[2] + a.m[3];
- }
- /**
- * @brief Store a vector to an aligned memory address.
- */
- ASTCENC_SIMD_INLINE void storea(vint4 a, int* p)
- {
- p[0] = a.m[0];
- p[1] = a.m[1];
- p[2] = a.m[2];
- p[3] = a.m[3];
- }
- /**
- * @brief Store a vector to an unaligned memory address.
- */
- ASTCENC_SIMD_INLINE void store(vint4 a, int* p)
- {
- p[0] = a.m[0];
- p[1] = a.m[1];
- p[2] = a.m[2];
- p[3] = a.m[3];
- }
- /**
- * @brief Store lowest N (vector width) bytes into an unaligned address.
- */
- ASTCENC_SIMD_INLINE void store_nbytes(vint4 a, uint8_t* p)
- {
- // Copy via memcpy to avoid strict-aliasing issues from pointer casts
- std::memcpy(p, a.m, sizeof(int));
- }
- /**
- * @brief Gather N (vector width) indices from the array.
- */
- ASTCENC_SIMD_INLINE vint4 gatheri(const int* base, vint4 indices)
- {
- return vint4(base[indices.m[0]],
- base[indices.m[1]],
- base[indices.m[2]],
- base[indices.m[3]]);
- }
- /**
- * @brief Pack low 8 bits of N (vector width) lanes into bottom of vector.
- */
- ASTCENC_SIMD_INLINE vint4 pack_low_bytes(vint4 a)
- {
- int b0 = a.m[0] & 0xFF;
- int b1 = a.m[1] & 0xFF;
- int b2 = a.m[2] & 0xFF;
- int b3 = a.m[3] & 0xFF;
- int b = b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
- return vint4(b, 0, 0, 0);
- }
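- /*
- * Sketch of a common pairing (illustrative only): pack_low_bytes() packs one
- * byte per lane into lane 0, and store_nbytes() then writes those 4 bytes.
- *
- *     vint4 v(0x11, 0x22, 0x33, 0x44);
- *     uint8_t out[4];
- *     store_nbytes(pack_low_bytes(v), out);
- *     // out == { 0x11, 0x22, 0x33, 0x44 } on little-endian targets
- */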
- /**
- * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.
- */
- ASTCENC_SIMD_INLINE vint4 select(vint4 a, vint4 b, vmask4 cond)
- {
- return vint4((cond.m[0] & static_cast<int>(0x80000000)) ? b.m[0] : a.m[0],
- (cond.m[1] & static_cast<int>(0x80000000)) ? b.m[1] : a.m[1],
- (cond.m[2] & static_cast<int>(0x80000000)) ? b.m[2] : a.m[2],
- (cond.m[3] & static_cast<int>(0x80000000)) ? b.m[3] : a.m[3]);
- }
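- /*
- * Sketch of branchless clamping with select() (illustrative only): negative
- * lanes are replaced with zero, positive lanes pass through.
- *
- *     vint4 v(-2, 3, -7, 9);
- *     vmask4 neg = v < vint4::zero();
- *     vint4 r = select(v, vint4::zero(), neg); // (0, 3, 0, 9)
- */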
- // ============================================================================
- // vfloat4 operators and functions
- // ============================================================================
- /**
- * @brief Overload: vector by vector addition.
- */
- ASTCENC_SIMD_INLINE vfloat4 operator+(vfloat4 a, vfloat4 b)
- {
- return vfloat4(a.m[0] + b.m[0],
- a.m[1] + b.m[1],
- a.m[2] + b.m[2],
- a.m[3] + b.m[3]);
- }
- /**
- * @brief Overload: vector by vector subtraction.
- */
- ASTCENC_SIMD_INLINE vfloat4 operator-(vfloat4 a, vfloat4 b)
- {
- return vfloat4(a.m[0] - b.m[0],
- a.m[1] - b.m[1],
- a.m[2] - b.m[2],
- a.m[3] - b.m[3]);
- }
- /**
- * @brief Overload: vector by vector multiplication.
- */
- ASTCENC_SIMD_INLINE vfloat4 operator*(vfloat4 a, vfloat4 b)
- {
- return vfloat4(a.m[0] * b.m[0],
- a.m[1] * b.m[1],
- a.m[2] * b.m[2],
- a.m[3] * b.m[3]);
- }
- /**
- * @brief Overload: vector by vector division.
- */
- ASTCENC_SIMD_INLINE vfloat4 operator/(vfloat4 a, vfloat4 b)
- {
- return vfloat4(a.m[0] / b.m[0],
- a.m[1] / b.m[1],
- a.m[2] / b.m[2],
- a.m[3] / b.m[3]);
- }
- /**
- * @brief Overload: vector by vector equality.
- */
- ASTCENC_SIMD_INLINE vmask4 operator==(vfloat4 a, vfloat4 b)
- {
- return vmask4(a.m[0] == b.m[0],
- a.m[1] == b.m[1],
- a.m[2] == b.m[2],
- a.m[3] == b.m[3]);
- }
- /**
- * @brief Overload: vector by vector inequality.
- */
- ASTCENC_SIMD_INLINE vmask4 operator!=(vfloat4 a, vfloat4 b)
- {
- return vmask4(a.m[0] != b.m[0],
- a.m[1] != b.m[1],
- a.m[2] != b.m[2],
- a.m[3] != b.m[3]);
- }
- /**
- * @brief Overload: vector by vector less than.
- */
- ASTCENC_SIMD_INLINE vmask4 operator<(vfloat4 a, vfloat4 b)
- {
- return vmask4(a.m[0] < b.m[0],
- a.m[1] < b.m[1],
- a.m[2] < b.m[2],
- a.m[3] < b.m[3]);
- }
- /**
- * @brief Overload: vector by vector greater than.
- */
- ASTCENC_SIMD_INLINE vmask4 operator>(vfloat4 a, vfloat4 b)
- {
- return vmask4(a.m[0] > b.m[0],
- a.m[1] > b.m[1],
- a.m[2] > b.m[2],
- a.m[3] > b.m[3]);
- }
- /**
- * @brief Overload: vector by vector less than or equal.
- */
- ASTCENC_SIMD_INLINE vmask4 operator<=(vfloat4 a, vfloat4 b)
- {
- return vmask4(a.m[0] <= b.m[0],
- a.m[1] <= b.m[1],
- a.m[2] <= b.m[2],
- a.m[3] <= b.m[3]);
- }
- /**
- * @brief Overload: vector by vector greater than or equal.
- */
- ASTCENC_SIMD_INLINE vmask4 operator>=(vfloat4 a, vfloat4 b)
- {
- return vmask4(a.m[0] >= b.m[0],
- a.m[1] >= b.m[1],
- a.m[2] >= b.m[2],
- a.m[3] >= b.m[3]);
- }
- /**
- * @brief Return the min vector of two vectors.
- *
- * If either lane value is NaN, @c b will be returned for that lane.
- */
- ASTCENC_SIMD_INLINE vfloat4 min(vfloat4 a, vfloat4 b)
- {
- return vfloat4(a.m[0] < b.m[0] ? a.m[0] : b.m[0],
- a.m[1] < b.m[1] ? a.m[1] : b.m[1],
- a.m[2] < b.m[2] ? a.m[2] : b.m[2],
- a.m[3] < b.m[3] ? a.m[3] : b.m[3]);
- }
- /**
- * @brief Return the max vector of two vectors.
- *
- * If either lane value is NaN, @c b will be returned for that lane.
- */
- ASTCENC_SIMD_INLINE vfloat4 max(vfloat4 a, vfloat4 b)
- {
- return vfloat4(a.m[0] > b.m[0] ? a.m[0] : b.m[0],
- a.m[1] > b.m[1] ? a.m[1] : b.m[1],
- a.m[2] > b.m[2] ? a.m[2] : b.m[2],
- a.m[3] > b.m[3] ? a.m[3] : b.m[3]);
- }
- /**
- * @brief Return the absolute value of the float vector.
- */
- ASTCENC_SIMD_INLINE vfloat4 abs(vfloat4 a)
- {
- return vfloat4(std::abs(a.m[0]),
- std::abs(a.m[1]),
- std::abs(a.m[2]),
- std::abs(a.m[3]));
- }
- /**
- * @brief Return a float rounded to the nearest integer value.
- */
- ASTCENC_SIMD_INLINE vfloat4 round(vfloat4 a)
- {
- assert(std::fegetround() == FE_TONEAREST);
- return vfloat4(std::nearbyint(a.m[0]),
- std::nearbyint(a.m[1]),
- std::nearbyint(a.m[2]),
- std::nearbyint(a.m[3]));
- }
- /**
- * @brief Return the horizontal minimum of a vector.
- */
- ASTCENC_SIMD_INLINE vfloat4 hmin(vfloat4 a)
- {
- float tmp1 = std::min(a.m[0], a.m[1]);
- float tmp2 = std::min(a.m[2], a.m[3]);
- return vfloat4(std::min(tmp1, tmp2));
- }
- /**
- * @brief Return the horizontal maximum of a vector.
- */
- ASTCENC_SIMD_INLINE vfloat4 hmax(vfloat4 a)
- {
- float tmp1 = std::max(a.m[0], a.m[1]);
- float tmp2 = std::max(a.m[2], a.m[3]);
- return vfloat4(std::max(tmp1, tmp2));
- }
- /**
- * @brief Return the horizontal sum of a vector.
- */
- ASTCENC_SIMD_INLINE float hadd_s(vfloat4 a)
- {
- // Use halving add, gives invariance with SIMD versions
- return (a.m[0] + a.m[2]) + (a.m[1] + a.m[3]);
- }
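- /*
- * Sketch of the reduction order (illustrative only): summing lanes pairwise
- * as (0 + 2) + (1 + 3) matches the halving adds used by the SIMD backends,
- * so results stay bit-identical across implementations.
- *
- *     vfloat4 v(0.1f, 0.2f, 0.3f, 0.4f);
- *     float s = hadd_s(v); // computed as (0.1f + 0.3f) + (0.2f + 0.4f)
- */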
- /**
- * @brief Return the sqrt of the lanes in the vector.
- */
- ASTCENC_SIMD_INLINE vfloat4 sqrt(vfloat4 a)
- {
- return vfloat4(std::sqrt(a.m[0]),
- std::sqrt(a.m[1]),
- std::sqrt(a.m[2]),
- std::sqrt(a.m[3]));
- }
- /**
- * @brief Return lanes from @c b if @c cond is set, else @c a.
- */
- ASTCENC_SIMD_INLINE vfloat4 select(vfloat4 a, vfloat4 b, vmask4 cond)
- {
- return vfloat4((cond.m[0] & static_cast<int>(0x80000000)) ? b.m[0] : a.m[0],
- (cond.m[1] & static_cast<int>(0x80000000)) ? b.m[1] : a.m[1],
- (cond.m[2] & static_cast<int>(0x80000000)) ? b.m[2] : a.m[2],
- (cond.m[3] & static_cast<int>(0x80000000)) ? b.m[3] : a.m[3]);
- }
- /**
- * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.
- */
- ASTCENC_SIMD_INLINE vfloat4 select_msb(vfloat4 a, vfloat4 b, vmask4 cond)
- {
- return vfloat4((cond.m[0] & static_cast<int>(0x80000000)) ? b.m[0] : a.m[0],
- (cond.m[1] & static_cast<int>(0x80000000)) ? b.m[1] : a.m[1],
- (cond.m[2] & static_cast<int>(0x80000000)) ? b.m[2] : a.m[2],
- (cond.m[3] & static_cast<int>(0x80000000)) ? b.m[3] : a.m[3]);
- }
- /**
- * @brief Load a vector of gathered results from an array.
- */
- ASTCENC_SIMD_INLINE vfloat4 gatherf(const float* base, vint4 indices)
- {
- return vfloat4(base[indices.m[0]],
- base[indices.m[1]],
- base[indices.m[2]],
- base[indices.m[3]]);
- }
- /**
- * @brief Store a vector to an unaligned memory address.
- */
- ASTCENC_SIMD_INLINE void store(vfloat4 a, float* ptr)
- {
- ptr[0] = a.m[0];
- ptr[1] = a.m[1];
- ptr[2] = a.m[2];
- ptr[3] = a.m[3];
- }
- /**
- * @brief Store a vector to an aligned memory address.
- */
- ASTCENC_SIMD_INLINE void storea(vfloat4 a, float* ptr)
- {
- ptr[0] = a.m[0];
- ptr[1] = a.m[1];
- ptr[2] = a.m[2];
- ptr[3] = a.m[3];
- }
- /**
- * @brief Return an integer value for a float vector, using truncation.
- */
- ASTCENC_SIMD_INLINE vint4 float_to_int(vfloat4 a)
- {
- return vint4(static_cast<int>(a.m[0]),
- static_cast<int>(a.m[1]),
- static_cast<int>(a.m[2]),
- static_cast<int>(a.m[3]));
- }
- /**
- * @brief Return an integer value for a float vector, using round-to-nearest.
- */
- ASTCENC_SIMD_INLINE vint4 float_to_int_rtn(vfloat4 a)
- {
- return vint4(static_cast<int>(a.m[0] + 0.5f),
- static_cast<int>(a.m[1] + 0.5f),
- static_cast<int>(a.m[2] + 0.5f),
- static_cast<int>(a.m[3] + 0.5f));
- }
- /**
- * @brief Return a float value for an integer vector.
- */
- ASTCENC_SIMD_INLINE vfloat4 int_to_float(vint4 a)
- {
- return vfloat4(static_cast<float>(a.m[0]),
- static_cast<float>(a.m[1]),
- static_cast<float>(a.m[2]),
- static_cast<float>(a.m[3]));
- }
- /**
- * @brief Return a float16 value for a float vector, using round-to-nearest.
- */
- ASTCENC_SIMD_INLINE vint4 float_to_float16(vfloat4 a)
- {
- return vint4(
- float_to_sf16(a.lane<0>()),
- float_to_sf16(a.lane<1>()),
- float_to_sf16(a.lane<2>()),
- float_to_sf16(a.lane<3>()));
- }
- /**
- * @brief Return a float16 value for a float scalar, using round-to-nearest.
- */
- static inline uint16_t float_to_float16(float a)
- {
- return float_to_sf16(a);
- }
- /**
- * @brief Return a float value for a float16 vector.
- */
- ASTCENC_SIMD_INLINE vfloat4 float16_to_float(vint4 a)
- {
- return vfloat4(
- sf16_to_float(static_cast<uint16_t>(a.lane<0>())),
- sf16_to_float(static_cast<uint16_t>(a.lane<1>())),
- sf16_to_float(static_cast<uint16_t>(a.lane<2>())),
- sf16_to_float(static_cast<uint16_t>(a.lane<3>())));
- }
- /**
- * @brief Return a float value for a float16 scalar.
- */
- ASTCENC_SIMD_INLINE float float16_to_float(uint16_t a)
- {
- return sf16_to_float(a);
- }
- /**
- * @brief Return a float value as an integer bit pattern (i.e. no conversion).
- *
- * It is a common trick to convert floats into integer bit patterns, perform
- * some bit hackery based on knowledge they are IEEE 754 layout, and then
- * convert them back again. This is the first half of that flip.
- */
- ASTCENC_SIMD_INLINE vint4 float_as_int(vfloat4 a)
- {
- vint4 r;
- std::memcpy(r.m, a.m, sizeof(a.m));
- return r;
- }
- /**
- * @brief Return a integer value as a float bit pattern (i.e. no conversion).
- *
- * It is a common trick to convert floats into integer bit patterns, perform
- * some bit hackery based on knowledge they are IEEE 754 layout, and then
- * convert them back again. This is the second half of that flip.
- */
- ASTCENC_SIMD_INLINE vfloat4 int_as_float(vint4 a)
- {
- vfloat4 r;
- std::memcpy(r.m, a.m, sizeof(a.m));
- return r;
- }
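- /*
- * Sketch of the bit-pattern round trip (illustrative only): clearing the
- * IEEE 754 sign bit gives a branchless absolute value.
- *
- *     vfloat4 v(-1.5f, 2.0f, -0.0f, 3.0f);
- *     vint4 bits = float_as_int(v);
- *     bits = bits & vint4(0x7FFFFFFF);  // clear each lane's sign bit
- *     vfloat4 av = int_as_float(bits);  // (1.5, 2.0, 0.0, 3.0)
- */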
- /**
- * @brief Prepare a vtable lookup table for use with the native SIMD size.
- */
- ASTCENC_SIMD_INLINE void vtable_prepare(vint4 t0, vint4& t0p)
- {
- t0p = t0;
- }
- /**
- * @brief Prepare a vtable lookup table for use with the native SIMD size.
- */
- ASTCENC_SIMD_INLINE void vtable_prepare(vint4 t0, vint4 t1, vint4& t0p, vint4& t1p)
- {
- t0p = t0;
- t1p = t1;
- }
- /**
- * @brief Prepare a vtable lookup table for use with the native SIMD size.
- */
- ASTCENC_SIMD_INLINE void vtable_prepare(
- vint4 t0, vint4 t1, vint4 t2, vint4 t3,
- vint4& t0p, vint4& t1p, vint4& t2p, vint4& t3p)
- {
- t0p = t0;
- t1p = t1;
- t2p = t2;
- t3p = t3;
- }
- /**
- * @brief Perform an 8-bit 16-entry table lookup, with 32-bit indexes.
- */
- ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 idx)
- {
- uint8_t table[16];
- // Copy via memcpy; the byte array need not be int-aligned for storea()
- std::memcpy(table, t0.m, 16);
- return vint4(table[idx.lane<0>()],
- table[idx.lane<1>()],
- table[idx.lane<2>()],
- table[idx.lane<3>()]);
- }
- /**
- * @brief Perform an 8-bit 32-entry table lookup, with 32-bit indexes.
- */
- ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 idx)
- {
- uint8_t table[32];
- std::memcpy(table +  0, t0.m, 16);
- std::memcpy(table + 16, t1.m, 16);
- return vint4(table[idx.lane<0>()],
- table[idx.lane<1>()],
- table[idx.lane<2>()],
- table[idx.lane<3>()]);
- }
- /**
- * @brief Perform an 8-bit 64-entry table lookup, with 32-bit indexes.
- */
- ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 t2, vint4 t3, vint4 idx)
- {
- uint8_t table[64];
- std::memcpy(table +  0, t0.m, 16);
- std::memcpy(table + 16, t1.m, 16);
- std::memcpy(table + 32, t2.m, 16);
- std::memcpy(table + 48, t3.m, 16);
- return vint4(table[idx.lane<0>()],
- table[idx.lane<1>()],
- table[idx.lane<2>()],
- table[idx.lane<3>()]);
- }
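- /*
- * Sketch of the vtable flow (illustrative only): pack 16 byte entries into a
- * vint4 (four per lane, little-endian byte order assumed here), prepare it
- * (a no-op in this scalar backend), then look up by byte index.
- *
- *     vint4 t0(0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C);
- *     vint4 t0p;
- *     vtable_prepare(t0, t0p);
- *     vint4 idx(0, 5, 10, 15);
- *     vint4 r = vtable_8bt_32bi(t0p, idx); // (0, 5, 10, 15)
- */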
- /**
- * @brief Return a vector of interleaved RGBA data.
- *
- * Input vectors have the value stored in the bottom 8 bits of each lane,
- * with high bits set to zero.
- *
- * Output vector stores a single RGBA texel packed in each lane.
- */
- ASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)
- {
- return r + lsl<8>(g) + lsl<16>(b) + lsl<24>(a);
- }
- /**
- * @brief Store a vector, skipping masked lanes.
- *
- * All masked lanes must be at the end of the vector, after all non-masked
- * lanes.
- */
- ASTCENC_SIMD_INLINE void store_lanes_masked(int* base, vint4 data, vmask4 mask)
- {
- if (mask.m[3])
- {
- store(data, base);
- }
- else if (mask.m[2])
- {
- base[0] = data.lane<0>();
- base[1] = data.lane<1>();
- base[2] = data.lane<2>();
- }
- else if (mask.m[1])
- {
- base[0] = data.lane<0>();
- base[1] = data.lane<1>();
- }
- else if (mask.m[0])
- {
- base[0] = data.lane<0>();
- }
- }
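- /*
- * Sketch of a tail store (illustrative only): write only the first three
- * lanes, e.g. when an array length is not a multiple of the vector width.
- *
- *     int out[3];
- *     vint4 data(10, 20, 30, 99);
- *     vmask4 m(true, true, true, false);
- *     store_lanes_masked(out, data, m); // out == { 10, 20, 30 }
- */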
- #endif // #ifndef ASTC_VECMATHLIB_NONE_4_H_INCLUDED