commit ba2a7744a8f214132e884fbe0dcb1b1ae1982409
parent d84c2fbd0dd9eef97cbf4abb5749f0157a1f0af0
Author: d.levin256@gmail.com <d.levin256@gmail.com>
Date: Wed, 11 Oct 2023 14:30:36 +0100
Remove unnecessary unwrap_bit_value
Diffstat:
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/include/kfr/simd/impl/backend_generic.hpp b/include/kfr/simd/impl/backend_generic.hpp
--- a/include/kfr/simd/impl/backend_generic.hpp
+++ b/include/kfr/simd/impl/backend_generic.hpp
@@ -1179,9 +1179,10 @@ KFR_INTRINSIC simd<Tout, Nout> simd_bitcast(simd_cvt_t<Tout, Tin, N>, const simd
constexpr size_t Nlow = prev_poweroftwo(N - 1);
return simd_concat<Tout, Nlow * Nout / N, (N - Nlow) * Nout / N>(
simd_bitcast(simd_cvt_t<Tout, Tin, Nlow>{},
- unwrap_bit_value(simd_shuffle(simd_t<Tin, N>{}, x, csizeseq<Nlow>, overload_auto))),
- simd_bitcast(simd_cvt_t<Tout, Tin, N - Nlow>{},
- unwrap_bit_value(simd_shuffle(simd_t<Tin, N>{}, x, csizeseq<N - Nlow, Nlow>, overload_auto))));
+ unwrap_bit_value(simd_shuffle(simd_t<Tin, N>{}, x, csizeseq<Nlow>, overload_auto))),
+ simd_bitcast(
+ simd_cvt_t<Tout, Tin, N - Nlow>{},
+ unwrap_bit_value(simd_shuffle(simd_t<Tin, N>{}, x, csizeseq<N - Nlow, Nlow>, overload_auto))));
}
template <typename T, size_t N>
@@ -1267,7 +1268,7 @@ KFR_INTRINSIC simd<T, N / 2> simd_shuffle(simd_t<T, N>, const simd<T, N>& x, csi
template <typename T, size_t N, size_t index>
KFR_INTRINSIC T simd_shuffle(simd_t<T, N>, const simd<T, N>& x, csizes_t<index>,
- overload_priority<6>) CMT_NOEXCEPT
+ overload_priority<6>) CMT_NOEXCEPT
{
return to_simd_array<T, N>(x).val[index];
}
@@ -1309,8 +1310,7 @@ KFR_INTRINSIC simd<T, Nout> simd_shuffle(simd_t<T, N>, const simd<T, N>& x, csiz
constexpr static unsigned indices_array[] = { static_cast<unsigned>(indices)... };
return from_simd_array<T, Nout>(simd_shuffle_generic<T, Nout, N>(xx, indices_array));
#else
- return from_simd_array<T, Nout>(
- { (indices >= N ? T() : unwrap_bit_value(to_simd_array<T, N>(x).val[indices]))... });
+ return from_simd_array<T, Nout>({ (indices >= N ? T() : to_simd_array<T, N>(x).val[indices])... });
#endif
}
@@ -1326,10 +1326,9 @@ KFR_INTRINSIC simd<T, Nout> simd_shuffle(simd2_t<T, N, N>, const simd<T, N>& x,
constexpr static unsigned indices_array[] = { static_cast<unsigned>(indices)... };
return from_simd_array<T, Nout>(simd_shuffle2_generic<T, Nout, N, N>(xx, yy, indices_array));
#else
- return from_simd_array<T, Nout>(
- { (indices >= N * 2 ? T()
- : indices >= N ? unwrap_bit_value(to_simd_array<T, N>(y).val[indices - N])
- : unwrap_bit_value(to_simd_array<T, N>(x).val[indices]))... });
+ return from_simd_array<T, Nout>({ (indices >= N * 2 ? T()
+ : indices >= N ? to_simd_array<T, N>(y).val[indices - N]
+ : to_simd_array<T, N>(x).val[indices])... });
#endif
}
@@ -1347,10 +1346,9 @@ KFR_INTRINSIC simd<T, Nout> simd_shuffle(simd2_t<T, N1, N2>, const simd<T, N1>&
return from_simd_array<T, Nout>(simd_shuffle2_generic<T, Nout, N1, N2>(xx, yy, indices_array));
#else
- return from_simd_array<T, Nout>(
- { (indices > N1 + N2 ? T()
- : indices >= N1 ? unwrap_bit_value(to_simd_array<T, N2>(y).val[indices - N1])
- : unwrap_bit_value(to_simd_array<T, N1>(x).val[indices]))... });
+ return from_simd_array<T, Nout>({ (indices > N1 + N2 ? T()
+ : indices >= N1 ? to_simd_array<T, N2>(y).val[indices - N1]
+ : to_simd_array<T, N1>(x).val[indices])... });
#endif
}
@@ -1441,7 +1439,7 @@ KFR_INTRINSIC simd<T, N> simd_set_element(const simd<T, N>& value, size_t index,
}
#define SIMD_TYPE_INTRIN(T, N, TO_SCALAR, FROM_SCALAR, FROM_BROADCAST, FROM_ZERO) \
- KFR_INTRINSIC T simd_to_scalar(simd_t<T, N>, const simd<T, N>& x) { return TO_SCALAR; } \
+ KFR_INTRINSIC T simd_to_scalar(simd_t<T, N>, const simd<T, N>& x) { return TO_SCALAR; } \
KFR_INTRINSIC simd<T, N> simd_from_scalar(simd_t<T, N>, unwrap_bit<T> x) { return FROM_SCALAR; } \
KFR_INTRINSIC simd<T, N> simd_from_broadcast(simd_t<T, N>, unwrap_bit<T> x) { return FROM_BROADCAST; } \
KFR_INTRINSIC simd<T, N> simd_from_zero(simd_t<T, N>) { return FROM_ZERO; }
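
A minimal sketch of why dropping these calls is behaviour-preserving, assuming (as the surrounding code suggests) that unwrap_bit_value returns its argument unchanged when the element is already a plain scalar T rather than a bit<T> mask. The bit<T> and unwrap_bit_value definitions below are simplified stand-ins for illustration, not KFR's actual implementations.

#include <cstdio>

// Simplified stand-in for KFR's bit<T> mask element type.
template <typename T>
struct bit
{
    T storage; // raw all-ones/all-zeros mask pattern
};

// Plain scalar: nothing to unwrap, identity pass-through.
template <typename T>
T unwrap_bit_value(const T& value)
{
    return value;
}

// Mask element: extract the underlying representation.
template <typename T>
T unwrap_bit_value(const bit<T>& value)
{
    return value.storage;
}

int main()
{
    float x = 1.5f;
    // The shuffle paths touched above index into to_simd_array<T, N>(x).val, whose
    // elements are already plain T, so unwrap_bit_value(x) == x and the wrapper
    // call can be removed without changing the result.
    std::printf("%g %g\n", x, unwrap_bit_value(x));
    return 0;
}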