commit a38bef7ed41b72f71eb08848c3203e25e318a11c
parent 10faf3feba5ec538d51e2072844e4745fa6a1f59
Author: d.levin256@gmail.com <d.levin256@gmail.com>
Date: Tue, 19 Nov 2019 15:35:47 +0000
Fix typo and format code
Diffstat:
10 files changed, 36 insertions(+), 26 deletions(-)
diff --git a/format-all.py b/format-all.py
@@ -10,12 +10,21 @@ import glob
path = os.path.dirname(os.path.realpath(__file__))
masks = ['*.hpp', '*.h', '*.cpp', '*.c', '*.cxx']
+ignore = ['build/*', 'build-*', 'cmake-*', '.*']
filenames = []
for root, dirnames, files in os.walk(path, path):
- for mask in masks:
- for filename in fnmatch.filter(files, mask):
- filenames.append(os.path.join(root, filename))
+ ignore_dir = False
+ for mask in ignore:
+ if fnmatch.fnmatch(os.path.relpath(root, path), mask):
+ ignore_dir = True
+ break
+
+ if not ignore_dir:
+ for mask in masks:
+ for filename in fnmatch.filter(files, mask):
+ filenames.append(os.path.join(root, filename))
+
for filename in filenames:
print( filename, '...' )
diff --git a/include/kfr/base/sort.hpp b/include/kfr/base/sort.hpp
@@ -64,7 +64,7 @@ KFR_INTRINSIC vec<T, N> sort(const vec<T, N>& x)
o = rotateleft<1>(o);
e = t;
}
- return interleavehalfs(concat(e, o));
+ return interleavehalves(concat(e, o));
}
/**
@@ -97,7 +97,7 @@ KFR_INTRINSIC vec<T, N> sortdesc(const vec<T, N>& x)
o = rotateleft<1>(o);
e = t;
}
- return interleavehalfs(concat(e, o));
+ return interleavehalves(concat(e, o));
}
} // namespace CMT_ARCH_NAME
} // namespace kfr
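Note: the rename above is internal to sort()/sortdesc(); callers see the same API. A minimal usage sketch, assuming the usual KFR headers and that println() from kfr/io.hpp is available for printing vectors:

    #include <kfr/base/sort.hpp>
    #include <kfr/io.hpp>

    using namespace kfr;

    int main()
    {
        // sort()/sortdesc() operate on a whole SIMD vector at once
        const vec<float, 8> v(7, 1, 5, 3, 2, 8, 4, 6);
        println(sort(v));     // 1 2 3 4 5 6 7 8
        println(sortdesc(v)); // 8 7 6 5 4 3 2 1
    }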
diff --git a/include/kfr/dft/impl/dft-fft.hpp b/include/kfr/dft/impl/dft-fft.hpp
@@ -39,11 +39,11 @@ namespace kfr
{
#define DFT_STAGE_FN \
- KFR_MEM_INTRINSIC void do_execute(cdirect_t, complex<T>* out, const complex<T>* in, u8* temp) final \
+ KFR_MEM_INTRINSIC void do_execute(cdirect_t, complex<T>* out, const complex<T>* in, u8* temp) final \
{ \
return do_execute<false>(out, in, temp); \
} \
- KFR_MEM_INTRINSIC void do_execute(cinvert_t, complex<T>* out, const complex<T>* in, u8* temp) final \
+ KFR_MEM_INTRINSIC void do_execute(cinvert_t, complex<T>* out, const complex<T>* in, u8* temp) final \
{ \
return do_execute<true>(out, in, temp); \
}
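Note: the two hunks above only re-align the macro's line continuations; cdirect_t still dispatches to do_execute<false> (forward) and cinvert_t to do_execute<true> (inverse). A hedged sketch of how that pair surfaces in the public dft_plan API (the bool-inverse execute() overload and the kfr_dft link requirement are assumptions about the DFT module, not shown in this diff):

    #include <kfr/dft.hpp>

    using namespace kfr;

    int main()
    {
        const size_t size = 1024;
        dft_plan<float> plan(size);
        univector<complex<float>> in(size), out(size);
        univector<u8> temp(plan.temp_size);
        plan.execute(out.data(), in.data(), temp.data());       // forward path (cdirect_t)
        plan.execute(out.data(), in.data(), temp.data(), true); // inverse path (cinvert_t)
    }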
diff --git a/include/kfr/dft/impl/ft.hpp b/include/kfr/dft/impl/ft.hpp
@@ -185,7 +185,7 @@ KFR_INTRINSIC void cwrite_split(complex<T>* dest, const cvec<T, N>& value)
{
cvec<T, N> v = value;
if (split)
- v = interleavehalfs(v);
+ v = interleavehalves(v);
v.write(ptr_cast<T>(dest), cbool_t<A>());
}
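Note: cwrite_split() applies interleavehalves() to convert split complex data (all real parts followed by all imaginary parts) into the interleaved layout before storing. A small sketch of that layout change using the public shuffle, assuming the kfr/simd.hpp umbrella header; the values are illustrative only:

    #include <kfr/simd.hpp>
    #include <kfr/io.hpp>

    using namespace kfr;

    int main()
    {
        // split layout: re0 re1 re2 re3 | im0 im1 im2 im3
        const vec<float, 8> split_data(1, 2, 3, 4, 10, 20, 30, 40);
        // interleaved layout: re0 im0 re1 im1 ... -> 1 10 2 20 3 30 4 40
        println(interleavehalves(split_data));
    }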
diff --git a/include/kfr/dsp/mixdown.hpp b/include/kfr/dsp/mixdown.hpp
@@ -65,7 +65,7 @@ CMT_GNU_CONSTEXPR f64x2x2 matrix_sum_diff()
return { f64x2{ 1, 1 }, f64x2{ 1, -1 } };
}
template <int = 0>
-CMT_GNU_CONSTEXPR f64x2x2 matrix_halfsum_halfdiff()
+CMT_GNU_CONSTEXPR f64x2x2 matrix_halvesum_halfdiff()
{
return { f64x2{ 0.5, 0.5 }, f64x2{ 0.5, -0.5 } };
}
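Note: matrix_sum_diff() and the half-scaled variant renamed above are 2x2 stereo mixing matrices: row 0 yields l*m00 + r*m01 and row 1 yields l*m10 + r*m11 (sum/difference, optionally scaled by 0.5). The sketch below shows the intended use; the mixdown_stereo()/unpack() expression API is an assumption taken from KFR's DSP documentation and does not appear in this diff:

    #include <kfr/base.hpp>
    #include <kfr/dsp.hpp>

    using namespace kfr;

    int main()
    {
        univector<double, 8> left  = counter();     // 0 1 2 ... 7
        univector<double, 8> right = 7 - counter(); // 7 6 5 ... 0
        univector<double, 8> sum, diff;
        // row 0: left + right, row 1: left - right
        unpack(sum, diff) = mixdown_stereo(left, right, matrix_sum_diff());
    }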
diff --git a/include/kfr/simd/complex.hpp b/include/kfr/simd/complex.hpp
@@ -224,15 +224,16 @@ template <typename T, size_t N, size_t... indices>
KFR_INTRINSIC vec<complex<T>, sizeof...(indices)> shufflevector(const vec<complex<T>, N>& x,
csizes_t<indices...>) CMT_NOEXCEPT
{
- return intrinsics::simd_shuffle(intrinsics::simd_t<unwrap_bit<T>, N>{}, x.v, scale<2, indices...>(), overload_auto);
+ return intrinsics::simd_shuffle(intrinsics::simd_t<unwrap_bit<T>, N>{}, x.v, scale<2, indices...>(),
+ overload_auto);
}
template <typename T, size_t N, size_t... indices>
KFR_INTRINSIC vec<complex<T>, sizeof...(indices)> shufflevectors(const vec<complex<T>, N>& x,
const vec<T, N>& y,
csizes_t<indices...>) CMT_NOEXCEPT
{
- return intrinsics::simd_shuffle(intrinsics::simd2_t<unwrap_bit<T>, N, N>{}, x.v, y.v, scale<2, indices...>(),
- overload_auto);
+ return intrinsics::simd_shuffle(intrinsics::simd2_t<unwrap_bit<T>, N, N>{}, x.v, y.v,
+ scale<2, indices...>(), overload_auto);
}
namespace internal
{
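Note: the re-wrapping above is cosmetic; the relevant detail, unchanged by this commit, is scale<2, indices...>(), which expands every complex index into its two underlying scalar lanes so that shuffles of vec<complex<T>, N> move whole complex elements rather than individual scalars. A small sketch of that behaviour, assuming the kfr/simd.hpp umbrella header:

    #include <kfr/simd.hpp>
    #include <kfr/io.hpp>

    using namespace kfr;

    int main()
    {
        const vec<complex<float>, 4> v(complex<float>(1, 10), complex<float>(2, 20),
                                       complex<float>(3, 30), complex<float>(4, 40));
        // each complex element moves as one unit: (4+40j) (3+30j) (2+20j) (1+10j)
        println(reverse(v));
    }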
diff --git a/include/kfr/simd/impl/specializations.i b/include/kfr/simd/impl/specializations.i
@@ -108,7 +108,7 @@ inline vec<f32, 32> shufflevector<f32, 32>(
{
const vec<f32, 32> xx = permutegroups<(8), 0, 2, 1, 3>(x);
- return concat(interleavehalfs(low(xx)), interleavehalfs(high(xx)));
+ return concat(interleavehalves(low(xx)), interleavehalves(high(xx)));
}
} // namespace intrinsics
} // namespace CMT_ARCH_NAME
diff --git a/include/kfr/simd/shuffle.hpp b/include/kfr/simd/shuffle.hpp
@@ -284,11 +284,11 @@ KFR_INTRINSIC vec<T, N> dupodd(const vec<T, N>& x)
KFR_FN(dupodd)
template <typename T, size_t N>
-KFR_INTRINSIC vec<T, N * 2> duphalfs(const vec<T, N>& x)
+KFR_INTRINSIC vec<T, N * 2> duphalves(const vec<T, N>& x)
{
return x.shuffle(csizeseq<N * 2> % csize<N>);
}
-KFR_FN(duphalfs)
+KFR_FN(duphalves)
template <size_t... Indices, typename T, size_t N, size_t count = sizeof...(Indices)>
KFR_INTRINSIC vec<T, N> shuffle(const vec<T, N>& x, const vec<T, N>& y,
@@ -485,12 +485,12 @@ KFR_FN(interleave)
template <size_t group = 1, typename T, size_t N, size_t size = N / group, size_t side2 = 2,
size_t side1 = size / side2>
-KFR_INTRINSIC vec<T, N> interleavehalfs(const vec<T, N>& x)
+KFR_INTRINSIC vec<T, N> interleavehalves(const vec<T, N>& x)
{
return x.shuffle(scale<group>(csizeseq_t<size>() % csize_t<side2>() * csize_t<side1>() +
csizeseq_t<size>() / csize_t<side2>()));
}
-KFR_FN(interleavehalfs)
+KFR_FN(interleavehalves)
template <size_t group = 1, typename T, size_t N, size_t size = N / group, size_t side1 = 2,
size_t side2 = size / side1>
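Note: behaviour of the two renamed functions is unchanged (the unit tests at the end of this commit pin down the exact index patterns): duphalves(x) repeats the whole vector, and interleavehalves(x) interleaves the two halves of x, making it the inverse of splitpairs(). A minimal sketch, assuming the kfr/simd.hpp umbrella header:

    #include <kfr/simd.hpp>
    #include <kfr/io.hpp>

    using namespace kfr;

    int main()
    {
        const vec<int, 4> a(0, 1, 2, 3);
        println(duphalves(a)); // 0 1 2 3 0 1 2 3

        const vec<int, 8> b(0, 1, 2, 3, 4, 5, 6, 7);
        println(splitpairs(b));                   // 0 2 4 6 1 3 5 7
        println(interleavehalves(splitpairs(b))); // 0 1 2 3 4 5 6 7
    }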
diff --git a/tests/asm_test.cpp b/tests/asm_test.cpp
@@ -233,7 +233,7 @@ TEST_ASM_UIF(shl, TEST_ASM_SHIFT_SCALAR)
TEST_ASM_UIF(shr, TEST_ASM_SHIFT_SCALAR)
-TEST_ASM_UIF(duphalfs, TEST_ASM_DOUBLE1)
+TEST_ASM_UIF(duphalves, TEST_ASM_DOUBLE1)
TEST_ASM_UIF(sqr, TEST_ASM_VTY1)
@@ -245,7 +245,7 @@ TEST_ASM_UIF(read, TEST_READ)
TEST_ASM_UIF(write, TEST_WRITE)
-#define TEST_FFT_SPEC(ty, size) \
+#define TEST_FFT_SPEC(ty, size) \
static intrinsics::fft_specialization<ty, size> fft__##ty##__##size(static_cast<size_t>(1 << size)); \
KFR_PUBLIC void asm__test__fft__##ty##__##size(complex<ty>* out, const complex<ty>* in, u8* temp) \
{ \
@@ -255,13 +255,13 @@ TEST_ASM_UIF(write, TEST_WRITE)
{ \
fft__##ty##__##size.do_execute<true>(out, in, temp); \
}
-#define TEST_FFT_GEN(ty) \
- static intrinsics::fft_stage_impl<ty, true, true> fft__##ty##__##size(static_cast<size_t>(65526)); \
- KFR_PUBLIC void asm__test__fft__##ty##__gen(complex<ty>* out, const complex<ty>* in, u8* temp) \
+#define TEST_FFT_GEN(ty) \
+ static intrinsics::fft_stage_impl<ty, true, true> fft__##ty##__##size(static_cast<size_t>(65526)); \
+ KFR_PUBLIC void asm__test__fft__##ty##__gen(complex<ty>* out, const complex<ty>* in, u8* temp) \
{ \
fft__##ty##__##size.do_execute<false>(out, in, temp); \
} \
- KFR_PUBLIC void asm__test__ifft__##ty##__gen(complex<ty>* out, const complex<ty>* in, u8* temp) \
+ KFR_PUBLIC void asm__test__ifft__##ty##__gen(complex<ty>* out, const complex<ty>* in, u8* temp) \
{ \
fft__##ty##__##size.do_execute<true>(out, in, temp); \
}
diff --git a/tests/unit/simd/shuffle.cpp b/tests/unit/simd/shuffle.cpp
@@ -51,7 +51,7 @@ TEST(blend)
TEST(duplicate_shuffle)
{
CHECK(dup(pack(0, 1, 2, 3)) == pack(0, 0, 1, 1, 2, 2, 3, 3));
- CHECK(duphalfs(pack(0, 1, 2, 3)) == pack(0, 1, 2, 3, 0, 1, 2, 3));
+ CHECK(duphalves(pack(0, 1, 2, 3)) == pack(0, 1, 2, 3, 0, 1, 2, 3));
CHECK(dupeven(pack(0, 1, 2, 3, 4, 5, 6, 7)) == pack(0, 0, 2, 2, 4, 4, 6, 6));
CHECK(dupodd(pack(0, 1, 2, 3, 4, 5, 6, 7)) == pack(1, 1, 3, 3, 5, 5, 7, 7));
}
@@ -71,8 +71,8 @@ TEST(split_interleave)
CHECK(splitpairs(pack(0, 1, 2, 3, 4, 5, 6, 7)) == pack(0, 2, 4, 6, 1, 3, 5, 7));
CHECK(splitpairs<2>(pack(0, 1, 2, 3, 4, 5, 6, 7)) == pack(0, 1, 4, 5, 2, 3, 6, 7));
- CHECK(interleavehalfs(pack(0, 1, 2, 3, 4, 5, 6, 7)) == pack(0, 4, 1, 5, 2, 6, 3, 7));
- CHECK(interleavehalfs<2>(pack(0, 1, 2, 3, 4, 5, 6, 7)) == pack(0, 1, 4, 5, 2, 3, 6, 7));
+ CHECK(interleavehalves(pack(0, 1, 2, 3, 4, 5, 6, 7)) == pack(0, 4, 1, 5, 2, 6, 3, 7));
+ CHECK(interleavehalves<2>(pack(0, 1, 2, 3, 4, 5, 6, 7)) == pack(0, 1, 4, 5, 2, 3, 6, 7));
}
TEST(broadcast)