 #include <iterator>
 #include <cstdint>
 #include <cassert>
-
-#include <Siv3D/SIMD.hpp>
-#ifndef __SSE2__
-#define __SSE2__ 1
-#endif
-#ifndef __SSSE3__
-#define __SSSE3__ 1
-#endif
-#ifndef __SSE4_1__
-#define __SSE4_1__ 1
-#endif
-
-#ifdef __AVX2__
-#include <immintrin.h>
-#endif
+#include <limits>

 namespace levenshteinSSE {
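The removed block force-defined the compiler's feature macros (__SSE2__, __SSSE3__, __SSE4_1__), which can enable intrinsics on targets that do not actually support them. A safer pattern, sketched below under the assumption that the build flags (e.g. -mssse3, -mavx2) define these macros, is to test them rather than define them:

    // Let the compiler define the feature macros via build flags
    // (-mssse3, -mavx2, ...) and only test them here.
    #if defined(__AVX2__)
    #include <immintrin.h>   // AVX2 (and earlier) intrinsics
    #elif defined(__SSSE3__)
    #include <tmmintrin.h>   // SSSE3 (and earlier) intrinsics
    #endif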
@@ -198,9 +184,9 @@ constexpr std::size_t alignment = 1;
 template <typename Vec1, typename Vec2, typename Iterator1, typename Iterator2>
 struct LevenshteinIterationBase {
 static inline void perform(const Iterator1& a, const Iterator2& b,
-    std::size_t& i, std::size_t j, std::size_t bLen, Vec1& diag, const Vec2& diag2)
+    std::size_t& i, std::size_t j, [[maybe_unused]] std::size_t bLen, Vec1& diag, const Vec2& diag2)
 {
-  std::size_t min = std::min(diag2[i], diag2[i-1]);
+  std::uint32_t min = static_cast<std::uint32_t>(std::min(diag2[i], diag2[i-1]));
   if (min < diag[i-1]) {
     diag[i] = min + 1;
   }
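[[maybe_unused]] (C++17) suppresses unused-parameter warnings for bLen, which this scalar fallback keeps only for signature parity with the SIMD overloads, and the static_cast makes the narrowing to std::uint32_t explicit. A minimal standalone sketch of the attribute (names are illustrative, not from this repository):

    #include <cstddef>

    // bLen is unused on the scalar path but kept so all overloads
    // share one signature; the attribute keeps -Wall -Wextra quiet.
    static void scalarStep([[maybe_unused]] std::size_t bLen) {
        // scalar work that never reads bLen
    }

    int main() { scalarStep(128); }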
@@ -280,7 +266,7 @@ static inline void performSIMD(const T* a, const T* b,

 #ifdef __SSSE3__
 static inline void performSSE(const T* a, const T* b,
-    std::size_t& i, std::size_t j, std::size_t bLen,
+    std::size_t& i, std::size_t j, [[maybe_unused]] std::size_t bLen,
     std::uint32_t* diag, const std::uint32_t* diag2)
 {
   const __m128i one128_epi32 = _mm_set1_epi32(1);
@@ -315,10 +301,10 @@ static inline void performSSE(const T* a, const T* b,
   // We support 1, 2, and 4 byte objects for SSE comparison.
   // We always process 16 entries at once, so we may need multiple fetches
   // depending on object size.
-  if (sizeof(T) <= 2) {
+  if constexpr (sizeof(T) <= 2) {
     __m128i substitutionCost16LX, substitutionCost16HX;

-    if (sizeof(T) == 1) {
+    if constexpr (sizeof(T) == 1) {
       __m128i a_ = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&a[i-16]));
       __m128i b_ = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&b[j-1]));
       a_ = _mm_shuffle_epi8(a_, reversedIdentity128_epi8);
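Replacing if with if constexpr turns the sizeof(T) dispatch into a compile-time decision: only the taken branch is instantiated, so each element width gets exactly the code path it needs and the untaken branches are discarded entirely. A standalone sketch of the idiom:

    #include <cstdint>

    template <typename T>
    std::uint32_t widen(T v) {
        // Branch selected at compile time; untaken branches are discarded.
        if constexpr (sizeof(T) == 1) {
            return static_cast<std::uint8_t>(v);    // 1-byte path
        } else if constexpr (sizeof(T) == 2) {
            return static_cast<std::uint16_t>(v);   // 2-byte path
        } else {
            return static_cast<std::uint32_t>(v);   // 4-byte path
        }
    }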
@@ -782,10 +768,10 @@ T levenshteinDiagonal(Iterator1 a, Iterator1 aEnd, Iterator2 b, Iterator2 bEnd)
       ::perform(a, b, i, j, bLen, diag, diag2);
   }

-  diag[0] = k;
+  diag[0] = static_cast<T>(k);

   if (k <= aLen) {
-    diag[k] = k;
+    diag[k] = static_cast<T>(k);
   }

   if (k == aLen + bLen) {
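The diag buffer presumably stores the distance type T, so assigning the std::size_t counter k narrows whenever T is smaller than std::size_t; the explicit static_cast<T>(k) documents that the narrowing is intended and silences -Wconversion. A trivial illustration:

    #include <cstddef>
    #include <cstdint>

    int main() {
        std::size_t k = 42;   // typically 64-bit
        // Deliberate narrowing, made explicit for -Wconversion.
        std::uint32_t cell = static_cast<std::uint32_t>(k);
        return cell == 42 ? 0 : 1;
    }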
@@ -796,6 +782,8 @@ T levenshteinDiagonal(Iterator1 a, Iterator1 aEnd, Iterator2 b, Iterator2 bEnd)
     // switch buffers
     std::swap(diag, diag2);
   }
+
+  assert(0);
 }

 /**
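The added assert(0) marks the code after the loop as unreachable: the function is expected to exit through the early return taken when k == aLen + bLen, so falling out of the loop would be a logic error, which debug builds now catch. A standalone sketch of the pattern (illustrative names):

    #include <cassert>
    #include <cstddef>

    // Returns the index of the first zero; the caller guarantees one exists.
    std::size_t firstZero(const std::size_t* v, std::size_t n) {
        for (std::size_t i = 0; i < n; ++i) {
            if (v[i] == 0) return i;   // the only intended exit
        }
        assert(0);   // unreachable under the caller's precondition
        return n;    // placates release builds, where NDEBUG disables assert
    }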