I am given an array of lowercase characters (up to 1.5 GB) and a character c, and I want to count how many occurrences of the character c there are, using AVX instructions.
I'm intentionally leaving out some parts, which you need to figure out yourself (e.g. handling lengths that aren't a multiple of 4*255*32 bytes), but your innermost loop should look something like the one starting with for(int i...):
_mm256_cmpeq_epi8 will get you a -1 (all bits set) in each matching byte, which you can use as an integer. If you subtract that from a counter (using _mm256_sub_epi8), each match increments its byte lane by 1, so you can directly count up to 255 or 128. The inner loop contains just these two intrinsics. You have to stop and widen the byte counters (the SAD trick below) before they can overflow, i.e. after at most 255 inner iterations.
#include <immintrin.h>
#include <stdint.h>
static inline
__m256i hsum_epu8_epu64(__m256i v) {
return _mm256_sad_epu8(v, _mm256_setzero_si256()); // SAD against zero is a handy trick
}
static inline
uint64_t hsum_epu64_scalar(__m256i v) {
__m128i lo = _mm256_castsi256_si128(v);
__m128i hi = _mm256_extracti128_si256(v, 1);
__m128i sum2x64 = _mm_add_epi64(lo, hi); // narrow to 128
hi = _mm_unpackhi_epi64(sum2x64, sum2x64);
__m128i sum = _mm_add_epi64(hi, sum2x64); // narrow to 64
return _mm_cvtsi128_si64(sum);
}
unsigned long long char_count_AVX2(char const* vector, size_t size, char c)
{
__m256i C=_mm256_set1_epi8(c);
// todo: count elements and increment `vector` until it is aligned to 256bits (=32 bytes)
__m256i const * simd_vector = (__m256i const *) vector;
// *simd_vector is an alignment-required load, unlike _mm256_loadu_si256()
__m256i sum64 = _mm256_setzero_si256();
size_t unrolled_size_limit = size - 4*255*32 + 1; // caution: wraps if size < 4*255*32 (part of the TODO length handling)
for(size_t k=0; k<unrolled_size_limit ; k+=4*255*32) // outer loop: TODO
{
__m256i counter[4]; // multiple counter registers to hide latencies
for(int j=0; j<4; j++)
counter[j]=_mm256_setzero_si256();
// inner loop: make sure that you don't go beyond the data you can read
for(int i=0; i<255; ++i)
{ // or limit this inner loop to ~22 to avoid branch mispredicts
for(int j=0; j<4; ++j)
{
counter[j]=_mm256_sub_epi8(counter[j], // count -= 0 or -1
_mm256_cmpeq_epi8(*simd_vector, C));
++simd_vector;
}
}
// only need one outer accumulator: OoO exec hides the latency of adding into it
sum64 = _mm256_add_epi64(sum64, hsum_epu8_epu64(counter[0]));
sum64 = _mm256_add_epi64(sum64, hsum_epu8_epu64(counter[1]));
sum64 = _mm256_add_epi64(sum64, hsum_epu8_epu64(counter[2]));
sum64 = _mm256_add_epi64(sum64, hsum_epu8_epu64(counter[3]));
}
uint64_t sum = hsum_epu64_scalar(sum64);
// TODO: add the remaining bytes into sum,
// including a rolled-up vector loop before going scalar,
// because we're potentially a *long* way from the end.
// Maybe add logic to the main loop to shorten the 255 inner iterations
// when we're close to the end; a little scalar work every 255 iterations shouldn't hurt.
return sum;
}
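For reference, here's a minimal driver (my addition, not part of the answer) that satisfies the function's current assumptions: a 32-byte-aligned buffer whose length is an exact multiple of 4*255*32 bytes, so none of the TODO cleanup paths are needed:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
    // size is a multiple of 4*255*32 (and of 32, as aligned_alloc requires)
    size_t size = (size_t)4*255*32 * 1024;   // ~32 MiB
    char *buf = aligned_alloc(32, size);
    if (!buf) return 1;
    memset(buf, 'a', size);
    buf[123] = 'x';
    buf[size - 1] = 'x';
    printf("%llu\n", char_count_AVX2(buf, size, 'x'));  // expect 2
    free(buf);
    return 0;
}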
Godbolt link: https://godbolt.org/z/do5e3- (clang is slightly better than gcc at unrolling the innermost loop: gcc includes some useless vmovdqa instructions that will bottleneck the front-end if the data is hot in L1d cache, preventing us from running close to 2x 32-byte loads per clock)
If you don't insist on using only SIMD instructions, you can make use of the VPMOVMSKB instruction in combination with the POPCNT instruction. The former combines the highest bit of each byte into a 32-bit integer mask, and the latter counts the 1 bits in that integer (= the number of matching bytes).
int counter = 0;
for (size_t i = 0; i < size; i += 32) {
    ...
    counter +=
        _mm_popcnt_u32(
            (unsigned int)_mm256_movemask_epi8(
                _mm256_cmpeq_epi8(C, *((__m256i*)(vector + i)))
            )
        );
    ...
}
I haven't tested this solution, but you should get the gist.
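Filling in the elided parts, a complete version might look like the following (an untested sketch along the same lines, using unaligned loads and a scalar tail; compile with -mavx2 -mpopcnt):

#include <immintrin.h>
#include <stddef.h>

size_t char_count_popcnt(const char *vector, size_t size, char c)
{
    const __m256i C = _mm256_set1_epi8(c);
    size_t count = 0, i = 0;
    for (; i + 32 <= size; i += 32) {
        __m256i chunk = _mm256_loadu_si256((const __m256i *)(vector + i));
        // movemask packs the top bit of each byte into bits 0..31;
        // popcnt then counts how many of the 32 bytes matched
        count += (unsigned)_mm_popcnt_u32(
            (unsigned)_mm256_movemask_epi8(_mm256_cmpeq_epi8(C, chunk)));
    }
    for (; i < size; ++i)   // scalar tail for the last <32 bytes
        count += vector[i] == c;
    return count;
}

Note that this does more work per 32-byte vector (compare, movemask, popcnt, add) than the compare/subtract approach above (compare, subtract), which is one reason the byte-counter version tends to win on large inputs.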
Probably the fastest: memcount_avx2 and memcount_sse2
size_t memcount_avx2(const void *s, int c, size_t n)
{
__m256i cv = _mm256_set1_epi8(c),
zv = _mm256_setzero_si256(),
sum = zv, acr0,acr1,acr2,acr3;
const char *p, *pe;
// main loop: 252*32-byte blocks; each accumulator sees 252*32/128 = 63
// compare/subtract steps per block, so the byte counters can't overflow
for (p = s; p != (const char *)s + (n - n % (252*32)); )
{
    for (acr0 = acr1 = acr2 = acr3 = zv, pe = p + 252*32; p != pe; p += 128)
{
acr0 = _mm256_sub_epi8(acr0, _mm256_cmpeq_epi8(cv, _mm256_lddqu_si256((const __m256i *)p)));
acr1 = _mm256_sub_epi8(acr1, _mm256_cmpeq_epi8(cv, _mm256_lddqu_si256((const __m256i *)(p+32))));
acr2 = _mm256_sub_epi8(acr2, _mm256_cmpeq_epi8(cv, _mm256_lddqu_si256((const __m256i *)(p+64))));
acr3 = _mm256_sub_epi8(acr3, _mm256_cmpeq_epi8(cv, _mm256_lddqu_si256((const __m256i *)(p+96))));
__builtin_prefetch(p+1024); // prefetch ahead; mostly helps when the data isn't already in cache
}
sum = _mm256_add_epi64(sum, _mm256_sad_epu8(acr0, zv));
sum = _mm256_add_epi64(sum, _mm256_sad_epu8(acr1, zv));
sum = _mm256_add_epi64(sum, _mm256_sad_epu8(acr2, zv));
sum = _mm256_add_epi64(sum, _mm256_sad_epu8(acr3, zv));
}
// vector cleanup: whole 32-byte chunks left over from the main loop
// (fewer than 252 of them, so acr0's byte counters can't overflow)
for(acr0 = zv; p + 32 < (const char *)s + n; p += 32)
    acr0 = _mm256_sub_epi8(acr0, _mm256_cmpeq_epi8(cv, _mm256_lddqu_si256((const __m256i *)p)));
sum = _mm256_add_epi64(sum, _mm256_sad_epu8(acr0, zv));
size_t count = _mm256_extract_epi64(sum, 0)
+ _mm256_extract_epi64(sum, 1)
+ _mm256_extract_epi64(sum, 2)
+ _mm256_extract_epi64(sum, 3);
while(p != (const char *)s + n) // scalar tail for the final bytes
    count += *p++ == c;
return count;
}
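The tail handling is where off-by-one bugs like to hide, so a differential test against a trivial scalar counter is cheap insurance (hypothetical harness, my addition, not from the post):

#include <stdio.h>
#include <stdlib.h>

static size_t memcount_scalar(const void *s, int c, size_t n)
{
    const unsigned char *p = s;
    size_t count = 0;
    for (size_t i = 0; i < n; ++i)
        count += p[i] == (unsigned char)c;
    return count;
}

int main(void)
{
    srand(1);
    // random lengths up to 70000 bytes, crossing the 252*32 block boundary
    for (size_t n = 0; n < 70000; n += 1 + rand() % 997) {
        char *buf = malloc(n ? n : 1);
        for (size_t i = 0; i < n; ++i)
            buf[i] = 'a' + rand() % 26;
        if (memcount_avx2(buf, 'q', n) != memcount_scalar(buf, 'q', n)) {
            printf("mismatch at n=%zu\n", n);
            free(buf);
            return 1;
        }
        free(buf);
    }
    puts("ok");
    return 0;
}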
Benchmark (Skylake i7-6700 @ 3.4 GHz, gcc 8.3):

memcount_avx2:   28 GB/s
memcount_sse2:   23 GB/s
char_count_AVX2: 23 GB/s (from the post above)
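For anyone wanting to reproduce numbers of this kind, throughput can be estimated with a simple harness along these lines (assumed methodology; the exact setup used for the table above isn't shown, and you'd want to repeat the timed call and take the best run to reduce noise):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

int main(void)
{
    size_t n = (size_t)1 << 30;   // 1 GiB, large enough to defeat the caches
    char *buf = malloc(n);
    if (!buf) return 1;
    memset(buf, 'a', n);
    struct timespec t0, t1;
    clock_gettime(CLOCK_MONOTONIC, &t0);
    size_t hits = memcount_avx2(buf, 'a', n);
    clock_gettime(CLOCK_MONOTONIC, &t1);
    double secs = (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) * 1e-9;
    printf("%zu hits, %.1f GB/s\n", hits, n / secs / 1e9);
    free(buf);
    return 0;
}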