Coverage Report

Created: 2025-07-12 06:15

/src/zlib-ng/arch/x86/compare256_avx512.c
Every instrumented line in this file reports an execution count of 0 (the file is entirely uncovered).
/* compare256_avx512.c -- AVX512 version of compare256
 * Copyright (C) 2025 Hans Kristian Rosbach
 * Based on AVX2 implementation by Mika T. Lindqvist
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include "zbuild.h"
#include "zmemory.h"
#include "deflate.h"
#include "fallback_builtins.h"

#if defined(X86_AVX512) && defined(HAVE_BUILTIN_CTZLL)

#include <immintrin.h>
#ifdef _MSC_VER
#  include <nmmintrin.h>
#endif

static inline uint32_t compare256_avx512_static(const uint8_t *src0, const uint8_t *src1) {
    __m512i zmm_src0_4, zmm_src1_4;
    __m512i zmm_src0_3, zmm_src1_3;
    __m512i zmm_src0_2, zmm_src1_2;
    __m512i zmm_src0_1, zmm_src1_1;
    __m128i xmm_src0_0, xmm_src1_0;
    uint64_t mask_1, mask_2, mask_3, mask_4;
    uint32_t mask_0;

    // Do a 16-byte round first before moving up to 64-byte rounds; this reduces
    // the penalty for short matches, and those are usually the most common ones.
    // It requires the last round to overlap, giving a small penalty on matches
    // of 192+ bytes (still faster than AVX2, though).

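    // Resulting comparison layout over the 256-byte window:
    //   round 0: bytes   0..15   (16-byte XMM round)
    //   round 1: bytes  16..79   (64-byte ZMM rounds from here on)
    //   round 2: bytes  80..143
    //   round 3: bytes 144..207
    //   round 4: bytes 192..255  (re-checks 192..207, see note below)
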
    // 16 bytes
    xmm_src0_0 = _mm_loadu_si128((__m128i*)src0);
    xmm_src1_0 = _mm_loadu_si128((__m128i*)src1);
    mask_0 = (uint32_t)_mm_cmpeq_epu8_mask(xmm_src0_0, xmm_src1_0); // zero-extended to use __builtin_ctz
    if (mask_0 != 0x0000FFFF) {
        // There is potential for using __builtin_ctzg/__builtin_ctzs/_tzcnt_u16/__tzcnt_u16 here
        uint32_t match_byte = (uint32_t)__builtin_ctz(~mask_0); /* Invert bits so identical = 0 */
        return match_byte;
    }
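
    // How the mask trick works: the cmpeq mask sets bit i when src0[i] == src1[i],
    // so inverting it turns the first mismatching byte into the lowest set bit,
    // and counting trailing zeros yields the match length. For example, a first
    // mismatch at byte 3 gives mask_0 = 0x0000FFF7, so __builtin_ctz(~mask_0) == 3.
    // The 64-byte rounds below apply the same trick with 64-bit masks and ctzll.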

    // 64 bytes
    zmm_src0_1 = _mm512_loadu_si512((__m512i*)(src0 + 16));
    zmm_src1_1 = _mm512_loadu_si512((__m512i*)(src1 + 16));
    mask_1 = _mm512_cmpeq_epu8_mask(zmm_src0_1, zmm_src1_1);
    if (mask_1 != 0xFFFFFFFFFFFFFFFF) {
        uint32_t match_byte = (uint32_t)__builtin_ctzll(~mask_1);
        return 16 + match_byte;
    }

    // 64 bytes
    zmm_src0_2 = _mm512_loadu_si512((__m512i*)(src0 + 80));
    zmm_src1_2 = _mm512_loadu_si512((__m512i*)(src1 + 80));
    mask_2 = _mm512_cmpeq_epu8_mask(zmm_src0_2, zmm_src1_2);
    if (mask_2 != 0xFFFFFFFFFFFFFFFF) {
        uint32_t match_byte = (uint32_t)__builtin_ctzll(~mask_2);
        return 80 + match_byte;
    }

    // 64 bytes
    zmm_src0_3 = _mm512_loadu_si512((__m512i*)(src0 + 144));
    zmm_src1_3 = _mm512_loadu_si512((__m512i*)(src1 + 144));
    mask_3 = _mm512_cmpeq_epu8_mask(zmm_src0_3, zmm_src1_3);
    if (mask_3 != 0xFFFFFFFFFFFFFFFF) {
        uint32_t match_byte = (uint32_t)__builtin_ctzll(~mask_3);
        return 144 + match_byte;
    }

    // 64 bytes (overlaps the previous 16 bytes for fast tail processing)
    zmm_src0_4 = _mm512_loadu_si512((__m512i*)(src0 + 192));
    zmm_src1_4 = _mm512_loadu_si512((__m512i*)(src1 + 192));
    mask_4 = _mm512_cmpeq_epu8_mask(zmm_src0_4, zmm_src1_4);
    if (mask_4 != 0xFFFFFFFFFFFFFFFF) {
        uint32_t match_byte = (uint32_t)__builtin_ctzll(~mask_4);
        return 192 + match_byte;
    }
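
    // Note on the overlap: bytes 192..207 were already verified equal by the
    // previous round (it would have returned otherwise), so the low 16 bits of
    // mask_4 are guaranteed set, ctzll(~mask_4) is always >= 16, and the
    // returned offset stays correct.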

    return 256;
}

Z_INTERNAL uint32_t compare256_avx512(const uint8_t *src0, const uint8_t *src1) {
    return compare256_avx512_static(src0, src1);
}

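// match_tpl.h acts as a macro template: including it twice with different
// LONGEST_MATCH/LONGEST_MATCH_SLOW settings stamps out both
// longest_match_avx512 and longest_match_slow_avx512 on top of
// compare256_avx512_static.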
#define LONGEST_MATCH       longest_match_avx512
#define COMPARE256          compare256_avx512_static

#include "match_tpl.h"

#define LONGEST_MATCH_SLOW
#define LONGEST_MATCH       longest_match_slow_avx512
#define COMPARE256          compare256_avx512_static

#include "match_tpl.h"

#endif
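
Since every line above reports zero executions, a minimal harness along the lines of the sketch below could exercise the function. This is hypothetical test code, not part of zlib-ng; it assumes a build with X86_AVX512 defined and that the translation unit above is linked in.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Declared in zlib-ng's deflate.h; repeated here to keep the sketch self-contained. */
uint32_t compare256_avx512(const uint8_t *src0, const uint8_t *src1);

int main(void) {
    uint8_t a[256], b[256];
    memset(a, 0xAB, sizeof(a));
    memcpy(b, a, sizeof(b));

    /* Identical buffers: expect the full match length of 256. */
    printf("full match: %u\n", compare256_avx512(a, b));

    /* Introduce a mismatch at byte 100: expect a return value of 100. */
    b[100] ^= 0xFF;
    printf("mismatch at 100: %u\n", compare256_avx512(a, b));
    return 0;
}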