Coverage Report

Created: 2026-02-14 06:59

/src/libvpx/vpx_dsp/x86/subtract_avx2.c
Line |  Count | Source
   1 |        | /*
   2 |        |  *  Copyright (c) 2022 The WebM project authors. All Rights Reserved.
   3 |        |  *
   4 |        |  *  Use of this source code is governed by a BSD-style license
   5 |        |  *  that can be found in the LICENSE file in the root of the source
   6 |        |  *  tree. An additional intellectual property rights grant can be found
   7 |        |  *  in the file PATENTS.  All contributing project authors may
   8 |        |  *  be found in the AUTHORS file in the root of the source tree.
   9 |        |  */
  10 |        |
  11 |        | #include <assert.h>
  12 |        | #include <immintrin.h>
  13 |        |
  14 |        | #include "./vpx_dsp_rtcd.h"
  15 |        | #include "vpx/vpx_integer.h"
  16 |        |
  17 |        | static VPX_FORCE_INLINE void subtract32_avx2(int16_t *diff_ptr,
  18 |        |                                              const uint8_t *src_ptr,
  19 |   128M |                                              const uint8_t *pred_ptr) {
  20 |   128M |   const __m256i s = _mm256_lddqu_si256((const __m256i *)src_ptr);
  21 |   128M |   const __m256i p = _mm256_lddqu_si256((const __m256i *)pred_ptr);
  22 |   128M |   const __m256i s_0 = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(s));
  23 |   128M |   const __m256i s_1 = _mm256_cvtepu8_epi16(_mm256_extracti128_si256(s, 1));
  24 |   128M |   const __m256i p_0 = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(p));
  25 |   128M |   const __m256i p_1 = _mm256_cvtepu8_epi16(_mm256_extracti128_si256(p, 1));
  26 |   128M |   const __m256i d_0 = _mm256_sub_epi16(s_0, p_0);
  27 |   128M |   const __m256i d_1 = _mm256_sub_epi16(s_1, p_1);
  28 |   128M |   _mm256_storeu_si256((__m256i *)diff_ptr, d_0);
  29 |   128M |   _mm256_storeu_si256((__m256i *)(diff_ptr + 16), d_1);
  30 |   128M | }
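
For reference, subtract32_avx2 computes one row of 32 residuals: each uint8_t
source/prediction pair is widened to 16 bits and subtracted, so negative
residuals remain representable. A minimal scalar sketch of the same arithmetic
(subtract32_ref is a hypothetical helper, not part of libvpx):

  #include <stdint.h>

  /* Scalar equivalent of subtract32_avx2: diff[i] = src[i] - pred[i],
   * stored as int16_t. Hypothetical reference helper, not libvpx code. */
  void subtract32_ref(int16_t *diff_ptr, const uint8_t *src_ptr,
                      const uint8_t *pred_ptr) {
    int i;
    for (i = 0; i < 32; ++i) {
      diff_ptr[i] = (int16_t)(src_ptr[i] - pred_ptr[i]);
    }
  }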
  31 |        |
  32 |        | static VPX_FORCE_INLINE void subtract_block_16xn_avx2(
  33 |        |     int rows, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr,
  34 |  24.7M |     ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride) {
  35 |  24.7M |   int j;
  36 |   412M |   for (j = 0; j < rows; ++j) {
  37 |   388M |     const __m128i s = _mm_lddqu_si128((const __m128i *)src_ptr);
  38 |   388M |     const __m128i p = _mm_lddqu_si128((const __m128i *)pred_ptr);
  39 |   388M |     const __m256i s_0 = _mm256_cvtepu8_epi16(s);
  40 |   388M |     const __m256i p_0 = _mm256_cvtepu8_epi16(p);
  41 |   388M |     const __m256i d_0 = _mm256_sub_epi16(s_0, p_0);
  42 |   388M |     _mm256_storeu_si256((__m256i *)diff_ptr, d_0);
  43 |   388M |     src_ptr += src_stride;
  44 |   388M |     pred_ptr += pred_stride;
  45 |   388M |     diff_ptr += diff_stride;
  46 |   388M |   }
  47 |  24.7M | }
  48 |        |
  49 |        | static VPX_FORCE_INLINE void subtract_block_32xn_avx2(
  50 |        |     int rows, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr,
  51 |  3.81M |     ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride) {
  52 |  3.81M |   int j;
  53 |   122M |   for (j = 0; j < rows; ++j) {
  54 |   118M |     subtract32_avx2(diff_ptr, src_ptr, pred_ptr);
  55 |   118M |     src_ptr += src_stride;
  56 |   118M |     pred_ptr += pred_stride;
  57 |   118M |     diff_ptr += diff_stride;
  58 |   118M |   }
  59 |  3.81M | }
  60 |        |
  61 |        | static VPX_FORCE_INLINE void subtract_block_64xn_avx2(
  62 |        |     int rows, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr,
  63 |  97.9k |     ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride) {
  64 |  97.9k |   int j;
  65 |  5.00M |   for (j = 0; j < rows; ++j) {
  66 |  4.90M |     subtract32_avx2(diff_ptr, src_ptr, pred_ptr);
  67 |  4.90M |     subtract32_avx2(diff_ptr + 32, src_ptr + 32, pred_ptr + 32);
  68 |  4.90M |     src_ptr += src_stride;
  69 |  4.90M |     pred_ptr += pred_stride;
  70 |  4.90M |     diff_ptr += diff_stride;
  71 |  4.90M |   }
  72 |  97.9k | }
  73 |        |
  74 |        | void vpx_subtract_block_avx2(int rows, int cols, int16_t *diff_ptr,
  75 |        |                              ptrdiff_t diff_stride, const uint8_t *src_ptr,
  76 |        |                              ptrdiff_t src_stride, const uint8_t *pred_ptr,
  77 |   733M |                              ptrdiff_t pred_stride) {
  78 |   733M |   switch (cols) {
  79 |  24.7M |     case 16:
  80 |  24.7M |       subtract_block_16xn_avx2(rows, diff_ptr, diff_stride, src_ptr, src_stride,
  81 |  24.7M |                                pred_ptr, pred_stride);
  82 |  24.7M |       break;
  83 |  3.81M |     case 32:
  84 |  3.81M |       subtract_block_32xn_avx2(rows, diff_ptr, diff_stride, src_ptr, src_stride,
  85 |  3.81M |                                pred_ptr, pred_stride);
  86 |  3.81M |       break;
  87 |  97.9k |     case 64:
  88 |  97.9k |       subtract_block_64xn_avx2(rows, diff_ptr, diff_stride, src_ptr, src_stride,
  89 |  97.9k |                                pred_ptr, pred_stride);
  90 |  97.9k |       break;
  91 |   704M |     default:
  92 |   704M | #if HAVE_X86_ASM
  93 |   704M |       vpx_subtract_block_sse2(rows, cols, diff_ptr, diff_stride, src_ptr,
  94 |   704M |                               src_stride, pred_ptr, pred_stride);
  95 |        | #else
  96 |        |       vpx_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr,
  97 |        |                            src_stride, pred_ptr, pred_stride);
  98 |        | #endif
  99 |   704M |       break;
 100 |   733M |   }
 101 |   733M | }
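
The counts above show that 704M of 733M calls take the default branch, i.e.
block widths below 16, which are delegated to the SSE2 (or C) implementation
rather than handled with AVX2 here. A minimal sketch of driving the dispatcher
for a covered width (example_16x16 and its buffer contents are illustrative;
the prototype comes from the generated ./vpx_dsp_rtcd.h):

  #include <stdint.h>
  #include "./vpx_dsp_rtcd.h" /* declares vpx_subtract_block_avx2 */

  /* Exercises the cols == 16 arm, i.e. subtract_block_16xn_avx2. */
  void example_16x16(void) {
    uint8_t src[16 * 16], pred[16 * 16];
    int16_t diff[16 * 16];
    int i;
    for (i = 0; i < 16 * 16; ++i) {
      src[i] = (uint8_t)i;
      pred[i] = 128;
    }
    vpx_subtract_block_avx2(16, 16, diff, 16, src, 16, pred, 16);
  }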
 102 |        |
 103 |        | #if CONFIG_VP9_HIGHBITDEPTH
 104 |        | void vpx_highbd_subtract_block_avx2(int rows, int cols, int16_t *diff_ptr,
 105 |        |                                     ptrdiff_t diff_stride,
 106 |        |                                     const uint8_t *src8_ptr,
 107 |        |                                     ptrdiff_t src_stride,
 108 |        |                                     const uint8_t *pred8_ptr,
 109 |      0 |                                     ptrdiff_t pred_stride, int bd) {
 110 |      0 |   uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src8_ptr);
 111 |      0 |   uint16_t *pred_ptr = CONVERT_TO_SHORTPTR(pred8_ptr);
 112 |      0 |   (void)bd;
 113 |      0 |   if (cols == 64) {
 114 |      0 |     int j = rows;
 115 |      0 |     do {
 116 |      0 |       const __m256i s0 = _mm256_lddqu_si256((const __m256i *)src_ptr);
 117 |      0 |       const __m256i s1 = _mm256_lddqu_si256((const __m256i *)(src_ptr + 16));
 118 |      0 |       const __m256i s2 = _mm256_lddqu_si256((const __m256i *)(src_ptr + 32));
 119 |      0 |       const __m256i s3 = _mm256_lddqu_si256((const __m256i *)(src_ptr + 48));
 120 |      0 |       const __m256i p0 = _mm256_lddqu_si256((const __m256i *)pred_ptr);
 121 |      0 |       const __m256i p1 = _mm256_lddqu_si256((const __m256i *)(pred_ptr + 16));
 122 |      0 |       const __m256i p2 = _mm256_lddqu_si256((const __m256i *)(pred_ptr + 32));
 123 |      0 |       const __m256i p3 = _mm256_lddqu_si256((const __m256i *)(pred_ptr + 48));
 124 |      0 |       const __m256i d0 = _mm256_sub_epi16(s0, p0);
 125 |      0 |       const __m256i d1 = _mm256_sub_epi16(s1, p1);
 126 |      0 |       const __m256i d2 = _mm256_sub_epi16(s2, p2);
 127 |      0 |       const __m256i d3 = _mm256_sub_epi16(s3, p3);
 128 |      0 |       _mm256_storeu_si256((__m256i *)diff_ptr, d0);
 129 |      0 |       _mm256_storeu_si256((__m256i *)(diff_ptr + 16), d1);
 130 |      0 |       _mm256_storeu_si256((__m256i *)(diff_ptr + 32), d2);
 131 |      0 |       _mm256_storeu_si256((__m256i *)(diff_ptr + 48), d3);
 132 |      0 |       src_ptr += src_stride;
 133 |      0 |       pred_ptr += pred_stride;
 134 |      0 |       diff_ptr += diff_stride;
 135 |      0 |     } while (--j != 0);
 136 |      0 |   } else if (cols == 32) {
 137 |      0 |     int j = rows;
 138 |      0 |     do {
 139 |      0 |       const __m256i s0 = _mm256_lddqu_si256((const __m256i *)src_ptr);
 140 |      0 |       const __m256i s1 = _mm256_lddqu_si256((const __m256i *)(src_ptr + 16));
 141 |      0 |       const __m256i p0 = _mm256_lddqu_si256((const __m256i *)pred_ptr);
 142 |      0 |       const __m256i p1 = _mm256_lddqu_si256((const __m256i *)(pred_ptr + 16));
 143 |      0 |       const __m256i d0 = _mm256_sub_epi16(s0, p0);
 144 |      0 |       const __m256i d1 = _mm256_sub_epi16(s1, p1);
 145 |      0 |       _mm256_storeu_si256((__m256i *)diff_ptr, d0);
 146 |      0 |       _mm256_storeu_si256((__m256i *)(diff_ptr + 16), d1);
 147 |      0 |       src_ptr += src_stride;
 148 |      0 |       pred_ptr += pred_stride;
 149 |      0 |       diff_ptr += diff_stride;
 150 |      0 |     } while (--j != 0);
 151 |      0 |   } else if (cols == 16) {
 152 |      0 |     int j = rows;
 153 |      0 |     do {
 154 |      0 |       const __m256i s0 = _mm256_lddqu_si256((const __m256i *)src_ptr);
 155 |      0 |       const __m256i s1 =
 156 |      0 |           _mm256_lddqu_si256((const __m256i *)(src_ptr + src_stride));
 157 |      0 |       const __m256i p0 = _mm256_lddqu_si256((const __m256i *)pred_ptr);
 158 |      0 |       const __m256i p1 =
 159 |      0 |           _mm256_lddqu_si256((const __m256i *)(pred_ptr + pred_stride));
 160 |      0 |       const __m256i d0 = _mm256_sub_epi16(s0, p0);
 161 |      0 |       const __m256i d1 = _mm256_sub_epi16(s1, p1);
 162 |      0 |       _mm256_storeu_si256((__m256i *)diff_ptr, d0);
 163 |      0 |       _mm256_storeu_si256((__m256i *)(diff_ptr + diff_stride), d1);
 164 |      0 |       src_ptr += src_stride << 1;
 165 |      0 |       pred_ptr += pred_stride << 1;
 166 |      0 |       diff_ptr += diff_stride << 1;
 167 |      0 |       j -= 2;
 168 |      0 |     } while (j != 0);
 169 |      0 |   } else if (cols == 8) {
 170 |      0 |     int j = rows;
 171 |      0 |     do {
 172 |      0 |       const __m128i s0 = _mm_lddqu_si128((const __m128i *)src_ptr);
 173 |      0 |       const __m128i s1 =
 174 |      0 |           _mm_lddqu_si128((const __m128i *)(src_ptr + src_stride));
 175 |      0 |       const __m128i p0 = _mm_lddqu_si128((const __m128i *)pred_ptr);
 176 |      0 |       const __m128i p1 =
 177 |      0 |           _mm_lddqu_si128((const __m128i *)(pred_ptr + pred_stride));
 178 |      0 |       const __m128i d0 = _mm_sub_epi16(s0, p0);
 179 |      0 |       const __m128i d1 = _mm_sub_epi16(s1, p1);
 180 |      0 |       _mm_storeu_si128((__m128i *)diff_ptr, d0);
 181 |      0 |       _mm_storeu_si128((__m128i *)(diff_ptr + diff_stride), d1);
 182 |      0 |       src_ptr += src_stride << 1;
 183 |      0 |       pred_ptr += pred_stride << 1;
 184 |      0 |       diff_ptr += diff_stride << 1;
 185 |      0 |       j -= 2;
 186 |      0 |     } while (j != 0);
 187 |      0 |   } else {
 188 |      0 |     int j = rows;
 189 |      0 |     assert(cols == 4);
 190 |      0 |     do {
 191 |      0 |       const __m128i s0 = _mm_loadl_epi64((const __m128i *)src_ptr);
 192 |      0 |       const __m128i s1 =
 193 |      0 |           _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride));
 194 |      0 |       const __m128i p0 = _mm_loadl_epi64((const __m128i *)pred_ptr);
 195 |      0 |       const __m128i p1 =
 196 |      0 |           _mm_loadl_epi64((const __m128i *)(pred_ptr + pred_stride));
 197 |      0 |       const __m128i d0 = _mm_sub_epi16(s0, p0);
 198 |      0 |       const __m128i d1 = _mm_sub_epi16(s1, p1);
 199 |      0 |       _mm_storel_epi64((__m128i *)diff_ptr, d0);
 200 |      0 |       _mm_storel_epi64((__m128i *)(diff_ptr + diff_stride), d1);
 201 |      0 |       src_ptr += src_stride << 1;
 202 |      0 |       pred_ptr += pred_stride << 1;
 203 |      0 |       diff_ptr += diff_stride << 1;
 204 |      0 |       j -= 2;
 205 |      0 |     } while (j != 0);
 206 |      0 |   }
 207 |      0 | }
 208 |        | #endif  // CONFIG_VP9_HIGHBITDEPTH
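
Every line of vpx_highbd_subtract_block_avx2 has a count of 0: the workload
behind this report never ran a high-bit-depth encode, so all five width
branches (64/32/16/8/4) are uncovered. A sketch of a call that would reach the
cols == 4 tail, assuming a CONFIG_VP9_HIGHBITDEPTH build; CONVERT_TO_BYTEPTR is
the libvpx macro from vpx_ports/mem.h for passing uint16_t planes through the
uint8_t * parameters, and the buffer values are illustrative:

  #include <stdint.h>
  #include "./vpx_dsp_rtcd.h" /* declares vpx_highbd_subtract_block_avx2 */
  #include "vpx_ports/mem.h"  /* CONVERT_TO_BYTEPTR */

  void example_highbd_4x4(void) {
    uint16_t src16[4 * 4], pred16[4 * 4];
    int16_t diff[4 * 4];
    int i;
    for (i = 0; i < 4 * 4; ++i) {
      src16[i] = 1023; /* 10-bit sample */
      pred16[i] = 512;
    }
    /* rows must be even here: the 16/8/4 tails step two rows per iteration. */
    vpx_highbd_subtract_block_avx2(4, 4, diff, 4, CONVERT_TO_BYTEPTR(src16), 4,
                                   CONVERT_TO_BYTEPTR(pred16), 4, 10);
  }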