Coverage Report

Created: 2024-09-06 07:53

/src/libvpx/vpx_dsp/x86/convolve_ssse3.h
  Line |   Count | Source
     1 |         | /*
     2 |         |  *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
     3 |         |  *
     4 |         |  *  Use of this source code is governed by a BSD-style license
     5 |         |  *  that can be found in the LICENSE file in the root of the source
     6 |         |  *  tree. An additional intellectual property rights grant can be found
     7 |         |  *  in the file PATENTS.  All contributing project authors may
     8 |         |  *  be found in the AUTHORS file in the root of the source tree.
     9 |         |  */
    10 |         |
    11 |         | #ifndef VPX_VPX_DSP_X86_CONVOLVE_SSSE3_H_
    12 |         | #define VPX_VPX_DSP_X86_CONVOLVE_SSSE3_H_
    13 |         |
    14 |         | #include <assert.h>
    15 |         | #include <tmmintrin.h>  // SSSE3
    16 |         |
    17 |         | #include "./vpx_config.h"
    18 |         |
    19 |         | static INLINE void shuffle_filter_ssse3(const int16_t *const filter,
    20 |       0 |                                         __m128i *const f) {
    21 |       0 |   const __m128i f_values = _mm_load_si128((const __m128i *)filter);
    22 |         |   // pack and duplicate the filter values
    23 |       0 |   f[0] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0200u));
    24 |       0 |   f[1] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0604u));
    25 |       0 |   f[2] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0a08u));
    26 |       0 |   f[3] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0e0cu));
    27 |       0 | }
Unexecuted instantiation: vp9_frame_scale_ssse3.c:shuffle_filter_ssse3
Unexecuted instantiation: vpx_subpixel_8t_intrin_avx2.c:shuffle_filter_ssse3
Unexecuted instantiation: vpx_subpixel_8t_intrin_ssse3.c:shuffle_filter_ssse3
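
A reading aid for the listing above (not part of the report): shuffle_filter_ssse3 packs the low byte of each adjacent pair of taps into every 16-bit lane of one register, which is the layout _mm_maddubs_epi16 expects. The following scalar model is an illustrative sketch; the name shuffle_filter_scalar_model and the assumption that all eight taps fit in a signed byte are assumptions, not libvpx code.

#include <stdint.h>
#include <emmintrin.h>  /* _mm_set1_epi16 (SSE2) */

/* Sketch: reproduce the lane contents built by shuffle_filter_ssse3.
 * The shuffle control 0x0200 picks source bytes 0 and 2, i.e. the low
 * bytes of filter[0] and filter[1]; 0x0604 picks those of filter[2]
 * and filter[3], and so on.                                           */
static void shuffle_filter_scalar_model(const int16_t *filter, __m128i *f) {
  int k;
  for (k = 0; k < 4; ++k) {
    const int lane = (filter[2 * k] & 0xff) | ((filter[2 * k + 1] & 0xff) << 8);
    f[k] = _mm_set1_epi16((int16_t)lane);  /* pair repeated in all 8 lanes */
  }
}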
    28 |         |
    29 |         | static INLINE void shuffle_filter_odd_ssse3(const int16_t *const filter,
    30 |       0 |                                             __m128i *const f) {
    31 |       0 |   const __m128i f_values = _mm_load_si128((const __m128i *)filter);
    32 |         |   // pack and duplicate the filter values
    33 |         |   // It utilizes the fact that the high byte of filter[3] is always 0 to clean
    34 |         |   // half of f[0] and f[4].
    35 |       0 |   assert(filter[3] >= 0 && filter[3] < 256);
    36 |       0 |   f[0] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0007u));
    37 |       0 |   f[1] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0402u));
    38 |       0 |   f[2] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0806u));
    39 |       0 |   f[3] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x0c0au));
    40 |       0 |   f[4] = _mm_shuffle_epi8(f_values, _mm_set1_epi16(0x070eu));
    41 |       0 | }
Unexecuted instantiation: vp9_frame_scale_ssse3.c:shuffle_filter_odd_ssse3
Unexecuted instantiation: vpx_subpixel_8t_intrin_avx2.c:shuffle_filter_odd_ssse3
Unexecuted instantiation: vpx_subpixel_8t_intrin_ssse3.c:shuffle_filter_odd_ssse3
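
The odd-offset variant spreads the eight taps across five registers and relies on byte 7 of the loaded filter (the high byte of filter[3], which the assert pins to [0, 256)) being zero, so one byte of every lane in f[0] and f[4] comes out cleared. A sketch of the resulting lane contents, with the same headers and assumptions as the previous sketch (names are hypothetical):

/* Sketch of the lanes produced by shuffle_filter_odd_ssse3; b[k] holds
 * the low byte of filter[k], and byte 7 of the loaded vector is 0.     */
static void shuffle_filter_odd_scalar_model(const int16_t *filter, __m128i *f) {
  int k, b[8];
  for (k = 0; k < 8; ++k) b[k] = filter[k] & 0xff;
  f[0] = _mm_set1_epi16((int16_t)(b[0] << 8));           /* 0x0007: low byte zeroed  */
  f[1] = _mm_set1_epi16((int16_t)(b[1] | (b[2] << 8)));  /* 0x0402                   */
  f[2] = _mm_set1_epi16((int16_t)(b[3] | (b[4] << 8)));  /* 0x0806                   */
  f[3] = _mm_set1_epi16((int16_t)(b[5] | (b[6] << 8)));  /* 0x0c0a                   */
  f[4] = _mm_set1_epi16((int16_t)b[7]);                  /* 0x070e: high byte zeroed */
}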
    42 |         |
    43 |         | static INLINE __m128i convolve8_8_ssse3(const __m128i *const s,
    44 |   7.85M |                                         const __m128i *const f) {
    45 |         |   // multiply 2 adjacent elements with the filter and add the result
    46 |   7.85M |   const __m128i k_64 = _mm_set1_epi16(1 << 6);
    47 |   7.85M |   const __m128i x0 = _mm_maddubs_epi16(s[0], f[0]);
    48 |   7.85M |   const __m128i x1 = _mm_maddubs_epi16(s[1], f[1]);
    49 |   7.85M |   const __m128i x2 = _mm_maddubs_epi16(s[2], f[2]);
    50 |   7.85M |   const __m128i x3 = _mm_maddubs_epi16(s[3], f[3]);
    51 |   7.85M |   __m128i sum1, sum2;
    52 |         |
    53 |         |   // sum the results together, saturating only on the final step
    54 |         |   // adding x0 with x2 and x1 with x3 is the only order that prevents
    55 |         |   // outranges for all filters
    56 |   7.85M |   sum1 = _mm_add_epi16(x0, x2);
    57 |   7.85M |   sum2 = _mm_add_epi16(x1, x3);
    58 |         |   // add the rounding offset early to avoid another saturated add
    59 |   7.85M |   sum1 = _mm_add_epi16(sum1, k_64);
    60 |   7.85M |   sum1 = _mm_adds_epi16(sum1, sum2);
    61 |         |   // shift by 7 bit each 16 bit
    62 |   7.85M |   sum1 = _mm_srai_epi16(sum1, 7);
    63 |   7.85M |   return sum1;
    64 |   7.85M | }
Unexecuted instantiation: vp9_frame_scale_ssse3.c:convolve8_8_ssse3
vpx_subpixel_8t_intrin_avx2.c:convolve8_8_ssse3: 7.85M executions (per-line counts identical to the combined listing above)
Unexecuted instantiation: vpx_subpixel_8t_intrin_ssse3.c:convolve8_8_ssse3
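
For context on the 7.85M executions above (the AVX2 translation unit is the only one exercising this helper in the run): convolve8_8_ssse3 expects four registers of interleaved source bytes plus the four packed filter registers from shuffle_filter_ssse3, and returns eight rounded 16-bit results. Below is a hypothetical usage sketch for an 8-tap vertical filter over one 8-pixel-wide output row; the wrapper name and loop structure are assumptions, not the actual caller in vpx_subpixel_8t_intrin_avx2.c.

#include <stddef.h>
#include <stdint.h>
#include <tmmintrin.h>
/* assumes convolve_ssse3.h is included for the two helpers */

static void filter8_vert_8px_sketch(const uint8_t *src, ptrdiff_t stride,
                                    const int16_t *filter, uint8_t *dst) {
  __m128i f[4], s[4], r[8];
  int i;
  shuffle_filter_ssse3(filter, f);
  for (i = 0; i < 8; ++i)
    r[i] = _mm_loadl_epi64((const __m128i *)(src + i * stride));
  /* interleave adjacent rows so each byte pair seen by _mm_maddubs_epi16
   * is (row k, row k + 1) for the same output pixel                     */
  s[0] = _mm_unpacklo_epi8(r[0], r[1]);
  s[1] = _mm_unpacklo_epi8(r[2], r[3]);
  s[2] = _mm_unpacklo_epi8(r[4], r[5]);
  s[3] = _mm_unpacklo_epi8(r[6], r[7]);
  /* eight rounded 16-bit results; saturate-pack back to bytes and store */
  _mm_storel_epi64((__m128i *)dst,
                   _mm_packus_epi16(convolve8_8_ssse3(s, f),
                                    _mm_setzero_si128()));
}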
    65 |         |
    66 |         | static INLINE __m128i convolve8_8_even_offset_ssse3(const __m128i *const s,
    67 |       0 |                                                     const __m128i *const f) {
    68 |         |   // multiply 2 adjacent elements with the filter and add the result
    69 |       0 |   const __m128i k_64 = _mm_set1_epi16(1 << 6);
    70 |       0 |   const __m128i x0 = _mm_maddubs_epi16(s[0], f[0]);
    71 |       0 |   const __m128i x1 = _mm_maddubs_epi16(s[1], f[1]);
    72 |       0 |   const __m128i x2 = _mm_maddubs_epi16(s[2], f[2]);
    73 |       0 |   const __m128i x3 = _mm_maddubs_epi16(s[3], f[3]);
    74 |         |   // compensate the subtracted 64 in f[1]. x4 is always non negative.
    75 |       0 |   const __m128i x4 = _mm_maddubs_epi16(s[1], _mm_set1_epi8(64));
    76 |         |   // add and saturate the results together
    77 |       0 |   __m128i temp = _mm_adds_epi16(x0, x3);
    78 |       0 |   temp = _mm_adds_epi16(temp, x1);
    79 |       0 |   temp = _mm_adds_epi16(temp, x2);
    80 |       0 |   temp = _mm_adds_epi16(temp, x4);
    81 |         |   // round and shift by 7 bit each 16 bit
    82 |       0 |   temp = _mm_adds_epi16(temp, k_64);
    83 |       0 |   temp = _mm_srai_epi16(temp, 7);
    84 |       0 |   return temp;
    85 |       0 | }
Unexecuted instantiation: vp9_frame_scale_ssse3.c:convolve8_8_even_offset_ssse3
Unexecuted instantiation: vpx_subpixel_8t_intrin_avx2.c:convolve8_8_even_offset_ssse3
Unexecuted instantiation: vpx_subpixel_8t_intrin_ssse3.c:convolve8_8_even_offset_ssse3
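
The x4 term above (and the x5 term in the odd-offset variant that follows) adds back the 64 that, per the source comments, the caller subtracted from one tap, presumably so that a coefficient as large as 128 still fits the signed 8-bit lane _mm_maddubs_epi16 requires; the underlying identity is simply src * c == src * (c - 64) + src * 64. A hypothetical scalar check of that rearrangement (names are illustrative, not libvpx code):

#include <stdint.h>

/* Sketch: one tap c (up to 128) is stored as c - 64 so it fits int8_t;
 * the helper compensates with an extra src * 64 term.                  */
static int tap_product_check(uint8_t src, int c) {
  const int8_t stored = (int8_t)(c - 64);  /* what the packed filter holds */
  return src * stored + src * 64;          /* equals src * c               */
}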
    86 |         |
    87 |         | static INLINE __m128i convolve8_8_odd_offset_ssse3(const __m128i *const s,
    88 |       0 |                                                    const __m128i *const f) {
    89 |         |   // multiply 2 adjacent elements with the filter and add the result
    90 |       0 |   const __m128i k_64 = _mm_set1_epi16(1 << 6);
    91 |       0 |   const __m128i x0 = _mm_maddubs_epi16(s[0], f[0]);
    92 |       0 |   const __m128i x1 = _mm_maddubs_epi16(s[1], f[1]);
    93 |       0 |   const __m128i x2 = _mm_maddubs_epi16(s[2], f[2]);
    94 |       0 |   const __m128i x3 = _mm_maddubs_epi16(s[3], f[3]);
    95 |       0 |   const __m128i x4 = _mm_maddubs_epi16(s[4], f[4]);
    96 |         |   // compensate the subtracted 64 in f[2]. x5 is always non negative.
    97 |       0 |   const __m128i x5 = _mm_maddubs_epi16(s[2], _mm_set1_epi8(64));
    98 |       0 |   __m128i temp;
    99 |         |
   100 |         |   // add and saturate the results together
   101 |       0 |   temp = _mm_adds_epi16(x0, x1);
   102 |       0 |   temp = _mm_adds_epi16(temp, x2);
   103 |       0 |   temp = _mm_adds_epi16(temp, x3);
   104 |       0 |   temp = _mm_adds_epi16(temp, x4);
   105 |       0 |   temp = _mm_adds_epi16(temp, x5);
   106 |         |   // round and shift by 7 bit each 16 bit
   107 |       0 |   temp = _mm_adds_epi16(temp, k_64);
   108 |       0 |   temp = _mm_srai_epi16(temp, 7);
   109 |       0 |   return temp;
   110 |       0 | }
Unexecuted instantiation: vp9_frame_scale_ssse3.c:convolve8_8_odd_offset_ssse3
Unexecuted instantiation: vpx_subpixel_8t_intrin_avx2.c:convolve8_8_odd_offset_ssse3
Unexecuted instantiation: vpx_subpixel_8t_intrin_ssse3.c:convolve8_8_odd_offset_ssse3
   111 |         |
   112 |         | #endif  // VPX_VPX_DSP_X86_CONVOLVE_SSSE3_H_