/src/libvpx/vpx_dsp/x86/avg_pred_sse2.c
/*
 * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <emmintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/x86/mem_sse2.h"
void vpx_comp_avg_pred_sse2(uint8_t *comp_pred, const uint8_t *pred, int width,
                            int height, const uint8_t *ref, int ref_stride) {
  /* comp_pred and pred must be 16-byte aligned. */
  assert(((intptr_t)comp_pred & 0xf) == 0);
  assert(((intptr_t)pred & 0xf) == 0);
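  // The aligned _mm_load_si128()/_mm_store_si128() used below require 16-byte
  // alignment; these asserts document that contract.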
  if (width > 8) {
    int x, y;
    for (y = 0; y < height; ++y) {
      for (x = 0; x < width; x += 16) {
        const __m128i p = _mm_load_si128((const __m128i *)(pred + x));
        const __m128i r = _mm_loadu_si128((const __m128i *)(ref + x));
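        // _mm_avg_epu8() computes the rounding-up byte average,
        // (p + r + 1) >> 1, in each of the 16 lanes.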
        const __m128i avg = _mm_avg_epu8(p, r);
        _mm_store_si128((__m128i *)(comp_pred + x), avg);
      }
      comp_pred += width;
      pred += width;
      ref += ref_stride;
    }
  } else {  // width must be 4 or 8.
    int i;
    // Process 16 elements at a time. comp_pred and pred have width == stride
    // and therefore live in contiguous memory. 4*4, 4*8, 8*4, 8*8, and 8*16
    // are all divisible by 16, so only ref needs to be massaged when loading.
    for (i = 0; i < width * height; i += 16) {
      const __m128i p = _mm_load_si128((const __m128i *)pred);
      __m128i r;
      __m128i avg;
      if (width == ref_stride) {
        r = _mm_loadu_si128((const __m128i *)ref);
        ref += 16;
      } else if (width == 4) {
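        // Gather four 4-byte rows of ref into one vector. _mm_set_epi32()
        // places its last argument in the lowest lane, so row 0 lands in
        // bytes 0-3, matching pred's packed layout.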
        r = _mm_set_epi32(loadu_int32(ref + 3 * ref_stride),
                          loadu_int32(ref + 2 * ref_stride),
                          loadu_int32(ref + ref_stride), loadu_int32(ref));

        ref += 4 * ref_stride;
      } else {
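        // Gather two 8-byte rows of ref: row 0 into the low half with
        // _mm_loadl_epi64(), row 1 into the high half with _mm_loadh_pi().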
        const __m128i r_0 = _mm_loadl_epi64((const __m128i *)ref);
        assert(width == 8);
        r = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(r_0),
                                          (const __m64 *)(ref + ref_stride)));

        ref += 2 * ref_stride;
      }
      avg = _mm_avg_epu8(p, r);
      _mm_store_si128((__m128i *)comp_pred, avg);

      pred += 16;
      comp_pred += 16;
    }
  }
}
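
// For reference, a scalar sketch of what both SIMD paths above compute.
// comp_avg_pred_ref() is a hypothetical illustration, not part of this file
// or the vpx_dsp API; it assumes the rounding-up semantics of _mm_avg_epu8().
static void comp_avg_pred_ref(uint8_t *comp_pred, const uint8_t *pred,
                              int width, int height, const uint8_t *ref,
                              int ref_stride) {
  int x, y;
  for (y = 0; y < height; ++y) {
    for (x = 0; x < width; ++x) {
      // Per-byte rounding-up average, matching _mm_avg_epu8().
      comp_pred[x] = (uint8_t)((pred[x] + ref[x] + 1) >> 1);
    }
    comp_pred += width;  // comp_pred and pred are packed (stride == width).
    pred += width;
    ref += ref_stride;  // Only ref carries an independent stride.
  }
}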