/src/ffmpeg/libavcodec/x86/mpeg4videodsp.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * This file is part of FFmpeg. |
3 | | * |
4 | | * FFmpeg is free software; you can redistribute it and/or |
5 | | * modify it under the terms of the GNU Lesser General Public |
6 | | * License as published by the Free Software Foundation; either |
7 | | * version 2.1 of the License, or (at your option) any later version. |
8 | | * |
9 | | * FFmpeg is distributed in the hope that it will be useful, |
10 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 | | * Lesser General Public License for more details. |
13 | | * |
14 | | * You should have received a copy of the GNU Lesser General Public |
15 | | * License along with FFmpeg; if not, write to the Free Software |
16 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
17 | | */ |
18 | | |
19 | | #include "config.h" |
20 | | #include "libavutil/attributes.h" |
21 | | #include "libavutil/cpu.h" |
22 | | #include "libavutil/x86/cpu.h" |
23 | | #include "libavcodec/mpeg4videodsp.h" |
24 | | #include "libavcodec/videodsp.h" |
25 | | |
26 | | #if HAVE_INLINE_ASM |
27 | | |
/**
 * Global motion compensation (GMC) for one 8-pixel-wide block, MMX version.
 *
 * Bilinearly samples `src` along an affine trajectory
 *     sx = ox + x*dxx + y*dyx,  sy = oy + x*dxy + y*dyy
 * and writes h rows of 8 pixels to `dst`.  Inputs the MMX path cannot
 * handle are forwarded unchanged to the C fallback ff_gmc_c(), so this
 * function is a drop-in replacement with the same parameter contract.
 *
 * @param dst      destination block, 8 x h pixels, `stride` bytes per row
 * @param src      source plane, `stride` bytes per row
 * @param stride   line size in bytes for both dst and src
 * @param h        number of output rows
 * @param ox, oy   start position; fixed point with (16 + shift) fractional
 *                 bits, judging by the `>> (16 + shift)` extraction below
 *                 -- NOTE(review): confirm against ff_gmc_c()
 * @param dxx, dxy per-column position increments (same fixed-point format)
 * @param dyx, dyy per-row position increments (same fixed-point format)
 * @param shift    subpel precision; s = 1 << shift is the interpolation unit
 * @param r        rounding constant added before the final right shift
 * @param width    source plane width in pixels (for edge emulation)
 * @param height   source plane height in pixels (for edge emulation)
 *
 * NOTE(review): no emms is issued here; presumably the caller restores the
 * FPU state after MMX use -- confirm at the call sites.
 */
static void gmc_mmx(uint8_t *dst, const uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    const int w = 8;                    /* this path always renders 8 columns */
    const int ix = ox >> (16 + shift);  /* integer (fullpel) start x */
    const int iy = oy >> (16 + shift);  /* integer (fullpel) start y */
    /* Drop the 4 lowest fractional bits so the positions fit the 16-bit MMX
     * lanes used below; the guard further down rejects inputs whose low 4
     * bits are nonzero, so no precision is actually lost. */
    const int oxs = ox >> 4;
    const int oys = oy >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    /* Per-lane broadcast copies of the rounding constant and the row steps,
     * consumed as 64-bit memory operands by the asm below. */
    const uint16_t r4[4] = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2 = 2 * shift;  /* final normalization: product of two s-scaled weights */
#define MAX_STRIDE 4096U
#define MAX_H 8U
    uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE]; /* (h+1) x stride scratch for edge emulation */
    int x, y;

    /* Position deltas across the whole block, relative to the start corner
     * (the 1 << (16 + shift) subtraction removes the implicit one-pel-per-
     * column advance), used only to test fullpel-offset constancy below. */
    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);
    /* Unsigned compare folds ix < 0 and ix >= width - w into one test. */
    int need_emu = (unsigned) ix >= width - w || width < w ||
                   (unsigned) iy >= height - h || height < h;

    /* Fall back to the C version for anything this MMX code cannot do: */
    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) ||
        // uses more than 16 bits of subpel mv (only at huge resolution)
        (dxx | dxy | dyx | dyy) & 15 ||
        // block would overrun the fixed-size on-stack emulation buffer
        (need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;  /* move to the constant fullpel start position */
    if (need_emu) {
        /* (w + 1) x (h + 1): the bilinear filter reads one extra row and
         * column to the bottom-right of every sample position. */
        ff_emulated_edge_mc_8(edge_buf, src, stride, stride, w + 1, h + 1, ix, iy, width, height);
        src = edge_buf;
    }

    /* mm6 = s = 1 << shift broadcast to all four 16-bit lanes,
     * mm7 = 0 (byte -> word zero-extension); both stay live across the
     * loops below, spanning the three asm statements. */
    __asm__ volatile (
        "movd %0, %%mm6 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r" (1 << shift));

    /* Render the block in 4-pixel-wide column strips (two strips for w = 8). */
    for (x = 0; x < w; x += 4) {
        /* Per-lane subpel x/y positions (low 16 bits only -- the fullpel
         * part is constant, verified above) for the 4 pixels of this strip,
         * pre-decremented by one row step because the row loop adds the
         * step before sampling. */
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            /* Advance dx4/dy4 by one row step (stored back for the next
             * iteration) and extract the top 4 bits -- the subpel fractions
             * dx, dy in [0, s) -- into mm4 and mm5. */
            __asm__ volatile (
                "movq %0, %%mm4 \n\t"
                "movq %1, %%mm5 \n\t"
                "paddw %2, %%mm4 \n\t"
                "paddw %3, %%mm5 \n\t"
                "movq %%mm4, %0 \n\t"
                "movq %%mm5, %1 \n\t"
                "psrlw $12, %%mm4 \n\t"
                "psrlw $12, %%mm5 \n\t"
                : "+m" (*dx4), "+m" (*dy4)
                : "m" (*dxy4), "m" (*dyy4));

            /* Bilinear blend of the four neighbours of each sample point:
             *   (src[0,0]*(s-dx)*(s-dy) + src[1,0]*dx*(s-dy) +
             *    src[0,1]*(s-dx)*dy     + src[1,1]*dx*dy + r) >> (2*shift)
             * computed on 4 pixels at once, then packed to bytes and stored.
             * NOTE(review): assumes dx is constant across the 4 lanes of a
             * strip's row fetch (src[0], src[1], ... are scalar loads shared
             * by all lanes) -- guaranteed by the fullpel-constancy guard. */
            __asm__ volatile (
                "movq %%mm6, %%mm2 \n\t"
                "movq %%mm6, %%mm1 \n\t"
                "psubw %%mm4, %%mm2 \n\t"
                "psubw %%mm5, %%mm1 \n\t"
                "movq %%mm2, %%mm0 \n\t"
                "movq %%mm4, %%mm3 \n\t"
                "pmullw %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw %%mm4, %%mm1 \n\t" // dx * (s - dy)

                "movd %4, %%mm5 \n\t"
                "movd %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy

                "movd %2, %%mm5 \n\t"
                "movd %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw %5, %%mm1 \n\t"     // + rounding constant r
                "paddw %%mm3, %%mm2 \n\t"
                "paddw %%mm1, %%mm0 \n\t"
                "paddw %%mm2, %%mm0 \n\t"

                "psrlw %6, %%mm0 \n\t"     // >> 2*shift: undo the two s-scaled weights
                "packuswb %%mm0, %%mm0 \n\t"
                "movd %%mm0, %0 \n\t"

                : "=m" (dst[x + y * stride])
                : "m" (src[0]), "m" (src[1]),
                  "m" (src[stride]), "m" (src[stride + 1]),
                  "m" (*r4), "m" (shift2));
            src += stride;
        }
        src += 4 - h * stride;  /* rewind h rows, advance to the next 4-column strip */
    }
}
150 | | |
151 | | #endif /* HAVE_INLINE_ASM */ |
152 | | |
153 | | av_cold void ff_mpeg4videodsp_init_x86(Mpeg4VideoDSPContext *c) |
154 | 0 | { |
155 | 0 | #if HAVE_INLINE_ASM |
156 | 0 | int cpu_flags = av_get_cpu_flags(); |
157 | |
|
158 | 0 | if (INLINE_MMX(cpu_flags)) |
159 | 0 | c->gmc = gmc_mmx; |
160 | 0 | #endif /* HAVE_INLINE_ASM */ |
161 | 0 | } |