/src/ffmpeg/libavcodec/rectangle.h
Line | Count | Source |
1 | | /* |
2 | | * rectangle filling function |
3 | | * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> |
4 | | * |
5 | | * This file is part of FFmpeg. |
6 | | * |
7 | | * FFmpeg is free software; you can redistribute it and/or |
8 | | * modify it under the terms of the GNU Lesser General Public |
9 | | * License as published by the Free Software Foundation; either |
10 | | * version 2.1 of the License, or (at your option) any later version. |
11 | | * |
12 | | * FFmpeg is distributed in the hope that it will be useful, |
13 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | | * Lesser General Public License for more details. |
16 | | * |
17 | | * You should have received a copy of the GNU Lesser General Public |
18 | | * License along with FFmpeg; if not, write to the Free Software |
19 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
20 | | */ |
21 | | |
22 | | /** |
23 | | * @file |
24 | | * useful rectangle filling function |
25 | | * @author Michael Niedermayer <michaelni@gmx.at> |
26 | | */ |
27 | | |
28 | | #ifndef AVCODEC_RECTANGLE_H |
29 | | #define AVCODEC_RECTANGLE_H |
30 | | |
31 | | #include "config.h" |
32 | | #include "libavutil/common.h" |
33 | | #include "libavutil/avassert.h" |
34 | | |
35 | | /** |
36 | |  * Fill a w x h rectangle of size-byte elements at vp with val; rows lie stride elements apart. |
37 | |  * @param h height of the rectangle in elements (1, 2 or 4), should be a constant |
38 | |  * @param w width of the rectangle in elements (at most 4), should be a constant |
39 | |  * @param size the size in bytes of each element and of val (1, 2 or 4), should be a constant |
40 | | */ |
41 | 451M | static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size){ |
42 | 451M | uint8_t *p= (uint8_t*)vp; |
43 | 451M | av_assert2(size==1 || size==2 || size==4); |
44 | 451M | av_assert2(w<=4); |
45 | | |
46 | 451M | w *= size; |
47 | 451M | stride *= size; |
48 | | |
49 | 451M | av_assert2((((uintptr_t)vp)&(FFMIN(w, 8<<(HAVE_NEON|ARCH_PPC|HAVE_MMX))-1)) == 0); |
50 | 451M | av_assert2((stride&(w-1))==0); |
51 | 451M | if(w==2){ |
52 | 144M | const uint16_t v= size==4 ? val : val*0x0101; |
53 | 144M | *(uint16_t*)(p + 0*stride)= v; |
54 | 144M | if(h==1) return; |
55 | 144M | *(uint16_t*)(p + 1*stride)= v; |
56 | 144M | if(h==2) return; |
57 | 3.21M | *(uint16_t*)(p + 2*stride)= v; |
58 | 3.21M | *(uint16_t*)(p + 3*stride)= v; |
59 | 307M | }else if(w==4){ |
60 | 146M | const uint32_t v= size==4 ? val : size==2 ? val*0x00010001 : val*0x01010101; |
61 | 146M | *(uint32_t*)(p + 0*stride)= v; |
62 | 146M | if(h==1) return; |
63 | 145M | *(uint32_t*)(p + 1*stride)= v; |
64 | 145M | if(h==2) return; |
65 | 131M | *(uint32_t*)(p + 2*stride)= v; |
66 | 131M | *(uint32_t*)(p + 3*stride)= v; |
67 | 160M | }else if(w==8){ |
68 | | // gcc cannot optimize 64-bit math on x86_32 |
69 | 71.8M | #if HAVE_FAST_64BIT |
70 | 71.8M | const uint64_t v= size==2 ? val*0x0001000100010001ULL : val*0x0100000001ULL; |
71 | 71.8M | *(uint64_t*)(p + 0*stride)= v; |
72 | 71.8M | if(h==1) return; |
73 | 71.8M | *(uint64_t*)(p + 1*stride)= v; |
74 | 71.8M | if(h==2) return; |
75 | 9.46M | *(uint64_t*)(p + 2*stride)= v; |
76 | 9.46M | *(uint64_t*)(p + 3*stride)= v; |
77 | 88.8M | }else if(w==16){ |
78 | 88.8M | const uint64_t v= val*0x0100000001ULL; |
79 | 88.8M | *(uint64_t*)(p + 0+0*stride)= v; |
80 | 88.8M | *(uint64_t*)(p + 8+0*stride)= v; |
81 | 88.8M | *(uint64_t*)(p + 0+1*stride)= v; |
82 | 88.8M | *(uint64_t*)(p + 8+1*stride)= v; |
83 | 88.8M | if(h==2) return; |
84 | 85.0M | *(uint64_t*)(p + 0+2*stride)= v; |
85 | 85.0M | *(uint64_t*)(p + 8+2*stride)= v; |
86 | 85.0M | *(uint64_t*)(p + 0+3*stride)= v; |
87 | 85.0M | *(uint64_t*)(p + 8+3*stride)= v; |
88 | | #else |
89 | | const uint32_t v= size==2 ? val*0x00010001 : val; |
90 | | *(uint32_t*)(p + 0+0*stride)= v; |
91 | | *(uint32_t*)(p + 4+0*stride)= v; |
92 | | if(h==1) return; |
93 | | *(uint32_t*)(p + 0+1*stride)= v; |
94 | | *(uint32_t*)(p + 4+1*stride)= v; |
95 | | if(h==2) return; |
96 | | *(uint32_t*)(p + 0+2*stride)= v; |
97 | | *(uint32_t*)(p + 4+2*stride)= v; |
98 | | *(uint32_t*)(p + 0+3*stride)= v; |
99 | | *(uint32_t*)(p + 4+3*stride)= v; |
100 | | }else if(w==16){ |
101 | | *(uint32_t*)(p + 0+0*stride)= val; |
102 | | *(uint32_t*)(p + 4+0*stride)= val; |
103 | | *(uint32_t*)(p + 8+0*stride)= val; |
104 | | *(uint32_t*)(p +12+0*stride)= val; |
105 | | *(uint32_t*)(p + 0+1*stride)= val; |
106 | | *(uint32_t*)(p + 4+1*stride)= val; |
107 | | *(uint32_t*)(p + 8+1*stride)= val; |
108 | | *(uint32_t*)(p +12+1*stride)= val; |
109 | | if(h==2) return; |
110 | | *(uint32_t*)(p + 0+2*stride)= val; |
111 | | *(uint32_t*)(p + 4+2*stride)= val; |
112 | | *(uint32_t*)(p + 8+2*stride)= val; |
113 | | *(uint32_t*)(p +12+2*stride)= val; |
114 | | *(uint32_t*)(p + 0+3*stride)= val; |
115 | | *(uint32_t*)(p + 4+3*stride)= val; |
116 | | *(uint32_t*)(p + 8+3*stride)= val; |
117 | | *(uint32_t*)(p +12+3*stride)= val; |
118 | | #endif |
119 | 85.0M | }else |
120 | 0 | av_assert2(0); |
121 | 228M | av_assert2(h==4); |
122 | 228M | } |

Per-instantiation coverage: the report repeats the full listing above once for each inlined copy of fill_rectangle; only the instantiation names and their entry counts (source line 41) are kept here:

   152M  (instantiation name not recorded)
  14.4M  h264_cavlc.c:fill_rectangle
   130M  h264_direct.c:fill_rectangle
  11.2M  (instantiation name not recorded)
  4.95M  h264_slice.c:fill_rectangle
   113M  h264_cabac.c:fill_rectangle
  22.0M  (instantiation name not recorded)
  2.08M  (instantiation name not recorded)
123 | | |
124 | | #endif /* AVCODEC_RECTANGLE_H */ |
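
A scalar reference for the listing above: fill_rectangle() stores a w x h block of size-byte elements, each equal to val, with consecutive rows stride elements apart. The sketch below restates that as a plain loop; fill_rectangle_ref is a hypothetical name used only for this illustration, and it deliberately drops the constant-w/h and alignment requirements the real header asserts.

#include <stdint.h>

/* Plain-loop restatement of fill_rectangle()'s semantics: no splat
 * tricks, no alignment requirements, one element stored per step. */
static void fill_rectangle_ref(void *vp, int w, int h, int stride,
                               uint32_t val, int size)
{
    uint8_t *row = (uint8_t *)vp;
    for (int y = 0; y < h; y++, row += stride * size) {
        for (int x = 0; x < w; x++) {
            if      (size == 1) ((uint8_t  *)row)[x] = val;
            else if (size == 2) ((uint16_t *)row)[x] = val;
            else                ((uint32_t *)row)[x] = val;
        }
    }
}

The real function can replace this whole loop nest with at most eight stores because w, h and size are compile-time constants: under av_always_inline the dead branches vanish, and each surviving row collapses to one or two 16-, 32- or 64-bit writes.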
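
Those wide stores work because multiplying by a repunit-style constant replicates a narrow value across the lanes of a wider word, which is what source lines 52, 60, 70 and 78 of the listing compute. A few worked instances (standalone; the values are chosen only for illustration):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* 8-bit splat: val*0x01010101 copies the byte into all four lanes. */
    assert((uint32_t)(0x2Au * 0x01010101u) == 0x2A2A2A2Au);
    /* 16-bit splat: val*0x00010001 fills both 16-bit halves. */
    assert((uint32_t)(0x1234u * 0x00010001u) == 0x12341234u);
    /* 32-bit splat: val*0x0100000001 (2^32 + 1) fills both halves of a
     * 64-bit word. */
    assert(0xDEADBEEFULL * 0x0100000001ULL == 0xDEADBEEFDEADBEEFULL);
    return 0;
}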
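
The h264_cavlc/h264_cabac/h264_direct/h264_slice instantiations above fill 4x4 prediction-cache blocks inside 8-element-wide rows. A hedged usage sketch in that shape (the buffer name and fill value are illustrative, not taken from the decoder, and building it requires the FFmpeg tree since rectangle.h pulls in config.h):

#include <stdint.h>
#include <stdio.h>
#include "libavcodec/rectangle.h"   /* the header listed above */

int main(void)
{
    /* 4 rows of 8 one-byte elements, similar in shape to the H.264
     * decoder's per-4x4-block caches. _Alignas satisfies the pointer
     * alignment av_assert2(): w*size is 4 here, so 4-byte alignment is
     * enough; stride*size (8) is also a multiple of w*size. */
    _Alignas(16) uint8_t cache[4 * 8] = {0};

    fill_rectangle(cache, 4, 4, 8, 0x2A, 1);   /* w=4, h=4, stride=8, size=1 */

    for (int y = 0; y < 4; y++) {              /* first four columns print 2A */
        for (int x = 0; x < 8; x++)
            printf("%02X ", cache[y * 8 + x]);
        printf("\n");
    }
    return 0;
}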