/src/x265/source/common/picyuv.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /***************************************************************************** |
2 | | * Copyright (C) 2013-2020 MulticoreWare, Inc |
3 | | * |
4 | | * Authors: Steve Borho <steve@borho.org> |
5 | | * Min Chen <chenm003@163.com> |
6 | | * |
7 | | * This program is free software; you can redistribute it and/or modify |
8 | | * it under the terms of the GNU General Public License as published by |
9 | | * the Free Software Foundation; either version 2 of the License, or |
10 | | * (at your option) any later version. |
11 | | * |
12 | | * This program is distributed in the hope that it will be useful, |
13 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | | * GNU General Public License for more details. |
16 | | * |
17 | | * You should have received a copy of the GNU General Public License |
18 | | * along with this program; if not, write to the Free Software |
19 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. |
20 | | * |
21 | | * This program is also available under a commercial proprietary license. |
22 | | * For more information, contact us at license @ x265.com. |
23 | | *****************************************************************************/ |
24 | | |
25 | | #include "common.h" |
26 | | #include "picyuv.h" |
27 | | #include "slice.h" |
28 | | #include "primitives.h" |
29 | | |
30 | | using namespace X265_NS; |
31 | | |
32 | | PicYuv::PicYuv() |
33 | 0 | { |
34 | 0 | m_picBuf[0] = NULL; |
35 | 0 | m_picBuf[1] = NULL; |
36 | 0 | m_picBuf[2] = NULL; |
37 | |
|
38 | 0 | m_picOrg[0] = NULL; |
39 | 0 | m_picOrg[1] = NULL; |
40 | 0 | m_picOrg[2] = NULL; |
41 | |
|
42 | 0 | m_cuOffsetY = NULL; |
43 | 0 | m_cuOffsetC = NULL; |
44 | 0 | m_buOffsetY = NULL; |
45 | 0 | m_buOffsetC = NULL; |
46 | |
|
47 | 0 | m_maxLumaLevel = 0; |
48 | 0 | m_avgLumaLevel = 0; |
49 | |
|
50 | 0 | m_maxChromaULevel = 0; |
51 | 0 | m_avgChromaULevel = 0; |
52 | |
|
53 | 0 | m_maxChromaVLevel = 0; |
54 | 0 | m_avgChromaVLevel = 0; |
55 | |
|
56 | | #if (X265_DEPTH > 8) |
57 | | m_minLumaLevel = 0xFFFF; |
58 | | m_minChromaULevel = 0xFFFF; |
59 | | m_minChromaVLevel = 0xFFFF; |
60 | | #else |
61 | 0 | m_minLumaLevel = 0xFF; |
62 | 0 | m_minChromaULevel = 0xFF; |
63 | 0 | m_minChromaVLevel = 0xFF; |
64 | 0 | #endif |
65 | |
|
66 | 0 | m_stride = 0; |
67 | 0 | m_strideC = 0; |
68 | 0 | m_hChromaShift = 0; |
69 | 0 | m_vChromaShift = 0; |
70 | 0 | } |
71 | | |
72 | | bool PicYuv::create(x265_param* param, bool picAlloc, pixel *pixelbuf) |
73 | 0 | { |
74 | 0 | m_param = param; |
75 | 0 | uint32_t picWidth = m_param->sourceWidth; |
76 | 0 | uint32_t picHeight = m_param->sourceHeight; |
77 | 0 | uint32_t picCsp = m_param->internalCsp; |
78 | 0 | m_picWidth = picWidth; |
79 | 0 | m_picHeight = picHeight; |
80 | 0 | m_hChromaShift = CHROMA_H_SHIFT(picCsp); |
81 | 0 | m_vChromaShift = CHROMA_V_SHIFT(picCsp); |
82 | 0 | m_picCsp = picCsp; |
83 | |
|
84 | 0 | uint32_t numCuInWidth = (m_picWidth + param->maxCUSize - 1) / param->maxCUSize; |
85 | 0 | uint32_t numCuInHeight = (m_picHeight + param->maxCUSize - 1) / param->maxCUSize; |
86 | |
|
87 | 0 | m_lumaMarginX = param->maxCUSize + 32; // search margin and 8-tap filter half-length, padded for 32-byte alignment |
88 | 0 | m_lumaMarginY = param->maxCUSize + 16; // margin for 8-tap filter and infinite padding |
89 | 0 | m_stride = (numCuInWidth * param->maxCUSize) + (m_lumaMarginX << 1); |
90 | |
|
91 | 0 | int maxHeight = numCuInHeight * param->maxCUSize; |
92 | 0 | if (pixelbuf) |
93 | 0 | m_picOrg[0] = pixelbuf; |
94 | 0 | else |
95 | 0 | { |
96 | 0 | if (picAlloc) |
97 | 0 | { |
98 | 0 | CHECKED_MALLOC(m_picBuf[0], pixel, m_stride * (maxHeight + (m_lumaMarginY * 2))); |
99 | 0 | m_picOrg[0] = m_picBuf[0] + m_lumaMarginY * m_stride + m_lumaMarginX; |
100 | 0 | } |
101 | 0 | } |
102 | | |
103 | 0 | if (picCsp != X265_CSP_I400) |
104 | 0 | { |
105 | 0 | m_chromaMarginX = m_lumaMarginX; // keep 16-byte alignment for chroma CTUs |
106 | 0 | m_chromaMarginY = m_lumaMarginY >> m_vChromaShift; |
107 | 0 | m_strideC = ((numCuInWidth * m_param->maxCUSize) >> m_hChromaShift) + (m_chromaMarginX * 2); |
108 | 0 | if (picAlloc) |
109 | 0 | { |
110 | 0 | CHECKED_MALLOC(m_picBuf[1], pixel, m_strideC * ((maxHeight >> m_vChromaShift) + (m_chromaMarginY * 2))); |
111 | 0 | CHECKED_MALLOC(m_picBuf[2], pixel, m_strideC * ((maxHeight >> m_vChromaShift) + (m_chromaMarginY * 2))); |
112 | |
|
113 | 0 | m_picOrg[1] = m_picBuf[1] + m_chromaMarginY * m_strideC + m_chromaMarginX; |
114 | 0 | m_picOrg[2] = m_picBuf[2] + m_chromaMarginY * m_strideC + m_chromaMarginX; |
115 | 0 | } |
116 | 0 | } |
117 | 0 | else |
118 | 0 | { |
119 | 0 | m_picBuf[1] = m_picBuf[2] = NULL; |
120 | 0 | m_picOrg[1] = m_picOrg[2] = NULL; |
121 | 0 | } |
122 | 0 | return true; |
123 | | |
124 | 0 | fail: |
125 | 0 | return false; |
126 | 0 | } |
127 | | |
128 | | /*Copy pixels from the picture buffer of a frame to picture buffer of another frame*/ |
129 | | void PicYuv::copyFromFrame(PicYuv* source) |
130 | 0 | { |
131 | 0 | uint32_t numCuInHeight = (m_picHeight + m_param->maxCUSize - 1) / m_param->maxCUSize; |
132 | |
|
133 | 0 | int maxHeight = numCuInHeight * m_param->maxCUSize; |
134 | 0 | memcpy(m_picBuf[0], source->m_picBuf[0], sizeof(pixel)* m_stride * (maxHeight + (m_lumaMarginY * 2))); |
135 | 0 | m_picOrg[0] = m_picBuf[0] + m_lumaMarginY * m_stride + m_lumaMarginX; |
136 | |
|
137 | 0 | if (m_picCsp != X265_CSP_I400) |
138 | 0 | { |
139 | 0 | memcpy(m_picBuf[1], source->m_picBuf[1], sizeof(pixel)* m_strideC * ((maxHeight >> m_vChromaShift) + (m_chromaMarginY * 2))); |
140 | 0 | memcpy(m_picBuf[2], source->m_picBuf[2], sizeof(pixel)* m_strideC * ((maxHeight >> m_vChromaShift) + (m_chromaMarginY * 2))); |
141 | |
|
142 | 0 | m_picOrg[1] = m_picBuf[1] + m_chromaMarginY * m_strideC + m_chromaMarginX; |
143 | 0 | m_picOrg[2] = m_picBuf[2] + m_chromaMarginY * m_strideC + m_chromaMarginX; |
144 | 0 | } |
145 | 0 | else |
146 | 0 | { |
147 | 0 | m_picBuf[1] = m_picBuf[2] = NULL; |
148 | 0 | m_picOrg[1] = m_picOrg[2] = NULL; |
149 | 0 | } |
150 | 0 | } |
151 | | |
152 | | bool PicYuv::createScaledPicYUV(x265_param* param, uint8_t scaleFactor) |
153 | 0 | { |
154 | 0 | m_param = param; |
155 | 0 | m_picWidth = m_param->sourceWidth / scaleFactor; |
156 | 0 | m_picHeight = m_param->sourceHeight / scaleFactor; |
157 | 0 | int maxBlocksInRow = (m_picWidth + X265_LOWRES_CU_SIZE - 1) >> X265_LOWRES_CU_BITS; |
158 | 0 | int maxBlocksInCol = (m_picHeight + X265_LOWRES_CU_SIZE - 1) >> X265_LOWRES_CU_BITS; |
159 | 0 | m_picWidth = maxBlocksInRow * X265_LOWRES_CU_SIZE; |
160 | 0 | m_picHeight = maxBlocksInCol * X265_LOWRES_CU_SIZE; |
161 | |
|
162 | 0 | m_picCsp = m_param->internalCsp; |
163 | 0 | m_hChromaShift = CHROMA_H_SHIFT(m_picCsp); |
164 | 0 | m_vChromaShift = CHROMA_V_SHIFT(m_picCsp); |
165 | |
|
166 | 0 | uint32_t numCuInWidth = (m_picWidth + param->maxCUSize - 1) / param->maxCUSize; |
167 | 0 | uint32_t numCuInHeight = (m_picHeight + param->maxCUSize - 1) / param->maxCUSize; |
168 | |
|
169 | 0 | m_lumaMarginX = 128; // search margin for L0 and L1 ME in horizontal direction |
170 | 0 | m_lumaMarginY = 128; // search margin for L0 and L1 ME in vertical direction |
171 | 0 | m_stride = (numCuInWidth * param->maxCUSize) + (m_lumaMarginX << 1); |
172 | |
|
173 | 0 | int maxHeight = numCuInHeight * param->maxCUSize; |
174 | 0 | CHECKED_MALLOC_ZERO(m_picBuf[0], pixel, m_stride * (maxHeight + (m_lumaMarginY * 2))); |
175 | 0 | m_picOrg[0] = m_picBuf[0] + m_lumaMarginY * m_stride + m_lumaMarginX; |
176 | 0 | m_picBuf[1] = m_picBuf[2] = NULL; |
177 | 0 | m_picOrg[1] = m_picOrg[2] = NULL; |
178 | 0 | return true; |
179 | | |
180 | 0 | fail: |
181 | 0 | return false; |
182 | 0 | } |
183 | | |
184 | | int PicYuv::getLumaBufLen(uint32_t picWidth, uint32_t picHeight, uint32_t picCsp) |
185 | 0 | { |
186 | 0 | m_picWidth = picWidth; |
187 | 0 | m_picHeight = picHeight; |
188 | 0 | m_hChromaShift = CHROMA_H_SHIFT(picCsp); |
189 | 0 | m_vChromaShift = CHROMA_V_SHIFT(picCsp); |
190 | 0 | m_picCsp = picCsp; |
191 | |
|
192 | 0 | uint32_t numCuInWidth = (m_picWidth + m_param->maxCUSize - 1) / m_param->maxCUSize; |
193 | 0 | uint32_t numCuInHeight = (m_picHeight + m_param->maxCUSize - 1) / m_param->maxCUSize; |
194 | |
|
195 | 0 | m_lumaMarginX = m_param->maxCUSize + 32; // search margin and 8-tap filter half-length, padded for 32-byte alignment |
196 | 0 | m_lumaMarginY = m_param->maxCUSize + 16; // margin for 8-tap filter and infinite padding |
197 | 0 | m_stride = (numCuInWidth * m_param->maxCUSize) + (m_lumaMarginX << 1); |
198 | |
|
199 | 0 | int maxHeight = numCuInHeight * m_param->maxCUSize; |
200 | 0 | int bufLen = (int)(m_stride * (maxHeight + (m_lumaMarginY * 2))); |
201 | |
|
202 | 0 | return bufLen; |
203 | 0 | } |
204 | | |
205 | | /* the first picture allocated by the encoder will be asked to generate these |
206 | | * offset arrays. Once generated, they will be provided to all future PicYuv |
207 | | * allocated by the same encoder. */ |
208 | | bool PicYuv::createOffsets(const SPS& sps) |
209 | 0 | { |
210 | 0 | uint32_t numPartitions = 1 << (m_param->unitSizeDepth * 2); |
211 | |
|
212 | 0 | if (m_picCsp != X265_CSP_I400) |
213 | 0 | { |
214 | 0 | CHECKED_MALLOC(m_cuOffsetY, intptr_t, sps.numCuInWidth * sps.numCuInHeight); |
215 | 0 | CHECKED_MALLOC(m_cuOffsetC, intptr_t, sps.numCuInWidth * sps.numCuInHeight); |
216 | 0 | for (uint32_t cuRow = 0; cuRow < sps.numCuInHeight; cuRow++) |
217 | 0 | { |
218 | 0 | for (uint32_t cuCol = 0; cuCol < sps.numCuInWidth; cuCol++) |
219 | 0 | { |
220 | 0 | m_cuOffsetY[cuRow * sps.numCuInWidth + cuCol] = m_stride * cuRow * m_param->maxCUSize + cuCol * m_param->maxCUSize; |
221 | 0 | m_cuOffsetC[cuRow * sps.numCuInWidth + cuCol] = m_strideC * cuRow * (m_param->maxCUSize >> m_vChromaShift) + cuCol * (m_param->maxCUSize >> m_hChromaShift); |
222 | 0 | } |
223 | 0 | } |
224 | |
|
225 | 0 | CHECKED_MALLOC(m_buOffsetY, intptr_t, (size_t)numPartitions); |
226 | 0 | CHECKED_MALLOC(m_buOffsetC, intptr_t, (size_t)numPartitions); |
227 | 0 | for (uint32_t idx = 0; idx < numPartitions; ++idx) |
228 | 0 | { |
229 | 0 | intptr_t x = g_zscanToPelX[idx]; |
230 | 0 | intptr_t y = g_zscanToPelY[idx]; |
231 | 0 | m_buOffsetY[idx] = m_stride * y + x; |
232 | 0 | m_buOffsetC[idx] = m_strideC * (y >> m_vChromaShift) + (x >> m_hChromaShift); |
233 | 0 | } |
234 | 0 | } |
235 | 0 | else |
236 | 0 | { |
237 | 0 | CHECKED_MALLOC(m_cuOffsetY, intptr_t, sps.numCuInWidth * sps.numCuInHeight); |
238 | 0 | for (uint32_t cuRow = 0; cuRow < sps.numCuInHeight; cuRow++) |
239 | 0 | for (uint32_t cuCol = 0; cuCol < sps.numCuInWidth; cuCol++) |
240 | 0 | m_cuOffsetY[cuRow * sps.numCuInWidth + cuCol] = m_stride * cuRow * m_param->maxCUSize + cuCol * m_param->maxCUSize; |
241 | |
|
242 | 0 | CHECKED_MALLOC(m_buOffsetY, intptr_t, (size_t)numPartitions); |
243 | 0 | for (uint32_t idx = 0; idx < numPartitions; ++idx) |
244 | 0 | { |
245 | 0 | intptr_t x = g_zscanToPelX[idx]; |
246 | 0 | intptr_t y = g_zscanToPelY[idx]; |
247 | 0 | m_buOffsetY[idx] = m_stride * y + x; |
248 | 0 | } |
249 | 0 | } |
250 | 0 | return true; |
251 | | |
252 | 0 | fail: |
253 | 0 | return false; |
254 | 0 | } |
255 | | |
256 | | void PicYuv::destroy() |
257 | 0 | { |
258 | 0 | X265_FREE(m_picBuf[0]); |
259 | 0 | X265_FREE(m_picBuf[1]); |
260 | 0 | X265_FREE(m_picBuf[2]); |
261 | 0 | } |
262 | | |
/* Copy pixels from an x265_picture into internal PicYuv instance.
 * Shift pixels as necessary, mask off bits above X265_DEPTH for safety. */
void PicYuv::copyFromPicture(const x265_picture& pic, const x265_param& param, int padx, int pady, bool isBase)
{
    /* m_picWidth is the width that is being encoded, padx indicates how many
     * of those pixels are padding to reach multiple of MinCU(4) size.
     *
     * Internally, we need to extend rows out to a multiple of 16 for lowres
     * downscale and other operations. But those padding pixels are never
     * encoded.
     *
     * The same applies to m_picHeight and pady */

    /* width and height - without padsize (input picture raw width and height) */
    int width = m_picWidth - padx;
    int height = m_picHeight - pady;

    /* internal pad to multiple of 16x16 blocks */
    uint8_t rem = width & 15;

    padx = rem ? 16 - rem : padx;
    rem = height & 15;
    pady = rem ? 16 - rem : pady;

    /* add one more row and col of pad for downscale interpolation, fixes
     * warnings from valgrind about using uninitialized pixels */
    padx++;
    pady++;
    m_picCsp = pic.colorSpace;

    X265_CHECK(pic.bitDepth >= 8, "pic.bitDepth check failure");

    uint64_t lumaSum;
    uint64_t cbSum;
    uint64_t crSum;
    lumaSum = cbSum = crSum = 0;

    if (m_param->bCopyPicToFrame)
    {
        if (pic.bitDepth == 8)
        {
#if (X265_DEPTH > 8)
            /* 8-bit input into a >8-bit encoder: widen samples by shifting left */
            {
                pixel *yPixel = m_picOrg[0];

                uint8_t *yChar = (uint8_t*)pic.planes[0];
                int shift = (X265_DEPTH - 8);

                primitives.planecopy_cp(yChar, pic.stride[0] / sizeof(*yChar), yPixel, m_stride, width, height, shift);

                if (param.internalCsp != X265_CSP_I400)
                {
                    pixel *uPixel = m_picOrg[1];
                    pixel *vPixel = m_picOrg[2];

                    uint8_t *uChar = (uint8_t*)pic.planes[1];
                    uint8_t *vChar = (uint8_t*)pic.planes[2];

                    primitives.planecopy_cp(uChar, pic.stride[1] / sizeof(*uChar), uPixel, m_strideC, width >> m_hChromaShift, height >> m_vChromaShift, shift);
                    primitives.planecopy_cp(vChar, pic.stride[2] / sizeof(*vChar), vPixel, m_strideC, width >> m_hChromaShift, height >> m_vChromaShift, shift);
                }
            }
#else /* Case for (X265_DEPTH == 8) */
            // TODO: Does we need this path? may merge into above in future
            {
                if (isBase || param.numViews > 1)
                {
                    /* offsetX/offsetY select the correct view out of a packed
                     * multi-view input (format 1 = side-by-side, format 2 =
                     * top-bottom — NOTE(review): inferred from the offset
                     * arithmetic; confirm against x265_picture docs) */
                    int offsetX, offsetY;
                    offsetX = (!isBase && pic.format == 1 ? width : 0);
                    offsetY = (!isBase && pic.format == 2 ? pic.stride[0] * height : 0);
                    pixel *yPixel = m_picOrg[0];
                    uint8_t* yChar = (uint8_t*)pic.planes[0] + offsetX + offsetY;

                    for (int r = 0; r < height; r++)
                    {
                        memcpy(yPixel, yChar, width * sizeof(pixel));

                        yPixel += m_stride;
                        yChar += pic.stride[0] / sizeof(*yChar);
                    }

                    if (param.internalCsp != X265_CSP_I400)
                    {
                        offsetX = offsetX >> m_hChromaShift;
                        int offsetYU = (!isBase && pic.format == 2 ? pic.stride[1] * (height >> m_vChromaShift) : 0);
                        int offsetYV = (!isBase && pic.format == 2 ? pic.stride[2] * (height >> m_vChromaShift) : 0);

                        pixel *uPixel = m_picOrg[1];
                        pixel *vPixel = m_picOrg[2];

                        uint8_t* uChar = (uint8_t*)pic.planes[1] + offsetX + offsetYU;
                        uint8_t* vChar = (uint8_t*)pic.planes[2] + offsetX + offsetYV;

                        for (int r = 0; r < height >> m_vChromaShift; r++)
                        {
                            memcpy(uPixel, uChar, (width >> m_hChromaShift) * sizeof(pixel));
                            memcpy(vPixel, vChar, (width >> m_hChromaShift) * sizeof(pixel));

                            uPixel += m_strideC;
                            vPixel += m_strideC;
                            uChar += pic.stride[1] / sizeof(*uChar);
                            vChar += pic.stride[2] / sizeof(*vChar);
                        }
                    }
                }
#if ENABLE_ALPHA
                /* alpha layer: copy plane 3 as luma, fill chroma with neutral 128 */
                if (!isBase && param.bEnableAlpha)
                {
                    pixel* aPixel = m_picOrg[0];
                    uint8_t* aChar = (uint8_t*)pic.planes[3];

                    for (int r = 0; r < height; r++)
                    {
                        memcpy(aPixel, aChar, width * sizeof(pixel));

                        aPixel += m_stride;
                        aChar += pic.stride[0] / sizeof(*aChar);
                    }

                    pixel* uPixel = m_picOrg[1];
                    pixel* vPixel = m_picOrg[2];

                    for (int r = 0; r < height >> m_vChromaShift; r++)
                    {
                        memset(uPixel, 128, (width >> m_hChromaShift) * sizeof(pixel));
                        memset(vPixel, 128, (width >> m_hChromaShift) * sizeof(pixel));

                        uPixel += m_strideC;
                        vPixel += m_strideC;
                    }
                }
#endif
            }
#endif /* (X265_DEPTH > 8) */
        }
        else /* pic.bitDepth > 8 */
        {
            /* defensive programming, mask off bits that are supposed to be zero */
            if (isBase)
            {
                uint16_t mask = (1 << X265_DEPTH) - 1;
                int shift = abs(pic.bitDepth - X265_DEPTH);
                pixel* yPixel = m_picOrg[0];

                uint16_t* yShort = (uint16_t*)pic.planes[0];

                if (pic.bitDepth > X265_DEPTH)
                {
                    /* shift right and mask pixels to final size */
                    primitives.planecopy_sp(yShort, pic.stride[0] / sizeof(*yShort), yPixel, m_stride, width, height, shift, mask);
                }
                else /* Case for (pic.bitDepth <= X265_DEPTH) */
                {
                    /* shift left and mask pixels to final size */
                    primitives.planecopy_sp_shl(yShort, pic.stride[0] / sizeof(*yShort), yPixel, m_stride, width, height, shift, mask);
                }

                if (param.internalCsp != X265_CSP_I400)
                {
                    pixel* uPixel = m_picOrg[1];
                    pixel* vPixel = m_picOrg[2];

                    uint16_t* uShort = (uint16_t*)pic.planes[1];
                    uint16_t* vShort = (uint16_t*)pic.planes[2];

                    if (pic.bitDepth > X265_DEPTH)
                    {
                        primitives.planecopy_sp(uShort, pic.stride[1] / sizeof(*uShort), uPixel, m_strideC, width >> m_hChromaShift, height >> m_vChromaShift, shift, mask);
                        primitives.planecopy_sp(vShort, pic.stride[2] / sizeof(*vShort), vPixel, m_strideC, width >> m_hChromaShift, height >> m_vChromaShift, shift, mask);
                    }
                    else /* Case for (pic.bitDepth <= X265_DEPTH) */
                    {
                        primitives.planecopy_sp_shl(uShort, pic.stride[1] / sizeof(*uShort), uPixel, m_strideC, width >> m_hChromaShift, height >> m_vChromaShift, shift, mask);
                        primitives.planecopy_sp_shl(vShort, pic.stride[2] / sizeof(*vShort), vPixel, m_strideC, width >> m_hChromaShift, height >> m_vChromaShift, shift, mask);
                    }
                }
            }
#if ENABLE_ALPHA
            /* alpha layer for >8-bit input: plane 3 as luma, chroma set to mid-gray */
            if (!isBase && param.bEnableAlpha)
            {
                /* defensive programming, mask off bits that are supposed to be zero */
                uint16_t mask = (1 << X265_DEPTH) - 1;
                int shift = abs(pic.bitDepth - X265_DEPTH);
                pixel* yPixel = m_picOrg[0];

                uint16_t* yShort = (uint16_t*)pic.planes[3];

                if (pic.bitDepth > X265_DEPTH)
                {
                    /* shift right and mask pixels to final size */
                    primitives.planecopy_sp(yShort, pic.stride[0] / sizeof(*yShort), yPixel, m_stride, width, height, shift, mask);
                }
                else /* Case for (pic.bitDepth <= X265_DEPTH) */
                {
                    /* shift left and mask pixels to final size */
                    primitives.planecopy_sp_shl(yShort, pic.stride[0] / sizeof(*yShort), yPixel, m_stride, width, height, shift, mask);
                }

                if (param.internalCsp != X265_CSP_I400)
                {
                    pixel* uPixel = m_picOrg[1];
                    pixel* vPixel = m_picOrg[2];

                    for (int r = 0; r < height >> m_vChromaShift; r++)
                    {
                        for (int c = 0; c < (width >> m_hChromaShift); c++)
                        {
                            /* neutral chroma at half the sample range */
                            uPixel[c] = ((1 << X265_DEPTH) >> 1);
                            vPixel[c] = ((1 << X265_DEPTH) >> 1);
                        }
                        uPixel += m_strideC;
                        vPixel += m_strideC;
                    }
                }
            }
#endif
        }
    }
    else
    {
        /* zero-copy mode: alias the caller's planes directly */
        m_picOrg[0] = (pixel*)pic.planes[0];
        m_picOrg[1] = (pixel*)pic.planes[1];
        m_picOrg[2] = (pixel*)pic.planes[2];
    }

    pixel *Y = m_picOrg[0];
    pixel *U = m_picOrg[1];
    pixel *V = m_picOrg[2];

    pixel *yPic = m_picOrg[0];
    pixel *uPic = m_picOrg[1];
    pixel *vPic = m_picOrg[2];

    /* clamp luma into [minLuma, maxLuma] when the user narrowed the range */
    if(param.minLuma != 0 || param.maxLuma != PIXEL_MAX)
    {
        for (int r = 0; r < height; r++)
        {
            for (int c = 0; c < width; c++)
            {
                yPic[c] = X265_MIN(yPic[c], (pixel)param.maxLuma);
                yPic[c] = X265_MAX(yPic[c], (pixel)param.minLuma);
            }
            yPic += m_stride;
        }
    }
    yPic = m_picOrg[0];
    /* gather min/max/avg luma stats for CSV logging and CLL/FALL SEI */
    if (param.csvLogLevel >= 2 || param.maxCLL || param.maxFALL)
    {
        for (int r = 0; r < height; r++)
        {
            for (int c = 0; c < width; c++)
            {
                m_maxLumaLevel = X265_MAX(yPic[c], m_maxLumaLevel);
                m_minLumaLevel = X265_MIN(yPic[c], m_minLumaLevel);
                lumaSum += yPic[c];
            }
            yPic += m_stride;
        }
        /* NOTE(review): sum covers width*height but divides by the padded
         * m_picHeight*m_picWidth — slight underestimate when padding exists */
        m_avgLumaLevel = (double)lumaSum / (m_picHeight * m_picWidth);
    }
    /* chroma stats are only needed for verbose CSV logging */
    if (param.csvLogLevel >= 2)
    {
        if (param.internalCsp != X265_CSP_I400)
        {
            for (int r = 0; r < height >> m_vChromaShift; r++)
            {
                for (int c = 0; c < width >> m_hChromaShift; c++)
                {
                    m_maxChromaULevel = X265_MAX(uPic[c], m_maxChromaULevel);
                    m_minChromaULevel = X265_MIN(uPic[c], m_minChromaULevel);
                    cbSum += uPic[c];

                    m_maxChromaVLevel = X265_MAX(vPic[c], m_maxChromaVLevel);
                    m_minChromaVLevel = X265_MIN(vPic[c], m_minChromaVLevel);
                    crSum += vPic[c];
                }

                uPic += m_strideC;
                vPic += m_strideC;
            }
            m_avgChromaULevel = (double)cbSum / ((height >> m_vChromaShift) * (width >> m_hChromaShift));
            m_avgChromaVLevel = (double)crSum / ((height >> m_vChromaShift) * (width >> m_hChromaShift));
        }
    }

#if HIGH_BIT_DEPTH
    bool calcHDRParams = !!param.minLuma || (param.maxLuma != PIXEL_MAX);
    /* Apply min/max luma bounds for HDR pixel manipulations */
    if (calcHDRParams)
    {
        X265_CHECK(pic.bitDepth == 10, "HDR stats can be applied/calculated only for 10bpp content");
        uint64_t sumLuma;
        m_maxLumaLevel = primitives.planeClipAndMax(Y, m_stride, width, height, &sumLuma, (pixel)param.minLuma, (pixel)param.maxLuma);
        m_avgLumaLevel = (double) sumLuma / (m_picHeight * m_picWidth);
    }
#else
    (void) param;
#endif

    /* extend the right edge if width was not multiple of the minimum CU size */
    for (int r = 0; r < height; r++)
    {
        for (int x = 0; x < padx; x++)
            Y[width + x] = Y[width - 1];
        Y += m_stride;
    }

    /* extend the bottom if height was not multiple of the minimum CU size */
    Y = m_picOrg[0] + (height - 1) * m_stride;
    for (int i = 1; i <= pady; i++)
        memcpy(Y + i * m_stride, Y, (width + padx) * sizeof(pixel));

    /* repeat the same right/bottom extension for the chroma planes */
    if (param.internalCsp != X265_CSP_I400)
    {
        for (int r = 0; r < height >> m_vChromaShift; r++)
        {
            for (int x = 0; x < padx >> m_hChromaShift; x++)
            {
                U[(width >> m_hChromaShift) + x] = U[(width >> m_hChromaShift) - 1];
                V[(width >> m_hChromaShift) + x] = V[(width >> m_hChromaShift) - 1];
            }

            U += m_strideC;
            V += m_strideC;
        }

        U = m_picOrg[1] + ((height >> m_vChromaShift) - 1) * m_strideC;
        V = m_picOrg[2] + ((height >> m_vChromaShift) - 1) * m_strideC;

        for (int j = 1; j <= pady >> m_vChromaShift; j++)
        {
            memcpy(U + j * m_strideC, U, ((width + padx) >> m_hChromaShift) * sizeof(pixel));
            memcpy(V + j * m_strideC, V, ((width + padx) >> m_hChromaShift) * sizeof(pixel));
        }
    }
}
599 | | |
600 | | namespace X265_NS { |
601 | | |
602 | | template<uint32_t OUTPUT_BITDEPTH_DIV8> |
603 | | static void md5_block(MD5Context& md5, const pixel* plane, uint32_t n) |
604 | 0 | { |
605 | | /* create a 64 byte buffer for packing pixel's into */ |
606 | 0 | uint8_t buf[64 / OUTPUT_BITDEPTH_DIV8][OUTPUT_BITDEPTH_DIV8]; |
607 | |
|
608 | 0 | for (uint32_t i = 0; i < n; i++) |
609 | 0 | { |
610 | 0 | pixel pel = plane[i]; |
611 | | /* perform bitdepth and endian conversion */ |
612 | 0 | for (uint32_t d = 0; d < OUTPUT_BITDEPTH_DIV8; d++) |
613 | 0 | buf[i][d] = (uint8_t)(pel >> (d * 8)); |
614 | 0 | } |
615 | |
|
616 | 0 | MD5Update(&md5, (uint8_t*)buf, n * OUTPUT_BITDEPTH_DIV8); |
617 | 0 | } Unexecuted instantiation: picyuv.cpp:void x265::md5_block<1u>(x265::MD5Context&, unsigned char const*, unsigned int) Unexecuted instantiation: picyuv.cpp:void x265::md5_block<2u>(x265::MD5Context&, unsigned char const*, unsigned int) |
618 | | |
619 | | /* Update md5 with all samples in plane in raster order, each sample |
620 | | * is adjusted to OUTBIT_BITDEPTH_DIV8 */ |
621 | | template<uint32_t OUTPUT_BITDEPTH_DIV8> |
622 | | static void md5_plane(MD5Context& md5, const pixel* plane, uint32_t width, uint32_t height, intptr_t stride) |
623 | 0 | { |
624 | | /* N is the number of samples to process per md5 update. |
625 | | * All N samples must fit in buf */ |
626 | 0 | uint32_t N = 32; |
627 | 0 | uint32_t width_modN = width % N; |
628 | 0 | uint32_t width_less_modN = width - width_modN; |
629 | |
|
630 | 0 | for (uint32_t y = 0; y < height; y++) |
631 | 0 | { |
632 | | /* convert pel's into uint32_t chars in little endian byte order. |
633 | | * NB, for 8bit data, data is truncated to 8bits. */ |
634 | 0 | for (uint32_t x = 0; x < width_less_modN; x += N) |
635 | 0 | md5_block<OUTPUT_BITDEPTH_DIV8>(md5, &plane[y * stride + x], N); |
636 | | |
637 | | /* mop up any of the remaining line */ |
638 | 0 | md5_block<OUTPUT_BITDEPTH_DIV8>(md5, &plane[y * stride + width_less_modN], width_modN); |
639 | 0 | } |
640 | 0 | } Unexecuted instantiation: picyuv.cpp:void x265::md5_plane<1u>(x265::MD5Context&, unsigned char const*, unsigned int, unsigned int, long) Unexecuted instantiation: picyuv.cpp:void x265::md5_plane<2u>(x265::MD5Context&, unsigned char const*, unsigned int, unsigned int, long) |
641 | | |
642 | | void updateCRC(const pixel* plane, uint32_t& crcVal, uint32_t height, uint32_t width, intptr_t stride) |
643 | 0 | { |
644 | 0 | uint32_t crcMsb; |
645 | 0 | uint32_t bitVal; |
646 | 0 | uint32_t bitIdx; |
647 | |
|
648 | 0 | for (uint32_t y = 0; y < height; y++) |
649 | 0 | { |
650 | 0 | for (uint32_t x = 0; x < width; x++) |
651 | 0 | { |
652 | | // take CRC of first pictureData byte |
653 | 0 | for (bitIdx = 0; bitIdx < 8; bitIdx++) |
654 | 0 | { |
655 | 0 | crcMsb = (crcVal >> 15) & 1; |
656 | 0 | bitVal = (plane[y * stride + x] >> (7 - bitIdx)) & 1; |
657 | 0 | crcVal = (((crcVal << 1) + bitVal) & 0xffff) ^ (crcMsb * 0x1021); |
658 | 0 | } |
659 | |
|
660 | | #if _MSC_VER |
661 | | #pragma warning(disable: 4127) // conditional expression is constant |
662 | | #endif |
663 | | // take CRC of second pictureData byte if bit depth is greater than 8-bits |
664 | 0 | if (X265_DEPTH > 8) |
665 | 0 | { |
666 | 0 | for (bitIdx = 0; bitIdx < 8; bitIdx++) |
667 | 0 | { |
668 | 0 | crcMsb = (crcVal >> 15) & 1; |
669 | 0 | bitVal = (plane[y * stride + x] >> (15 - bitIdx)) & 1; |
670 | 0 | crcVal = (((crcVal << 1) + bitVal) & 0xffff) ^ (crcMsb * 0x1021); |
671 | 0 | } |
672 | 0 | } |
673 | 0 | } |
674 | 0 | } |
675 | 0 | } |
676 | | |
/* Flush 16 zero bits through the CRC register, then emit the final value
 * big-endian into the first two digest bytes. */
void crcFinish(uint32_t& crcVal, uint8_t digest[16])
{
    for (int bitIdx = 0; bitIdx < 16; bitIdx++)
    {
        uint32_t crcMsb = (crcVal >> 15) & 1;
        crcVal = ((crcVal << 1) & 0xffff) ^ (crcMsb * 0x1021);
    }

    digest[0] = (uint8_t)((crcVal >> 8) & 0xff);
    digest[1] = (uint8_t)(crcVal & 0xff);
}
690 | | |
691 | | void updateChecksum(const pixel* plane, uint32_t& checksumVal, uint32_t height, uint32_t width, intptr_t stride, int row, uint32_t cuHeight) |
692 | 0 | { |
693 | 0 | uint8_t xor_mask; |
694 | |
|
695 | 0 | for (uint32_t y = row * cuHeight; y < ((row * cuHeight) + height); y++) |
696 | 0 | { |
697 | 0 | for (uint32_t x = 0; x < width; x++) |
698 | 0 | { |
699 | 0 | xor_mask = (uint8_t)((x & 0xff) ^ (y & 0xff) ^ (x >> 8) ^ (y >> 8)); |
700 | 0 | checksumVal = (checksumVal + ((plane[y * stride + x] & 0xff) ^ xor_mask)) & 0xffffffff; |
701 | |
|
702 | 0 | if (X265_DEPTH > 8) |
703 | 0 | checksumVal = (checksumVal + ((plane[y * stride + x] >> 7 >> 1) ^ xor_mask)) & 0xffffffff; |
704 | 0 | } |
705 | 0 | } |
706 | 0 | } |
707 | | |
/* Serialize the 32-bit checksum big-endian into the first four digest bytes. */
void checksumFinish(uint32_t checksum, uint8_t digest[16])
{
    for (int i = 0; i < 4; i++)
        digest[i] = (uint8_t)((checksum >> (24 - 8 * i)) & 0xff);
}
715 | | |
716 | | void updateMD5Plane(MD5Context& md5, const pixel* plane, uint32_t width, uint32_t height, intptr_t stride) |
717 | 0 | { |
718 | | /* choose an md5_plane packing function based on the system bitdepth */ |
719 | 0 | typedef void(*MD5PlaneFunc)(MD5Context&, const pixel*, uint32_t, uint32_t, intptr_t); |
720 | 0 | MD5PlaneFunc md5_plane_func; |
721 | 0 | md5_plane_func = X265_DEPTH <= 8 ? (MD5PlaneFunc)md5_plane<1> : (MD5PlaneFunc)md5_plane<2>; |
722 | |
|
723 | 0 | md5_plane_func(md5, plane, width, height, stride); |
724 | 0 | } |
725 | | } |