Coverage Report

Created: 2025-12-31 07:57

/src/theora/lib/analyze.c
Line
Count
Source
1
/********************************************************************
2
 *                                                                  *
3
 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE.   *
4
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
5
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
6
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
7
 *                                                                  *
8
 * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2009,2025           *
9
 * by the Xiph.Org Foundation https://www.xiph.org/                 *
10
 *                                                                  *
11
 ********************************************************************
12
13
  function: mode selection code
14
15
 ********************************************************************/
16
#include <limits.h>
17
#include <string.h>
18
#include "encint.h"
19
#include "modedec.h"
20
#if defined(OC_COLLECT_METRICS)
21
# include "collect.c"
22
#endif
23
24
25
26
typedef struct oc_rd_metric          oc_rd_metric;
27
typedef struct oc_mode_choice        oc_mode_choice;
28
29
30
31
/*There are 8 possible schemes used to encode macro block modes.
32
  Schemes 0-6 use a maximally-skewed Huffman code to code each of the modes.
33
  The same set of Huffman codes is used for each of these 7 schemes, but the
34
   mode assigned to each codeword varies.
35
  Scheme 0 writes a custom mapping from codeword to MB mode to the bitstream,
36
   while schemes 1-6 have a fixed mapping.
37
  Scheme 7 just encodes each mode directly in 3 bits.*/
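
To make the cost structure described above concrete: schemes 1-6 (and scheme 0, once its custom alphabet has been read) all draw codewords from one maximally-skewed prefix code, so a mode's cost depends only on its rank in the scheme's ordering, while scheme 7 spends a flat 3 bits per mode. A minimal sketch of that trade-off (the rank-to-bits mapping below is assumed from the skewed code; the encoder's real table is OC_MODE_BITS):

    #include <stdio.h>

    /*Bit cost of rank r (0..7) under a maximally-skewed prefix code:
       codewords 0, 10, 110, ..., 1111110, 1111111.*/
    static int skewed_mode_bits(int r){
      return r<7?r+1:7;
    }

    int main(void){
      int r;
      /*The skewed code beats the flat 3-bit scheme 7 only for the two
         most frequent modes (and ties at rank 2), which is why ranking
         modes by frequency matters so much.*/
      for(r=0;r<8;r++){
        printf("rank %d: skewed=%d bits, flat=3 bits\n",r,skewed_mode_bits(r));
      }
      return 0;
    }
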
38
39
/*The mode orderings for the various mode coding schemes.
40
  Scheme 0 uses a custom alphabet, which is not stored in this table.
41
  This is the inverse of the equivalent table OC_MODE_ALPHABETS in the
42
   decoder.*/
43
static const unsigned char OC_MODE_RANKS[7][OC_NMODES]={
44
  /*Last MV dominates.*/
45
  /*L P M N I G GM 4*/
46
  {3,4,2,0,1,5,6,7},
47
  /*L P N M I G GM 4*/
48
  {2,4,3,0,1,5,6,7},
49
  /*L M P N I G GM 4*/
50
  {3,4,1,0,2,5,6,7},
51
  /*L M N P I G GM 4*/
52
  {2,4,1,0,3,5,6,7},
53
  /*No MV dominates.*/
54
  /*N L P M I G GM 4*/
55
  {0,4,3,1,2,5,6,7},
56
  /*N G L P M I GM 4*/
57
  {0,5,4,2,3,1,6,7},
58
  /*Default ordering.*/
59
  /*N I M L P G GM 4*/
60
  {0,1,2,3,4,5,6,7}
61
};
62
63
64
65
/*Initialize the mode scheme chooser.
66
  This need only be called once per encoder.*/
67
3.04k
void oc_mode_scheme_chooser_init(oc_mode_scheme_chooser *_chooser){
68
3.04k
  int si;
69
3.04k
  _chooser->mode_ranks[0]=_chooser->scheme0_ranks;
70
24.3k
  for(si=1;si<8;si++)_chooser->mode_ranks[si]=OC_MODE_RANKS[si-1];
71
3.04k
}
72
73
/*Reset the mode scheme chooser.
74
  This needs to be called once for each frame, including the first.*/
75
39.1k
static void oc_mode_scheme_chooser_reset(oc_mode_scheme_chooser *_chooser){
76
39.1k
  int si;
77
39.1k
  memset(_chooser->mode_counts,0,OC_NMODES*sizeof(*_chooser->mode_counts));
78
  /*Scheme 0 starts with 24 bits to store the mode list in.*/
79
39.1k
  _chooser->scheme_bits[0]=24;
80
39.1k
  memset(_chooser->scheme_bits+1,0,7*sizeof(*_chooser->scheme_bits));
81
352k
  for(si=0;si<8;si++){
82
    /*Scheme 7 should always start first, and scheme 0 should always start
83
       last.*/
84
313k
    _chooser->scheme_list[si]=7-si;
85
313k
    _chooser->scheme0_list[si]=_chooser->scheme0_ranks[si]=si;
86
313k
  }
87
39.1k
}
88
89
/*Return the cost of coding _mb_mode in the specified scheme.*/
90
static int oc_mode_scheme_chooser_scheme_mb_cost(
91
10.4M
 const oc_mode_scheme_chooser *_chooser,int _scheme,int _mb_mode){
92
10.4M
  int codebook;
93
10.4M
  int ri;
94
10.4M
  codebook=_scheme+1>>3;
95
  /*For any scheme except 0, we can just use the bit cost of the mode's rank
96
     in that scheme.*/
97
10.4M
  ri=_chooser->mode_ranks[_scheme][_mb_mode];
98
10.4M
  if(_scheme==0){
99
1.71M
    int mc;
100
    /*For scheme 0, incrementing the mode count could potentially change the
101
       mode's rank.
102
      Find the index where the mode would be moved to in the optimal list,
103
       and use its bit cost instead of the one for the mode's current
104
       position in the list.*/
105
    /*We don't actually reorder the list; this is for computing opportunity
106
       cost, not an update.*/
107
1.71M
    mc=_chooser->mode_counts[_mb_mode];
108
4.27M
    while(ri>0&&mc>=_chooser->mode_counts[_chooser->scheme0_list[ri-1]])ri--;
109
1.71M
  }
110
10.4M
  return OC_MODE_BITS[codebook][ri];
111
10.4M
}
112
113
/*This is the real purpose of this data structure: not actually selecting a
114
   mode scheme, but estimating the cost of coding a given mode given all the
115
   modes selected so far.
116
  This is done via opportunity cost: the cost is defined as the number of bits
117
   required to encode all the modes selected so far including the current one
118
   using the best possible scheme, minus the number of bits required to encode
119
   all the modes selected so far not including the current one using the best
120
   possible scheme.
121
  The computational expense of doing this probably makes it overkill.
122
  Just be happy we take a greedy approach instead of trying to solve the
123
   global mode-selection problem (which is NP-hard).
124
  _mb_mode: The mode to determine the cost of.
125
  Return: The number of bits required to code this mode.*/
126
static int oc_mode_scheme_chooser_cost(oc_mode_scheme_chooser *_chooser,
127
3.20M
 int _mb_mode){
128
3.20M
  int scheme0;
129
3.20M
  int scheme1;
130
3.20M
  int best_bits;
131
3.20M
  int mode_bits;
132
3.20M
  int si;
133
3.20M
  int scheme0_bits;
134
3.20M
  int scheme1_bits;
135
3.20M
  scheme0=_chooser->scheme_list[0];
136
3.20M
  scheme1=_chooser->scheme_list[1];
137
3.20M
  scheme0_bits=_chooser->scheme_bits[scheme0];
138
3.20M
  scheme1_bits=_chooser->scheme_bits[scheme1];
139
3.20M
  mode_bits=oc_mode_scheme_chooser_scheme_mb_cost(_chooser,scheme0,_mb_mode);
140
  /*Typical case: If the difference between the best scheme and the next best
141
     is greater than 6 bits, then adding just one mode cannot change which
142
     scheme we use.*/
143
3.20M
  if(scheme1_bits-scheme0_bits>6)return mode_bits;
144
  /*Otherwise, check to see if adding this mode selects a different scheme as
145
     the best.*/
146
1.38M
  si=1;
147
1.38M
  best_bits=scheme0_bits+mode_bits;
148
7.23M
  do{
149
7.23M
    int cur_bits;
150
7.23M
    cur_bits=scheme1_bits+
151
7.23M
     oc_mode_scheme_chooser_scheme_mb_cost(_chooser,scheme1,_mb_mode);
152
7.23M
    if(cur_bits<best_bits)best_bits=cur_bits;
153
7.23M
    if(++si>=8)break;
154
7.23M
    scheme1=_chooser->scheme_list[si];
155
7.23M
    scheme1_bits=_chooser->scheme_bits[scheme1];
156
7.23M
  }
157
7.23M
  while(scheme1_bits-scheme0_bits<=6);
158
1.38M
  return best_bits-scheme0_bits;
159
3.20M
}
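
To make the opportunity cost concrete (illustrative numbers): suppose the current best scheme has accumulated 100 bits and would code the new mode in 5 bits, while the runner-up sits at 103 bits but codes it in 1 bit. The totals with the mode included are 105 and 104, so the best achievable total becomes 104 and the mode is charged 104-100=4 bits. This is also why the loop above only examines schemes within 6 bits of the leader: a single mode costs at most 7 bits and at least 1, so a scheme trailing by more than 6 bits can never take the lead from one additional mode.
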
160
161
/*Incrementally update the mode counts and per-scheme bit counts and re-order
162
   the scheme lists once a mode has been selected.
163
  _mb_mode: The mode that was chosen.*/
164
static void oc_mode_scheme_chooser_update(oc_mode_scheme_chooser *_chooser,
165
237k
 int _mb_mode){
166
237k
  int ri;
167
237k
  int si;
168
237k
  _chooser->mode_counts[_mb_mode]++;
169
  /*Re-order the scheme0 mode list if necessary.*/
170
317k
  for(ri=_chooser->scheme0_ranks[_mb_mode];ri>0;ri--){
171
116k
    int pmode;
172
116k
    pmode=_chooser->scheme0_list[ri-1];
173
116k
    if(_chooser->mode_counts[pmode]>=_chooser->mode_counts[_mb_mode])break;
174
    /*Reorder the mode ranking.*/
175
79.5k
    _chooser->scheme0_ranks[pmode]++;
176
79.5k
    _chooser->scheme0_list[ri]=pmode;
177
79.5k
  }
178
237k
  _chooser->scheme0_ranks[_mb_mode]=ri;
179
237k
  _chooser->scheme0_list[ri]=_mb_mode;
180
  /*Now add the bit cost for the mode to each scheme.*/
181
2.14M
  for(si=0;si<8;si++){
182
1.90M
    _chooser->scheme_bits[si]+=
183
1.90M
     OC_MODE_BITS[si+1>>3][_chooser->mode_ranks[si][_mb_mode]];
184
1.90M
  }
185
  /*Finally, re-order the list of schemes.*/
186
1.90M
  for(si=1;si<8;si++){
187
1.66M
    int sj;
188
1.66M
    int scheme0;
189
1.66M
    int bits0;
190
1.66M
    sj=si;
191
1.66M
    scheme0=_chooser->scheme_list[si];
192
1.66M
    bits0=_chooser->scheme_bits[scheme0];
193
1.84M
    do{
194
1.84M
      int scheme1;
195
1.84M
      scheme1=_chooser->scheme_list[sj-1];
196
1.84M
      if(bits0>=_chooser->scheme_bits[scheme1])break;
197
192k
      _chooser->scheme_list[sj]=scheme1;
198
192k
    }
199
1.66M
    while(--sj>0);
200
1.66M
    _chooser->scheme_list[sj]=scheme0;
201
1.66M
  }
202
237k
}
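
The chooser is thus driven in three phases: oc_mode_scheme_chooser_reset() once per frame, oc_mode_scheme_chooser_cost() to price each candidate mode during the R-D search, and this update function once a winner is committed. A self-contained toy of the same greedy accounting, reduced to two fixed schemes (the mode list and rank table are made up, and the real chooser also re-ranks scheme 0's alphabet adaptively and tracks all eight schemes):

    #include <stdio.h>

    static int skewed_bits(int rank){return rank<7?rank+1:7;}

    int main(void){
      /*Hypothetical fixed mode-to-rank mapping for one skewed scheme.*/
      static const int rank[8]={0,1,2,3,4,5,6,7};
      static const int modes[5]={0,0,1,0,2};
      int bits_skewed=0;
      int bits_flat=0;
      int i;
      for(i=0;i<5;i++){
        int m=modes[i];
        /*Charge the marginal cost under whichever scheme is currently
           cheaper, then update both running totals.*/
        int cost=bits_skewed<=bits_flat?skewed_bits(rank[m]):3;
        printf("mode %d charged %d bits\n",m,cost);
        bits_skewed+=skewed_bits(rank[m]);
        bits_flat+=3;
      }
      printf("totals: skewed=%d flat=%d\n",bits_skewed,bits_flat);
      return 0;
    }
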
203
204
205
206
/*The number of bits required to encode a super block run.
207
  _run_count: The desired run count; must be positive and less than 4130.*/
208
162M
static int oc_sb_run_bits(int _run_count){
209
162M
  int i;
210
596M
  for(i=0;_run_count>=OC_SB_RUN_VAL_MIN[i+1];i++);
211
162M
  return OC_SB_RUN_CODE_NBITS[i];
212
162M
}
213
214
/*The number of bits required to encode a block run.
215
  _run_count: The desired run count; must be positive and less than 30.*/
216
21.5M
static int oc_block_run_bits(int _run_count){
217
21.5M
  return OC_BLOCK_RUN_CODE_NBITS[_run_count-1];
218
21.5M
}
219
220
221
222
195k
static void oc_fr_state_init(oc_fr_state *_fr){
223
195k
  _fr->bits=0;
224
195k
  _fr->sb_partial_count=0;
225
195k
  _fr->sb_full_count=0;
226
195k
  _fr->b_coded_count_prev=0;
227
195k
  _fr->b_coded_count=0;
228
195k
  _fr->b_count=0;
229
195k
  _fr->sb_prefer_partial=0;
230
195k
  _fr->sb_bits=0;
231
195k
  _fr->sb_partial=-1;
232
195k
  _fr->sb_full=-1;
233
195k
  _fr->b_coded_prev=-1;
234
195k
  _fr->b_coded=-1;
235
195k
}
236
237
238
static int oc_fr_state_sb_cost(const oc_fr_state *_fr,
239
10.4M
 int _sb_partial,int _sb_full){
240
10.4M
  int bits;
241
10.4M
  int sb_partial_count;
242
10.4M
  int sb_full_count;
243
10.4M
  bits=0;
244
10.4M
  sb_partial_count=_fr->sb_partial_count;
245
  /*Extend the sb_partial run, or start a new one.*/
246
10.4M
  if(_fr->sb_partial==_sb_partial){
247
2.12M
    if(sb_partial_count>=4129){
248
0
      bits++;
249
0
      sb_partial_count=0;
250
0
    }
251
2.12M
    else bits-=oc_sb_run_bits(sb_partial_count);
252
2.12M
  }
253
8.31M
  else sb_partial_count=0;
254
10.4M
  bits+=oc_sb_run_bits(++sb_partial_count);
255
10.4M
  if(!_sb_partial){
256
    /*Extend the sb_full run, or start a new one.*/
257
3.28M
    sb_full_count=_fr->sb_full_count;
258
3.28M
    if(_fr->sb_full==_sb_full){
259
1.28M
      if(sb_full_count>=4129){
260
0
        bits++;
261
0
        sb_full_count=0;
262
0
      }
263
1.28M
      else bits-=oc_sb_run_bits(sb_full_count);
264
1.28M
    }
265
2.00M
    else sb_full_count=0;
266
3.28M
    bits+=oc_sb_run_bits(++sb_full_count);
267
3.28M
  }
268
10.4M
  return bits;
269
10.4M
}
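
The accounting above is the standard incremental run-length idiom: if the new super block continues the previous run of length n, the state has already paid oc_sb_run_bits(n) for that run, so continuing it costs only oc_sb_run_bits(n+1)-oc_sb_run_bits(n) (the subtraction above); if it breaks the run, a fresh run of length 1 is opened at full price. Runs that reach the 4129 cap are closed and a new run of length 1 is started, at the cost of one extra bit.
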
270
271
static void oc_fr_state_advance_sb(oc_fr_state *_fr,
272
229k
 int _sb_partial,int _sb_full){
273
229k
  int sb_partial_count;
274
229k
  int sb_full_count;
275
229k
  sb_partial_count=_fr->sb_partial_count;
276
229k
  if(_fr->sb_partial!=_sb_partial||sb_partial_count>=4129)sb_partial_count=0;
277
229k
  sb_partial_count++;
278
229k
  if(!_sb_partial){
279
165k
    sb_full_count=_fr->sb_full_count;
280
165k
    if(_fr->sb_full!=_sb_full||sb_full_count>=4129)sb_full_count=0;
281
165k
    sb_full_count++;
282
165k
    _fr->sb_full_count=sb_full_count;
283
165k
    _fr->sb_full=_sb_full;
284
    /*Roll back the partial block state.*/
285
165k
    _fr->b_coded=_fr->b_coded_prev;
286
165k
    _fr->b_coded_count=_fr->b_coded_count_prev;
287
165k
  }
288
63.5k
  else{
289
    /*Commit back the partial block state.*/
290
63.5k
    _fr->b_coded_prev=_fr->b_coded;
291
63.5k
    _fr->b_coded_count_prev=_fr->b_coded_count;
292
63.5k
  }
293
229k
  _fr->sb_partial_count=sb_partial_count;
294
229k
  _fr->sb_partial=_sb_partial;
295
229k
  _fr->b_count=0;
296
229k
  _fr->sb_prefer_partial=0;
297
229k
  _fr->sb_bits=0;
298
229k
}
299
300
/*Commit the state of the current super block and advance to the next.*/
301
229k
static void oc_fr_state_flush_sb(oc_fr_state *_fr){
302
229k
  int sb_partial;
303
229k
  int sb_full;
304
229k
  int b_coded_count;
305
229k
  int b_count;
306
229k
  b_count=_fr->b_count;
307
229k
  b_coded_count=_fr->b_coded_count;
308
229k
  sb_full=_fr->b_coded;
309
229k
  sb_partial=b_coded_count<b_count;
310
229k
  if(!sb_partial){
311
    /*If the super block is fully coded/uncoded...*/
312
166k
    if(_fr->sb_prefer_partial){
313
      /*So far coding this super block as partial was cheaper anyway.*/
314
1.94k
      if(b_coded_count>15||_fr->b_coded_prev<0){
315
1.14k
        int sb_bits;
316
        /*If the block run is too long, this will limit how far it can be
317
           extended into the next partial super block.
318
          If we need to extend it farther, we don't want to have to roll all
319
           the way back here (since there could be many full SBs between now
320
           and then), so we disallow this.
321
  Similarly, if this is the start of a stripe, we don't know the
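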
322
           length of the outstanding block run from the previous stripe.*/
323
1.14k
        sb_bits=oc_fr_state_sb_cost(_fr,sb_partial,sb_full);
324
1.14k
        _fr->bits+=sb_bits-_fr->sb_bits;
325
1.14k
        _fr->sb_bits=sb_bits;
326
1.14k
      }
327
797
      else sb_partial=1;
328
1.94k
    }
329
166k
  }
330
229k
  oc_fr_state_advance_sb(_fr,sb_partial,sb_full);
331
229k
}
332
333
24.8M
static void oc_fr_state_advance_block(oc_fr_state *_fr,int _b_coded){
334
24.8M
  ptrdiff_t bits;
335
24.8M
  int       sb_bits;
336
24.8M
  int       b_coded_count;
337
24.8M
  int       b_count;
338
24.8M
  int       sb_prefer_partial;
339
24.8M
  sb_bits=_fr->sb_bits;
340
24.8M
  bits=_fr->bits-sb_bits;
341
24.8M
  b_count=_fr->b_count;
342
24.8M
  b_coded_count=_fr->b_coded_count;
343
24.8M
  sb_prefer_partial=_fr->sb_prefer_partial;
344
24.8M
  if(b_coded_count>=b_count){
345
17.3M
    int sb_partial_bits;
346
    /*This super block is currently fully coded/uncoded.*/
347
17.3M
    if(b_count<=0){
348
      /*This is the first block in this SB.*/
349
2.29M
      b_count=1;
350
      /*Check to see whether it's cheaper to code it partially or fully.*/
351
2.29M
      if(_fr->b_coded==_b_coded){
352
541k
        sb_partial_bits=-oc_block_run_bits(b_coded_count);
353
541k
        sb_partial_bits+=oc_block_run_bits(++b_coded_count);
354
541k
      }
355
1.75M
      else{
356
1.75M
        b_coded_count=1;
357
1.75M
        sb_partial_bits=2;
358
1.75M
      }
359
2.29M
      sb_partial_bits+=oc_fr_state_sb_cost(_fr,1,_b_coded);
360
2.29M
      sb_bits=oc_fr_state_sb_cost(_fr,0,_b_coded);
361
2.29M
      sb_prefer_partial=sb_partial_bits<sb_bits;
362
2.29M
      sb_bits^=(sb_partial_bits^sb_bits)&-sb_prefer_partial;
363
2.29M
    }
364
15.0M
    else if(_fr->b_coded==_b_coded){
365
9.57M
      b_coded_count++;
366
9.57M
      if(++b_count<16){
367
9.21M
        if(sb_prefer_partial){
368
          /*Check to see if it's cheaper to code it fully.*/
369
968k
          sb_partial_bits=sb_bits;
370
968k
          sb_partial_bits+=oc_block_run_bits(b_coded_count);
371
968k
          if(b_coded_count>0){
372
968k
            sb_partial_bits-=oc_block_run_bits(b_coded_count-1);
373
968k
          }
374
968k
          sb_bits=oc_fr_state_sb_cost(_fr,0,_b_coded);
375
968k
          sb_prefer_partial=sb_partial_bits<sb_bits;
376
968k
          sb_bits^=(sb_partial_bits^sb_bits)&-sb_prefer_partial;
377
968k
        }
378
        /*There's no need to check the converse (whether it's cheaper to code
379
           this SB partially if we were coding it fully), since the cost to
380
           code a SB partially can only increase as we add more blocks, whereas
381
           the cost to code it fully stays constant.*/
382
9.21M
      }
383
362k
      else{
384
        /*If we get to the end and this SB is still full, then force it to be
385
           coded full.
386
          Otherwise we might not be able to extend the block run far enough
387
           into the next partial SB.*/
388
362k
        if(sb_prefer_partial){
389
24.3k
          sb_prefer_partial=0;
390
24.3k
          sb_bits=oc_fr_state_sb_cost(_fr,0,_b_coded);
391
24.3k
        }
392
362k
      }
393
9.57M
    }
394
5.45M
    else{
395
      /*This SB was full, but now must be made partial.*/
396
5.45M
      if(!sb_prefer_partial){
397
4.85M
        sb_bits=oc_block_run_bits(b_coded_count);
398
4.85M
        if(b_coded_count>b_count){
399
1.27M
          sb_bits-=oc_block_run_bits(b_coded_count-b_count);
400
1.27M
        }
401
4.85M
        sb_bits+=oc_fr_state_sb_cost(_fr,1,_b_coded);
402
4.85M
      }
403
5.45M
      b_count++;
404
5.45M
      b_coded_count=1;
405
5.45M
      sb_prefer_partial=1;
406
5.45M
      sb_bits+=2;
407
5.45M
    }
408
17.3M
  }
409
7.49M
  else{
410
7.49M
    b_count++;
411
7.49M
    if(_fr->b_coded==_b_coded)sb_bits-=oc_block_run_bits(b_coded_count);
412
2.62M
    else b_coded_count=0;
413
7.49M
    sb_bits+=oc_block_run_bits(++b_coded_count);
414
7.49M
  }
415
24.8M
  _fr->bits=bits+sb_bits;
416
24.8M
  _fr->b_coded_count=b_coded_count;
417
24.8M
  _fr->b_coded=_b_coded;
418
24.8M
  _fr->b_count=b_count;
419
24.8M
  _fr->sb_prefer_partial=sb_prefer_partial;
420
24.8M
  _fr->sb_bits=sb_bits;
421
24.8M
}
422
423
9.31M
static void oc_fr_skip_block(oc_fr_state *_fr){
424
9.31M
  oc_fr_state_advance_block(_fr,0);
425
9.31M
}
426
427
15.5M
static void oc_fr_code_block(oc_fr_state *_fr){
428
15.5M
  oc_fr_state_advance_block(_fr,1);
429
15.5M
}
430
431
1.44M
static int oc_fr_cost1(const oc_fr_state *_fr){
432
1.44M
  oc_fr_state tmp;
433
1.44M
  ptrdiff_t   bits;
434
1.44M
  *&tmp=*_fr;
435
1.44M
  oc_fr_skip_block(&tmp);
436
1.44M
  bits=tmp.bits;
437
1.44M
  *&tmp=*_fr;
438
1.44M
  oc_fr_code_block(&tmp);
439
1.44M
  return (int)(tmp.bits-bits);
440
1.44M
}
441
442
244k
static int oc_fr_cost4(const oc_fr_state *_pre,const oc_fr_state *_post){
443
244k
  oc_fr_state tmp;
444
244k
  *&tmp=*_pre;
445
244k
  oc_fr_skip_block(&tmp);
446
244k
  oc_fr_skip_block(&tmp);
447
244k
  oc_fr_skip_block(&tmp);
448
244k
  oc_fr_skip_block(&tmp);
449
244k
  return (int)(_post->bits-tmp.bits);
450
244k
}
451
452
453
454
235k
static void oc_qii_state_init(oc_qii_state *_qs){
455
235k
  _qs->bits=0;
456
235k
  _qs->qi01_count=0;
457
235k
  _qs->qi01=-1;
458
235k
  _qs->qi12_count=0;
459
235k
  _qs->qi12=-1;
460
235k
}
461
462
463
static void oc_qii_state_advance(oc_qii_state *_qd,
464
63.9M
 const oc_qii_state *_qs,int _qii){
465
63.9M
  ptrdiff_t bits;
466
63.9M
  int       qi01;
467
63.9M
  int       qi01_count;
468
63.9M
  int       qi12;
469
63.9M
  int       qi12_count;
470
63.9M
  bits=_qs->bits;
471
63.9M
  qi01=_qii+1>>1;
472
63.9M
  qi01_count=_qs->qi01_count;
473
63.9M
  if(qi01==_qs->qi01){
474
37.2M
    if(qi01_count>=4129){
475
2.76k
      bits++;
476
2.76k
      qi01_count=0;
477
2.76k
    }
478
37.2M
    else bits-=oc_sb_run_bits(qi01_count);
479
37.2M
  }
480
26.7M
  else qi01_count=0;
481
63.9M
  qi01_count++;
482
63.9M
  bits+=oc_sb_run_bits(qi01_count);
483
63.9M
  qi12_count=_qs->qi12_count;
484
63.9M
  if(_qii){
485
28.4M
    qi12=_qii>>1;
486
28.4M
    if(qi12==_qs->qi12){
487
15.4M
      if(qi12_count>=4129){
488
15.0k
        bits++;
489
15.0k
        qi12_count=0;
490
15.0k
      }
491
15.4M
      else bits-=oc_sb_run_bits(qi12_count);
492
15.4M
    }
493
13.0M
    else qi12_count=0;
494
28.4M
    qi12_count++;
495
28.4M
    bits+=oc_sb_run_bits(qi12_count);
496
28.4M
  }
497
35.4M
  else qi12=_qs->qi12;
498
63.9M
  _qd->bits=bits;
499
63.9M
  _qd->qi01=qi01;
500
63.9M
  _qd->qi01_count=qi01_count;
501
63.9M
  _qd->qi12=qi12;
502
63.9M
  _qd->qi12_count=qi12_count;
503
63.9M
}
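
The two run-coded bit planes updated here signal each coded block's quantizer index: qi01 distinguishes qis[0] from the other two, and qi12 (coded only when qi01 is 1) picks between qis[1] and qis[2]. A minimal demo that just mirrors the shift expressions above:

    #include <stdio.h>

    int main(void){
      int qii;
      for(qii=0;qii<3;qii++){
        int qi01=qii+1>>1;  /*0->0, 1->1, 2->1*/
        int qi12=qii>>1;    /*1->0, 2->1 (not coded when qi01==0)*/
        printf("qii=%d: qi01=%d qi12=%d\n",qii,qi01,qi12);
      }
      return 0;
    }
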
504
505
506
507
65.3k
static void oc_enc_pipeline_init(oc_enc_ctx *_enc,oc_enc_pipeline_state *_pipe){
508
65.3k
  ptrdiff_t *coded_fragis;
509
65.3k
  unsigned   mcu_nvsbs;
510
65.3k
  ptrdiff_t  mcu_nfrags;
511
65.3k
  int        flimit;
512
65.3k
  int        hdec;
513
65.3k
  int        vdec;
514
65.3k
  int        pli;
515
65.3k
  int        nqis;
516
65.3k
  int        qii;
517
65.3k
  int        qi0;
518
65.3k
  int        qti;
519
  /*Initialize the per-plane coded block flag trackers.
520
    These are used for bit-estimation purposes only; the real flag bits span
521
     all three planes, so we can't compute them in parallel.*/
522
261k
  for(pli=0;pli<3;pli++)oc_fr_state_init(_pipe->fr+pli);
523
261k
  for(pli=0;pli<3;pli++)oc_qii_state_init(_pipe->qs+pli);
524
  /*Set up the per-plane skip SSD storage pointers.*/
525
65.3k
  mcu_nvsbs=_enc->mcu_nvsbs;
526
65.3k
  mcu_nfrags=mcu_nvsbs*_enc->state.fplanes[0].nhsbs*16;
527
65.3k
  hdec=!(_enc->state.info.pixel_fmt&1);
528
65.3k
  vdec=!(_enc->state.info.pixel_fmt&2);
529
65.3k
  _pipe->skip_ssd[0]=_enc->mcu_skip_ssd;
530
65.3k
  _pipe->skip_ssd[1]=_pipe->skip_ssd[0]+mcu_nfrags;
531
65.3k
  _pipe->skip_ssd[2]=_pipe->skip_ssd[1]+(mcu_nfrags>>hdec+vdec);
532
  /*Set up per-plane pointers to the coded and uncoded fragments lists.
533
    Unlike the decoder, each plane's coded and uncoded fragment list is kept
534
     separate during the analysis stage; we only make the coded list for all
535
     three planes contiguous right before the final packet is output
536
     (destroying the uncoded lists, which are no longer needed).*/
537
65.3k
  coded_fragis=_enc->state.coded_fragis;
538
261k
  for(pli=0;pli<3;pli++){
539
195k
    _pipe->coded_fragis[pli]=coded_fragis;
540
195k
    coded_fragis+=_enc->state.fplanes[pli].nfrags;
541
195k
    _pipe->uncoded_fragis[pli]=coded_fragis;
542
195k
  }
543
65.3k
  memset(_pipe->ncoded_fragis,0,sizeof(_pipe->ncoded_fragis));
544
65.3k
  memset(_pipe->nuncoded_fragis,0,sizeof(_pipe->nuncoded_fragis));
545
  /*Set up condensed quantizer tables.*/
546
65.3k
  qi0=_enc->state.qis[0];
547
65.3k
  nqis=_enc->state.nqis;
548
261k
  for(pli=0;pli<3;pli++){
549
554k
    for(qii=0;qii<nqis;qii++){
550
358k
      int qi;
551
358k
      qi=_enc->state.qis[qii];
552
1.07M
      for(qti=0;qti<2;qti++){
553
        /*Set the DC coefficient in the dequantization table.*/
554
717k
        _enc->state.dequant_tables[qi][pli][qti][0]=
555
717k
         _enc->dequant_dc[qi0][pli][qti];
556
717k
        _enc->dequant[pli][qii][qti]=_enc->state.dequant_tables[qi][pli][qti];
557
        /*Copy over the quantization table.*/
558
717k
        memcpy(_enc->enquant[pli][qii][qti],_enc->enquant_tables[qi][pli][qti],
559
717k
         _enc->opt_data.enquant_table_size);
560
717k
      }
561
358k
    }
562
195k
  }
563
  /*Fix up the DC coefficients in the quantization tables.*/
564
65.3k
  oc_enc_enquant_table_fixup(_enc,_enc->enquant,nqis);
565
  /*Initialize the tokenization state.*/
566
261k
  for(pli=0;pli<3;pli++){
567
195k
    _pipe->ndct_tokens1[pli]=0;
568
195k
    _pipe->eob_run1[pli]=0;
569
195k
  }
570
  /*Initialize the bounding value array for the loop filter.*/
571
65.3k
  flimit=_enc->state.loop_filter_limits[_enc->state.qis[0]];
572
65.3k
  _pipe->loop_filter=flimit!=0;
573
65.3k
  if(flimit!=0)oc_loop_filter_init(&_enc->state,_pipe->bounding_values,flimit);
574
  /*Clear the temporary DCT scratch space.*/
575
65.3k
  memset(_pipe->dct_data,0,sizeof(_pipe->dct_data));
576
65.3k
}
577
578
/*Sets the current MCU stripe to super block row _sby.
579
  Return: A non-zero value if this was not the last MCU.*/
580
static int oc_enc_pipeline_set_stripe(oc_enc_ctx *_enc,
581
213k
 oc_enc_pipeline_state *_pipe,int _sby){
582
213k
  const oc_fragment_plane *fplane;
583
213k
  unsigned                 mcu_nvsbs;
584
213k
  int                      sby_end;
585
213k
  int                      notdone;
586
213k
  int                      vdec;
587
213k
  int                      pli;
588
213k
  mcu_nvsbs=_enc->mcu_nvsbs;
589
213k
  sby_end=_enc->state.fplanes[0].nvsbs;
590
213k
  notdone=_sby+mcu_nvsbs<sby_end;
591
213k
  if(notdone)sby_end=_sby+mcu_nvsbs;
592
213k
  vdec=0;
593
852k
  for(pli=0;pli<3;pli++){
594
639k
    fplane=_enc->state.fplanes+pli;
595
639k
    _pipe->sbi0[pli]=fplane->sboffset+(_sby>>vdec)*fplane->nhsbs;
596
639k
    _pipe->fragy0[pli]=_sby<<2-vdec;
597
639k
    _pipe->froffset[pli]=fplane->froffset
598
639k
     +_pipe->fragy0[pli]*(ptrdiff_t)fplane->nhfrags;
599
639k
    if(notdone){
600
443k
      _pipe->sbi_end[pli]=fplane->sboffset+(sby_end>>vdec)*fplane->nhsbs;
601
443k
      _pipe->fragy_end[pli]=sby_end<<2-vdec;
602
443k
    }
603
195k
    else{
604
195k
      _pipe->sbi_end[pli]=fplane->sboffset+fplane->nsbs;
605
195k
      _pipe->fragy_end[pli]=fplane->nvfrags;
606
195k
    }
607
639k
    vdec=!(_enc->state.info.pixel_fmt&2);
608
639k
  }
609
213k
  return notdone;
610
213k
}
611
612
static void oc_enc_pipeline_finish_mcu_plane(oc_enc_ctx *_enc,
613
639k
 oc_enc_pipeline_state *_pipe,int _pli,int _sdelay,int _edelay){
614
  /*Copy over all the uncoded fragments from this plane and advance the uncoded
615
     fragment list.*/
616
639k
  if(_pipe->nuncoded_fragis[_pli]>0){
617
61.1k
    _pipe->uncoded_fragis[_pli]-=_pipe->nuncoded_fragis[_pli];
618
61.1k
    oc_frag_copy_list(&_enc->state,
619
61.1k
     _enc->state.ref_frame_data[OC_FRAME_SELF],
620
61.1k
     _enc->state.ref_frame_data[OC_FRAME_PREV],
621
61.1k
     _enc->state.ref_ystride[_pli],_pipe->uncoded_fragis[_pli],
622
61.1k
     _pipe->nuncoded_fragis[_pli],_enc->state.frag_buf_offs);
623
61.1k
    _pipe->nuncoded_fragis[_pli]=0;
624
61.1k
  }
625
  /*Perform DC prediction.*/
626
639k
  oc_enc_pred_dc_frag_rows(_enc,_pli,
627
639k
   _pipe->fragy0[_pli],_pipe->fragy_end[_pli]);
628
  /*Finish DC tokenization.*/
629
639k
  oc_enc_tokenize_dc_frag_list(_enc,_pli,
630
639k
   _pipe->coded_fragis[_pli],_pipe->ncoded_fragis[_pli],
631
639k
   _pipe->ndct_tokens1[_pli],_pipe->eob_run1[_pli]);
632
639k
  _pipe->ndct_tokens1[_pli]=_enc->ndct_tokens[_pli][1];
633
639k
  _pipe->eob_run1[_pli]=_enc->eob_run[_pli][1];
634
  /*And advance the coded fragment list.*/
635
639k
  _enc->state.ncoded_fragis[_pli]+=_pipe->ncoded_fragis[_pli];
636
639k
  _pipe->coded_fragis[_pli]+=_pipe->ncoded_fragis[_pli];
637
639k
  _pipe->ncoded_fragis[_pli]=0;
638
  /*Apply the loop filter if necessary.*/
639
639k
  if(_pipe->loop_filter){
640
387k
    oc_state_loop_filter_frag_rows(&_enc->state,
641
387k
     _pipe->bounding_values,OC_FRAME_SELF,_pli,
642
387k
     _pipe->fragy0[_pli]-_sdelay,_pipe->fragy_end[_pli]-_edelay);
643
387k
  }
644
251k
  else _sdelay=_edelay=0;
645
  /*To fill borders, we have an additional two pixel delay, since a fragment
646
     in the next row could filter its top edge, using two pixels from a
647
     fragment in this row.
648
    But there's no reason to delay a full fragment between the two.*/
649
639k
  oc_state_borders_fill_rows(&_enc->state,
650
639k
   _enc->state.ref_frame_idx[OC_FRAME_SELF],_pli,
651
639k
   (_pipe->fragy0[_pli]-_sdelay<<3)-(_sdelay<<1),
652
639k
   (_pipe->fragy_end[_pli]-_edelay<<3)-(_edelay<<1));
653
639k
}
654
655
656
657
/*Cost information about the coded blocks in a MB.*/
658
struct oc_rd_metric{
659
  int uncoded_ac_ssd;
660
  int coded_ac_ssd;
661
  int ac_bits;
662
  int dc_flag;
663
};
664
665
666
667
static int oc_enc_block_transform_quantize(oc_enc_ctx *_enc,
668
 oc_enc_pipeline_state *_pipe,int _pli,ptrdiff_t _fragi,
669
 unsigned _rd_scale,unsigned _rd_iscale,oc_rd_metric *_mo,
670
20.4M
 oc_fr_state *_fr,oc_token_checkpoint **_stack){
671
20.4M
  ogg_int16_t            *data;
672
20.4M
  ogg_int16_t            *dct;
673
20.4M
  ogg_int16_t            *idct;
674
20.4M
  oc_qii_state            qs;
675
20.4M
  const ogg_uint16_t     *dequant;
676
20.4M
  ogg_uint16_t            dequant_dc;
677
20.4M
  ptrdiff_t               frag_offs;
678
20.4M
  int                     ystride;
679
20.4M
  const unsigned char    *src;
680
20.4M
  const unsigned char    *ref;
681
20.4M
  unsigned char          *dst;
682
20.4M
  int                     nonzero;
683
20.4M
  unsigned                uncoded_ssd;
684
20.4M
  unsigned                coded_ssd;
685
20.4M
  oc_token_checkpoint    *checkpoint;
686
20.4M
  oc_fragment            *frags;
687
20.4M
  int                     mb_mode;
688
20.4M
  int                     refi;
689
20.4M
  int                     mv_offs[2];
690
20.4M
  int                     nmv_offs;
691
20.4M
  int                     ac_bits;
692
20.4M
  int                     borderi;
693
20.4M
  int                     nqis;
694
20.4M
  int                     qti;
695
20.4M
  int                     qii;
696
20.4M
  int                     dc;
697
20.4M
  nqis=_enc->state.nqis;
698
20.4M
  frags=_enc->state.frags;
699
20.4M
  frag_offs=_enc->state.frag_buf_offs[_fragi];
700
20.4M
  ystride=_enc->state.ref_ystride[_pli];
701
20.4M
  src=_enc->state.ref_frame_data[OC_FRAME_IO]+frag_offs;
702
20.4M
  borderi=frags[_fragi].borderi;
703
20.4M
  qii=frags[_fragi].qii;
704
20.4M
  data=_enc->pipe.dct_data;
705
20.4M
  dct=data+64;
706
20.4M
  idct=data+128;
707
20.4M
  if(qii&~3){
708
474k
#if !defined(OC_COLLECT_METRICS)
709
474k
    if(_enc->sp_level>=OC_SP_LEVEL_EARLY_SKIP){
710
      /*Enable early skip detection.*/
711
474k
      frags[_fragi].coded=0;
712
474k
      frags[_fragi].refi=OC_FRAME_NONE;
713
474k
      oc_fr_skip_block(_fr);
714
474k
      return 0;
715
474k
    }
716
0
#endif
717
    /*Try and code this block anyway.*/
718
0
    qii&=3;
719
0
  }
720
20.0M
  refi=frags[_fragi].refi;
721
20.0M
  mb_mode=frags[_fragi].mb_mode;
722
20.0M
  ref=_enc->state.ref_frame_data[refi]+frag_offs;
723
20.0M
  dst=_enc->state.ref_frame_data[OC_FRAME_SELF]+frag_offs;
724
  /*Motion compensation:*/
725
20.0M
  switch(mb_mode){
726
19.5M
    case OC_MODE_INTRA:{
727
19.5M
      nmv_offs=0;
728
19.5M
      oc_enc_frag_sub_128(_enc,data,src,ystride);
729
19.5M
    }break;
730
17.3k
    case OC_MODE_GOLDEN_NOMV:
731
145k
    case OC_MODE_INTER_NOMV:{
732
145k
      nmv_offs=1;
733
145k
      mv_offs[0]=0;
734
145k
      oc_enc_frag_sub(_enc,data,src,ref,ystride);
735
145k
    }break;
736
306k
    default:{
737
306k
      const oc_mv *frag_mvs;
738
306k
      frag_mvs=_enc->state.frag_mvs;
739
306k
      nmv_offs=oc_state_get_mv_offsets(&_enc->state,mv_offs,
740
306k
       _pli,frag_mvs[_fragi]);
741
306k
      if(nmv_offs>1){
742
262k
        oc_enc_frag_copy2(_enc,dst,
743
262k
         ref+mv_offs[0],ref+mv_offs[1],ystride);
744
262k
        oc_enc_frag_sub(_enc,data,src,dst,ystride);
745
262k
      }
746
43.7k
      else oc_enc_frag_sub(_enc,data,src,ref+mv_offs[0],ystride);
747
306k
    }break;
748
20.0M
  }
749
#if defined(OC_COLLECT_METRICS)
750
  {
751
    unsigned sad;
752
    unsigned satd;
753
    switch(nmv_offs){
754
      case 0:{
755
        sad=oc_enc_frag_intra_sad(_enc,src,ystride);
756
        satd=oc_enc_frag_intra_satd(_enc,&dc,src,ystride);
757
      }break;
758
      case 1:{
759
        sad=oc_enc_frag_sad_thresh(_enc,src,ref+mv_offs[0],ystride,UINT_MAX);
760
        satd=oc_enc_frag_satd(_enc,&dc,src,ref+mv_offs[0],ystride);
761
        satd+=abs(dc);
762
      }break;
763
      default:{
764
        sad=oc_enc_frag_sad_thresh(_enc,src,dst,ystride,UINT_MAX);
765
        satd=oc_enc_frag_satd(_enc,&dc,src,dst,ystride);
766
        satd+=abs(dc);
767
      }break;
768
    }
769
    _enc->frag_sad[_fragi]=sad;
770
    _enc->frag_satd[_fragi]=satd;
771
  }
772
#endif
773
  /*Transform:*/
774
20.0M
  oc_enc_fdct8x8(_enc,dct,data);
775
  /*Quantize:*/
776
20.0M
  qti=mb_mode!=OC_MODE_INTRA;
777
20.0M
  dequant=_enc->dequant[_pli][qii][qti];
778
20.0M
  nonzero=oc_enc_quantize(_enc,data,dct,dequant,_enc->enquant[_pli][qii][qti]);
779
20.0M
  dc=data[0];
780
  /*Tokenize.*/
781
20.0M
  checkpoint=*_stack;
782
20.0M
  if(_enc->sp_level<OC_SP_LEVEL_FAST_ANALYSIS){
783
20.0M
    ac_bits=oc_enc_tokenize_ac(_enc,_pli,_fragi,idct,data,dequant,dct,
784
20.0M
     nonzero+1,_stack,OC_RD_ISCALE(_enc->lambda,_rd_iscale),qti?0:3);
785
20.0M
  }
786
0
  else{
787
0
    ac_bits=oc_enc_tokenize_ac_fast(_enc,_pli,_fragi,idct,data,dequant,dct,
788
0
     nonzero+1,_stack,OC_RD_ISCALE(_enc->lambda,_rd_iscale),qti?0:3);
789
0
  }
790
  /*Reconstruct.
791
    TODO: nonzero may need to be adjusted after tokenization.*/
792
20.0M
  dequant_dc=dequant[0];
793
20.0M
  if(nonzero==0){
794
16.8M
    ogg_int16_t p;
795
16.8M
    int         ci;
796
16.8M
    int         qi01;
797
16.8M
    int         qi12;
798
    /*We round this dequant product (and not any of the others) because there's
799
       no iDCT rounding.*/
800
16.8M
    p=(ogg_int16_t)(dc*(ogg_int32_t)dequant_dc+15>>5);
801
    /*LOOP VECTORIZES.*/
802
1.09G
    for(ci=0;ci<64;ci++)data[ci]=p;
803
    /*We didn't code any AC coefficients, so don't change the quantizer.*/
804
16.8M
    qi01=_pipe->qs[_pli].qi01;
805
16.8M
    qi12=_pipe->qs[_pli].qi12;
806
16.8M
    if(qi01>0)qii=1+qi12;
807
14.6M
    else if(qi01>=0)qii=0;
808
16.8M
  }
809
3.16M
  else{
810
3.16M
    idct[0]=dc*dequant_dc;
811
    /*Note: This clears idct[] back to zero for the next block.*/
812
3.16M
    oc_idct8x8(&_enc->state,data,idct,nonzero+1);
813
3.16M
  }
814
20.0M
  frags[_fragi].qii=qii;
815
20.0M
  if(nqis>1){
816
7.34M
    oc_qii_state_advance(&qs,_pipe->qs+_pli,qii);
817
7.34M
    ac_bits+=qs.bits-_pipe->qs[_pli].bits;
818
7.34M
  }
819
20.0M
  if(!qti)oc_enc_frag_recon_intra(_enc,dst,ystride,data);
820
451k
  else{
821
451k
    oc_enc_frag_recon_inter(_enc,dst,
822
451k
     nmv_offs==1?ref+mv_offs[0]:dst,ystride,data);
823
451k
  }
824
  /*If _fr is NULL, then this is an INTRA frame, and we can't skip blocks.*/
825
20.0M
#if !defined(OC_COLLECT_METRICS)
826
20.0M
  if(_fr!=NULL)
827
1.44M
#endif
828
1.44M
  {
829
    /*In retrospect, should we have skipped this block?*/
830
1.44M
    if(borderi<0){
831
912k
      coded_ssd=oc_enc_frag_ssd(_enc,src,dst,ystride);
832
912k
    }
833
534k
    else{
834
534k
      coded_ssd=oc_enc_frag_border_ssd(_enc,src,dst,ystride,
835
534k
       _enc->state.borders[borderi].mask);
836
534k
    }
837
    /*Scale to match DCT domain.*/
838
1.44M
    coded_ssd<<=4;
839
#if defined(OC_COLLECT_METRICS)
840
    _enc->frag_ssd[_fragi]=coded_ssd;
841
  }
842
  if(_fr!=NULL){
843
#endif
844
1.44M
    coded_ssd=OC_RD_SCALE(coded_ssd,_rd_scale);
845
1.44M
    uncoded_ssd=_pipe->skip_ssd[_pli][_fragi-_pipe->froffset[_pli]];
846
1.44M
    if(uncoded_ssd<UINT_MAX&&
847
     /*Don't allow luma blocks to be skipped in 4MV mode when VP3 compatibility
848
        is enabled.*/
849
1.44M
     (!_enc->vp3_compatible||mb_mode!=OC_MODE_INTER_MV_FOUR||_pli)){
850
1.44M
      int overhead_bits;
851
1.44M
      overhead_bits=oc_fr_cost1(_fr);
852
      /*Although the fragment coding overhead determination is accurate, it is
853
         greedy, using very coarse-grained local information.
854
        Allowing it to mildly discourage coding turns out to be beneficial, but
855
         it's not clear that allowing it to encourage coding through negative
856
         coding overhead deltas is useful.
857
        For that reason, we disallow negative coding overheads.*/
858
1.44M
      if(overhead_bits<0)overhead_bits=0;
859
1.44M
      if(uncoded_ssd<=coded_ssd+(overhead_bits+ac_bits)*_enc->lambda){
860
        /*Hm, not worth it; roll back.*/
861
202k
        oc_enc_tokenlog_rollback(_enc,checkpoint,(*_stack)-checkpoint);
862
202k
        *_stack=checkpoint;
863
202k
        frags[_fragi].coded=0;
864
202k
        frags[_fragi].refi=OC_FRAME_NONE;
865
202k
        oc_fr_skip_block(_fr);
866
202k
        return 0;
867
202k
      }
868
1.44M
    }
869
0
    else _mo->dc_flag=1;
870
1.24M
    _mo->uncoded_ac_ssd+=uncoded_ssd;
871
1.24M
    _mo->coded_ac_ssd+=coded_ssd;
872
1.24M
    _mo->ac_bits+=ac_bits;
873
1.24M
    oc_fr_code_block(_fr);
874
1.24M
  }
875
  /*GCC 4.4.4 generates a warning here because it can't tell that
876
     the init code in the nqis check above will run anytime this
877
     line runs.*/
878
19.8M
  if(nqis>1)*(_pipe->qs+_pli)=*&qs;
879
19.8M
  frags[_fragi].dc=dc;
880
19.8M
  frags[_fragi].coded=1;
881
19.8M
  return 1;
882
20.0M
}
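
The skip test above reduces to a single comparison: keep the block coded only if uncoded_ssd > coded_ssd+(overhead_bits+ac_bits)*lambda. With illustrative numbers, at lambda=100 a block whose tokens cost ac_bits=30 and whose flag overhead is 2 bits must reduce the (scaled) SSD by more than 3200 to be worth coding; otherwise the tokens are rolled back and the fragment is marked uncoded.
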
883
884
static int oc_enc_mb_transform_quantize_inter_luma(oc_enc_ctx *_enc,
885
 oc_enc_pipeline_state *_pipe,unsigned _mbi,int _mode_overhead,
886
308k
 const unsigned _rd_scale[4],const unsigned _rd_iscale[4]){
887
  /*Worst case token stack usage for 4 fragments.*/
888
308k
  oc_token_checkpoint  stack[64*4];
889
308k
  oc_token_checkpoint *stackptr;
890
308k
  const oc_sb_map     *sb_maps;
891
308k
  signed char         *mb_modes;
892
308k
  oc_fragment         *frags;
893
308k
  ptrdiff_t           *coded_fragis;
894
308k
  ptrdiff_t            ncoded_fragis;
895
308k
  ptrdiff_t           *uncoded_fragis;
896
308k
  ptrdiff_t            nuncoded_fragis;
897
308k
  oc_rd_metric         mo;
898
308k
  oc_fr_state          fr_checkpoint;
899
308k
  oc_qii_state         qs_checkpoint;
900
308k
  int                  mb_mode;
901
308k
  int                  refi;
902
308k
  int                  ncoded;
903
308k
  ptrdiff_t            fragi;
904
308k
  int                  bi;
905
308k
  *&fr_checkpoint=*(_pipe->fr+0);
906
308k
  *&qs_checkpoint=*(_pipe->qs+0);
907
308k
  sb_maps=(const oc_sb_map *)_enc->state.sb_maps;
908
308k
  mb_modes=_enc->state.mb_modes;
909
308k
  frags=_enc->state.frags;
910
308k
  coded_fragis=_pipe->coded_fragis[0];
911
308k
  ncoded_fragis=_pipe->ncoded_fragis[0];
912
308k
  uncoded_fragis=_pipe->uncoded_fragis[0];
913
308k
  nuncoded_fragis=_pipe->nuncoded_fragis[0];
914
308k
  mb_mode=mb_modes[_mbi];
915
308k
  refi=OC_FRAME_FOR_MODE(mb_mode);
916
308k
  ncoded=0;
917
308k
  stackptr=stack;
918
308k
  memset(&mo,0,sizeof(mo));
919
1.54M
  for(bi=0;bi<4;bi++){
920
1.23M
    fragi=sb_maps[_mbi>>2][_mbi&3][bi];
921
1.23M
    frags[fragi].refi=refi;
922
1.23M
    frags[fragi].mb_mode=mb_mode;
923
1.23M
    if(oc_enc_block_transform_quantize(_enc,_pipe,0,fragi,
924
1.23M
     _rd_scale[bi],_rd_iscale[bi],&mo,_pipe->fr+0,&stackptr)){
925
867k
      coded_fragis[ncoded_fragis++]=fragi;
926
867k
      ncoded++;
927
867k
    }
928
367k
    else *(uncoded_fragis-++nuncoded_fragis)=fragi;
929
1.23M
  }
930
308k
  if(ncoded>0&&!mo.dc_flag){
931
244k
    int cost;
932
    /*Some individual blocks were worth coding.
933
      See if that's still true when accounting for mode and MV overhead.*/
934
244k
    cost=mo.coded_ac_ssd+_enc->lambda*(mo.ac_bits
935
244k
     +oc_fr_cost4(&fr_checkpoint,_pipe->fr+0)+_mode_overhead);
936
244k
    if(mo.uncoded_ac_ssd<=cost){
937
      /*Taking macroblock overhead into account, it is not worth coding this
938
         MB.*/
939
6.16k
      oc_enc_tokenlog_rollback(_enc,stack,stackptr-stack);
940
6.16k
      *(_pipe->fr+0)=*&fr_checkpoint;
941
6.16k
      *(_pipe->qs+0)=*&qs_checkpoint;
942
30.8k
      for(bi=0;bi<4;bi++){
943
24.6k
        fragi=sb_maps[_mbi>>2][_mbi&3][bi];
944
24.6k
        if(frags[fragi].coded){
945
8.47k
          *(uncoded_fragis-++nuncoded_fragis)=fragi;
946
8.47k
          frags[fragi].coded=0;
947
8.47k
          frags[fragi].refi=OC_FRAME_NONE;
948
8.47k
        }
949
24.6k
        oc_fr_skip_block(_pipe->fr+0);
950
24.6k
      }
951
6.16k
      ncoded_fragis-=ncoded;
952
6.16k
      ncoded=0;
953
6.16k
    }
954
244k
  }
955
  /*If no luma blocks coded, the mode is forced.*/
956
308k
  if(ncoded==0)mb_modes[_mbi]=OC_MODE_INTER_NOMV;
957
  /*Assume that a 1MV with a single coded block is always cheaper than a 4MV
958
     with a single coded block.
959
    This may not be strictly true: a 4MV computes chroma MVs using (0,0) for
960
     skipped blocks, while a 1MV does not.*/
961
237k
  else if(ncoded==1&&mb_mode==OC_MODE_INTER_MV_FOUR){
962
203
    mb_modes[_mbi]=OC_MODE_INTER_MV;
963
203
  }
964
308k
  _pipe->ncoded_fragis[0]=ncoded_fragis;
965
308k
  _pipe->nuncoded_fragis[0]=nuncoded_fragis;
966
308k
  return ncoded;
967
308k
}
968
969
static void oc_enc_sb_transform_quantize_inter_chroma(oc_enc_ctx *_enc,
970
82.0k
 oc_enc_pipeline_state *_pipe,int _pli,int _sbi_start,int _sbi_end){
971
82.0k
  const ogg_uint16_t *mcu_rd_scale;
972
82.0k
  const ogg_uint16_t *mcu_rd_iscale;
973
82.0k
  const oc_sb_map    *sb_maps;
974
82.0k
  oc_sb_flags        *sb_flags;
975
82.0k
  oc_fr_state        *fr;
976
82.0k
  ptrdiff_t          *coded_fragis;
977
82.0k
  ptrdiff_t           ncoded_fragis;
978
82.0k
  ptrdiff_t          *uncoded_fragis;
979
82.0k
  ptrdiff_t           nuncoded_fragis;
980
82.0k
  ptrdiff_t           froffset;
981
82.0k
  int                 sbi;
982
82.0k
  fr=_pipe->fr+_pli;
983
82.0k
  mcu_rd_scale=(const ogg_uint16_t *)_enc->mcu_rd_scale;
984
82.0k
  mcu_rd_iscale=(const ogg_uint16_t *)_enc->mcu_rd_iscale;
985
82.0k
  sb_maps=(const oc_sb_map *)_enc->state.sb_maps;
986
82.0k
  sb_flags=_enc->state.sb_flags;
987
82.0k
  coded_fragis=_pipe->coded_fragis[_pli];
988
82.0k
  ncoded_fragis=_pipe->ncoded_fragis[_pli];
989
82.0k
  uncoded_fragis=_pipe->uncoded_fragis[_pli];
990
82.0k
  nuncoded_fragis=_pipe->nuncoded_fragis[_pli];
991
82.0k
  froffset=_pipe->froffset[_pli];
992
211k
  for(sbi=_sbi_start;sbi<_sbi_end;sbi++){
993
    /*Worst case token stack usage for 1 fragment.*/
994
129k
    oc_token_checkpoint stack[64];
995
129k
    oc_rd_metric        mo;
996
129k
    int                 quadi;
997
129k
    int                 bi;
998
129k
    memset(&mo,0,sizeof(mo));
999
2.58M
    for(quadi=0;quadi<4;quadi++)for(bi=0;bi<4;bi++){
1000
2.06M
      ptrdiff_t fragi;
1001
2.06M
      fragi=sb_maps[sbi][quadi][bi];
1002
2.06M
      if(fragi>=0){
1003
686k
        oc_token_checkpoint *stackptr;
1004
686k
        unsigned             rd_scale;
1005
686k
        unsigned             rd_iscale;
1006
686k
        rd_scale=mcu_rd_scale[fragi-froffset];
1007
686k
        rd_iscale=mcu_rd_iscale[fragi-froffset];
1008
686k
        stackptr=stack;
1009
686k
        if(oc_enc_block_transform_quantize(_enc,_pipe,_pli,fragi,
1010
686k
         rd_scale,rd_iscale,&mo,fr,&stackptr)){
1011
376k
          coded_fragis[ncoded_fragis++]=fragi;
1012
376k
        }
1013
310k
        else *(uncoded_fragis-++nuncoded_fragis)=fragi;
1014
686k
      }
1015
2.06M
    }
1016
129k
    oc_fr_state_flush_sb(fr);
1017
129k
    sb_flags[sbi].coded_fully=fr->sb_full;
1018
129k
    sb_flags[sbi].coded_partially=fr->sb_partial;
1019
129k
  }
1020
82.0k
  _pipe->ncoded_fragis[_pli]=ncoded_fragis;
1021
82.0k
  _pipe->nuncoded_fragis[_pli]=nuncoded_fragis;
1022
82.0k
}
1023
1024
/*Mode decision is done by exhaustively examining all potential choices.
1025
  Obviously, doing the motion compensation, fDCT, tokenization, and then
1026
   counting the bits each token uses is computationally expensive.
1027
  Theora's EOB runs can also split the cost of these tokens across multiple
1028
   fragments, and naturally we don't know what the optimal choice of Huffman
1029
   codes will be until we know all the tokens we're going to encode in all the
1030
   fragments.
1031
  So we use a simple approach to estimating the bit cost and distortion of each
1032
   mode based upon the SATD value of the residual before coding.
1033
  The mathematics behind the technique are outlined by Kim \cite{Kim03}, but
1034
   the process (modified somewhat from that of the paper) is very simple.
1035
  We build a non-linear regression of the mappings from
1036
   (pre-transform+quantization) SATD to (post-transform+quantization) bits and
1037
   SSD for each qi.
1038
  A separate set of mappings is kept for each quantization type and color
1039
   plane.
1040
  The mappings are constructed by partitioning the SATD values into a small
1041
   number of bins (currently 24) and using a linear regression in each bin
1042
   (as opposed to the 0th-order regression used by Kim).
1043
  The bit counts and SSD measurements are obtained by examining actual encoded
1044
   frames, with appropriate lambda values and optimal Huffman codes selected.
1045
  EOB bits are assigned to the fragment that started the EOB run (as opposed to
1046
   dividing them among all the blocks in the run; the latter approach seems
1047
   more theoretically correct, but Monty's testing showed a small improvement
1048
   with the former, though that may have been merely statistical noise).
1049
1050
  @ARTICLE{Kim03,
1051
    author="Hyun Mun Kim",
1052
    title="Adaptive Rate Control Using Nonlinear Regression",
1053
    journal="IEEE Transactions on Circuits and Systems for Video Technology",
1054
    volume=13,
1055
    number=5,
1056
    pages="432--439",
1057
    month=May,
1058
    year=2003
1059
  }*/
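
A minimal sketch of evaluating such a binned linear model (BINS, SHIFT, and the table contents below are illustrative stand-ins, not the encoder's real OC_SATD_SHIFT or trained tables):

    #include <stdio.h>

    #define BINS  24
    #define SHIFT 9  /*assumed bin width of 512 SATD units*/

    /*Each bin stores the model value at its left edge; interpolate
       linearly toward the next bin, clamping into the covered range.*/
    static int binned_model(const int _v[BINS],int _satd){
      int bin;
      int dx;
      bin=_satd>>SHIFT;
      if(bin>BINS-2)bin=BINS-2;
      dx=_satd-(bin<<SHIFT);
      return _v[bin]+((_v[bin+1]-_v[bin])*dx>>SHIFT);
    }

    int main(void){
      int v[BINS];
      int i;
      /*Arbitrary illustrative values; the real tables are trained from
         encoded frames.*/
      for(i=0;i<BINS;i++)v[i]=10*i;
      printf("satd=1000 -> %d\n",binned_model(v,1000));
      return 0;
    }
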
1060
1061
/*Computes (_ssd+_lambda*_rate)/(1<<OC_BIT_SCALE) with rounding, avoiding
1062
   overflow for large lambda values.*/
1063
#define OC_MODE_RD_COST(_ssd,_rate,_lambda) \
1064
84.4M
 ((_ssd)>>OC_BIT_SCALE)+((_rate)>>OC_BIT_SCALE)*(_lambda) \
1065
84.4M
 +(((_ssd)&(1<<OC_BIT_SCALE)-1)+((_rate)&(1<<OC_BIT_SCALE)-1)*(_lambda) \
1066
84.4M
 +((1<<OC_BIT_SCALE)>>1)>>OC_BIT_SCALE)
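
With s=OC_BIT_SCALE and m=(1<<s)-1, the macro expands each operand as x=(x>>s)*(1<<s)+(x&m), so it returns exactly floor((_ssd+_rate*_lambda+((1<<s)>>1))/(1<<s)): the high parts contribute (_ssd>>s)+(_rate>>s)*(_lambda) directly, and only the low parts (each less than 1<<s) enter the rounded division, so the full product _rate*_lambda is never formed at its unscaled magnitude.
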
1067
1068
65.3k
static void oc_enc_mode_rd_init(oc_enc_ctx *_enc){
1069
65.3k
#if !defined(OC_COLLECT_METRICS)
1070
65.3k
  const
1071
65.3k
#endif
1072
65.3k
  oc_mode_rd (*oc_mode_rd_table)[3][2][OC_COMP_BINS]=
1073
65.3k
   _enc->sp_level<OC_SP_LEVEL_NOSATD?OC_MODE_RD_SATD:OC_MODE_RD_SAD;
1074
65.3k
  int qii;
1075
#if defined(OC_COLLECT_METRICS)
1076
  oc_enc_mode_metrics_load(_enc);
1077
#endif
1078
184k
  for(qii=0;qii<_enc->state.nqis;qii++){
1079
119k
    int qi;
1080
119k
    int pli;
1081
119k
    qi=_enc->state.qis[qii];
1082
478k
    for(pli=0;pli<3;pli++){
1083
358k
      int qti;
1084
1.07M
      for(qti=0;qti<2;qti++){
1085
717k
        int log_plq;
1086
717k
        int modeline;
1087
717k
        int bin;
1088
717k
        int dx;
1089
717k
        int dq;
1090
717k
        log_plq=_enc->log_plq[qi][pli][qti];
1091
        /*Find the pair of rows in the mode table that bracket this quantizer.
1092
          If it falls outside the range the table covers, then we just use a
1093
           pair on the edge for linear extrapolation.*/
1094
3.61M
        for(modeline=0;modeline<OC_LOGQ_BINS-1&&
1095
3.61M
         OC_MODE_LOGQ[modeline+1][pli][qti]>log_plq;modeline++);
1096
        /*Interpolate a row for this quantizer.*/
1097
717k
        dx=OC_MODE_LOGQ[modeline][pli][qti]-log_plq;
1098
717k
        dq=OC_MODE_LOGQ[modeline][pli][qti]-OC_MODE_LOGQ[modeline+1][pli][qti];
1099
717k
        if(dq==0)dq=1;
1100
17.9M
        for(bin=0;bin<OC_COMP_BINS;bin++){
1101
17.2M
          int y0;
1102
17.2M
          int z0;
1103
17.2M
          int dy;
1104
17.2M
          int dz;
1105
17.2M
          y0=oc_mode_rd_table[modeline][pli][qti][bin].rate;
1106
17.2M
          z0=oc_mode_rd_table[modeline][pli][qti][bin].rmse;
1107
17.2M
          dy=oc_mode_rd_table[modeline+1][pli][qti][bin].rate-y0;
1108
17.2M
          dz=oc_mode_rd_table[modeline+1][pli][qti][bin].rmse-z0;
1109
17.2M
          _enc->mode_rd[qii][pli][qti][bin].rate=
1110
17.2M
           (ogg_int16_t)OC_CLAMPI(-32768,y0+(dy*dx+(dq>>1))/dq,32767);
1111
17.2M
          _enc->mode_rd[qii][pli][qti][bin].rmse=
1112
17.2M
           (ogg_int16_t)OC_CLAMPI(-32768,z0+(dz*dx+(dq>>1))/dq,32767);
1113
17.2M
        }
1114
717k
      }
1115
358k
    }
1116
119k
  }
1117
65.3k
}
1118
1119
/*Estimate the R-D cost of the DCT coefficients given the SATD of a block after
1120
   prediction.*/
1121
static unsigned oc_dct_cost2(oc_enc_ctx *_enc,unsigned *_ssd,
1122
54.3M
 int _qii,int _pli,int _qti,int _satd){
1123
54.3M
  unsigned rmse;
1124
54.3M
  int      shift;
1125
54.3M
  int      bin;
1126
54.3M
  int      dx;
1127
54.3M
  int      y0;
1128
54.3M
  int      z0;
1129
54.3M
  int      dy;
1130
54.3M
  int      dz;
1131
  /*SATD metrics for chroma planes vary much less than luma, so we scale them
1132
     by 4 to distribute them into the mode decision bins more evenly.*/
1133
54.3M
  _satd<<=_pli+1&2;
1134
54.3M
  shift=_enc->sp_level<OC_SP_LEVEL_NOSATD?OC_SATD_SHIFT:OC_SAD_SHIFT;
1135
54.3M
  bin=OC_MINI(_satd>>shift,OC_COMP_BINS-2);
1136
54.3M
  dx=_satd-(bin<<shift);
1137
54.3M
  y0=_enc->mode_rd[_qii][_pli][_qti][bin].rate;
1138
54.3M
  z0=_enc->mode_rd[_qii][_pli][_qti][bin].rmse;
1139
54.3M
  dy=_enc->mode_rd[_qii][_pli][_qti][bin+1].rate-y0;
1140
54.3M
  dz=_enc->mode_rd[_qii][_pli][_qti][bin+1].rmse-z0;
1141
54.3M
  rmse=OC_MAXI(z0+(dz*dx>>shift),0);
1142
54.3M
  *_ssd=rmse*rmse>>2*OC_RMSE_SCALE-OC_BIT_SCALE;
1143
54.3M
  return OC_MAXI(y0+(dy*dx>>shift),0);
1144
54.3M
}
1145
1146
/*activity_avg must be positive, or flat regions could get a zero weight, which
1147
   confounds analysis.
1148
  We set the minimum to this value so that it also avoids the need for divide
1149
   by zero checks in oc_mb_masking().*/
1150
# define OC_ACTIVITY_AVG_MIN (1<<OC_RD_SCALE_BITS)
1151
1152
static unsigned oc_mb_activity(oc_enc_ctx *_enc,unsigned _mbi,
1153
2.76M
 unsigned _activity[4]){
1154
2.76M
  const unsigned char *src;
1155
2.76M
  const ptrdiff_t     *frag_buf_offs;
1156
2.76M
  const ptrdiff_t     *sb_map;
1157
2.76M
  unsigned             luma;
1158
2.76M
  int                  ystride;
1159
2.76M
  ptrdiff_t            frag_offs;
1160
2.76M
  ptrdiff_t            fragi;
1161
2.76M
  int                  bi;
1162
2.76M
  frag_buf_offs=_enc->state.frag_buf_offs;
1163
2.76M
  sb_map=_enc->state.sb_maps[_mbi>>2][_mbi&3];
1164
2.76M
  src=_enc->state.ref_frame_data[OC_FRAME_IO];
1165
2.76M
  ystride=_enc->state.ref_ystride[0];
1166
2.76M
  luma=0;
1167
13.8M
  for(bi=0;bi<4;bi++){
1168
11.0M
    const unsigned char *s;
1169
11.0M
    unsigned             x;
1170
11.0M
    unsigned             x2;
1171
11.0M
    unsigned             act;
1172
11.0M
    int                  i;
1173
11.0M
    int                  j;
1174
11.0M
    fragi=sb_map[bi];
1175
11.0M
    frag_offs=frag_buf_offs[fragi];
1176
    /*TODO: This could be replaced with SATD^2, since we already have to
1177
       compute SATD.*/
1178
11.0M
    x=x2=0;
1179
11.0M
    s=src+frag_offs;
1180
99.6M
    for(i=0;i<8;i++){
1181
797M
      for(j=0;j<8;j++){
1182
708M
        unsigned c;
1183
708M
        c=s[j];
1184
708M
        x+=c;
1185
708M
        x2+=c*c;
1186
708M
      }
1187
88.5M
      s+=ystride;
1188
88.5M
    }
1189
11.0M
    luma+=x;
1190
11.0M
    act=(x2<<6)-x*x;
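    /*Note: with 64 pixels, (x2<<6)-x*x==64*sum(c*c)-(sum c)^2, i.e., 64^2
       times the variance of the block's pixel values.*/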
1191
11.0M
    if(act<8<<12){
1192
      /*The region is flat.*/
1193
8.58M
      act=OC_MINI(act,5<<12);
1194
8.58M
    }
1195
2.48M
    else{
1196
2.48M
      unsigned e1;
1197
2.48M
      unsigned e2;
1198
2.48M
      unsigned e3;
1199
2.48M
      unsigned e4;
1200
      /*Test for an edge.
1201
        TODO: There are probably much simpler ways to do this (e.g., it could
1202
         probably be combined with the SATD calculation).
1203
        Alternatively, we could split the block around the mean and compute the
1204
         reduction in variance in each half.
1205
        For a Gaussian source the reduction should be
1206
         (1-2/pi) ~= 0.36338022763241865692446494650994.
1207
        Significantly more reduction is a good indication of a bi-level image.
1208
        This has the advantage of identifying, in addition to straight edges,
1209
         small text regions, which would otherwise be classified as "texture".*/
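      /*e1..e4 below accumulate Sobel-style gradient responses along the
         horizontal, vertical, and two diagonal orientations.*/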
1210
2.48M
      e1=e2=e3=e4=0;
1211
2.48M
      s=src+frag_offs-1;
1212
22.3M
      for(i=0;i<8;i++){
1213
178M
        for(j=0;j<8;j++){
1214
158M
          e1+=abs((s[j+2]-s[j]<<1)+(s-ystride)[j+2]-(s-ystride)[j]
1215
158M
           +(s+ystride)[j+2]-(s+ystride)[j]);
1216
158M
          e2+=abs(((s+ystride)[j+1]-(s-ystride)[j+1]<<1)
1217
158M
           +(s+ystride)[j]-(s-ystride)[j]+(s+ystride)[j+2]-(s-ystride)[j+2]);
1218
158M
          e3+=abs(((s+ystride)[j+2]-(s-ystride)[j]<<1)
1219
158M
           +(s+ystride)[j+1]-s[j]+s[j+2]-(s-ystride)[j+1]);
1220
158M
          e4+=abs(((s+ystride)[j]-(s-ystride)[j+2]<<1)
1221
158M
           +(s+ystride)[j+1]-s[j+2]+s[j]-(s-ystride)[j+1]);
1222
158M
        }
1223
19.8M
        s+=ystride;
1224
19.8M
      }
1225
      /*If the largest component of the edge energy is at least 40% of the
1226
         total, then classify the block as an edge block.*/
1227
2.48M
      if(5*OC_MAXI(OC_MAXI(e1,e2),OC_MAXI(e3,e4))>2*(e1+e2+e3+e4)){
1228
         /*act=act_th*(act/act_th)**0.7
1229
              =exp(log(act_th)+0.7*(log(act)-log(act_th))).
1230
           Here act_th=5.0 and 0x394A=oc_blog32_q10(5<<12).*/
1231
35.0k
         act=oc_bexp32_q10(0x394A+(7*(oc_blog32_q10(act)-0x394A+5)/10));
1232
35.0k
      }
1233
2.48M
    }
1234
11.0M
    _activity[bi]=act;
1235
11.0M
  }
1236
2.76M
  return luma;
1237
2.76M
}
1238
1239
static void oc_mb_activity_fast(oc_enc_ctx *_enc,unsigned _mbi,
1240
0
 unsigned _activity[4],const unsigned _intra_satd[12]){
1241
0
  int bi;
1242
0
  for(bi=0;bi<4;bi++){
1243
0
    unsigned act;
1244
0
    act=(11*_intra_satd[bi]>>8)*_intra_satd[bi];
1245
0
    if(act<8<<12){
1246
      /*The region is flat.*/
1247
0
      act=OC_MINI(act,5<<12);
1248
0
    }
1249
0
    _activity[bi]=act;
1250
0
  }
1251
0
}
1252
1253
/*Compute the masking scales for the blocks in a macro block.
1254
  All masking is computed from the luma blocks.
1255
  We derive scaling factors for the chroma blocks from these, and use the same
1256
   ones for all chroma blocks, regardless of the subsampling.
1257
  It's possible for luma to be perfectly flat and yet have high chroma energy,
1258
   but this is unlikely in non-artificial images, and not a case that has been
1259
   addressed by any research to my knowledge.
1260
  The output of the masking process is two scale factors, which are fed into
1261
   the various R-D optimizations.
1262
  The first, rd_scale, is applied to D in the equation
1263
    D*rd_scale+lambda*R.
1264
  This is the form that must be used to properly combine scores from multiple
1265
   blocks, and can be interpreted as scaling distortions by their visibility.
1266
  The inverse, rd_iscale, is applied to lambda in the equation
1267
    D+rd_iscale*lambda*R.
1268
  This is equivalent to the first form within a single block, but much faster
1269
   to use when evaluating many possible distortions (e.g., during actual
1270
   quantization, where separate distortions are evaluated for every
1271
   coefficient).
1272
  The two macros OC_RD_SCALE(rd_scale,d) and OC_RD_ISCALE(rd_iscale,lambda) are
1273
   used to perform the multiplications with the proper re-scaling for the range
1274
   of the scaling factors.
1275
  Many researchers apply masking values directly to the quantizers used, and
1276
   not to the R-D cost.
1277
  Since we generally use MSE for D, rd_scale must use the square of their
1278
   values to generate an equivalent effect.*/
1279
static unsigned oc_mb_masking(unsigned _rd_scale[5],unsigned _rd_iscale[5],
1280
 const ogg_uint16_t _chroma_rd_scale[2],const unsigned _activity[4],
1281
2.76M
 unsigned _activity_avg,unsigned _luma,unsigned _luma_avg){
1282
2.76M
  unsigned activity_sum;
1283
2.76M
  unsigned la;
1284
2.76M
  unsigned lb;
1285
2.76M
  unsigned d;
1286
2.76M
  int      bi;
1287
2.76M
  int      bi_min;
1288
2.76M
  int      bi_min2;
1289
  /*The ratio lb/la is meant to approximate
1290
     ((((_luma-16)/219)*(255/128))**0.649**0.4**2), which is the
1291
     effective luminance masking from~\cite{LKW06} (including the self-masking
1292
     deflator).
1293
    The following actually turns out to be a pretty good approximation for
1294
     _luma>75 or so.
1295
    For smaller values luminance does not really follow Weber's Law anyway, and
1296
     this approximation gives a much less aggressive bitrate boost in this
1297
     region.
1298
    Though some researchers claim that contrast sensitivity actually decreases
1299
     for very low luminance values, in my experience excessive brightness on
1300
     LCDs or buggy color conversions (e.g., treating Y' as full-range instead
1301
     of the CCIR 601 range) make artifacts in such regions extremely visible.
1302
    We substitute _luma_avg for 128 to allow the strength of the masking to
1303
     vary with the actual average image luminance, within certain limits (the
1304
     caller has clamped _luma_avg to the range [90,160], inclusive).
1305
    @ARTICLE{LKW06,
1306
      author="Zhen Liu and Lina J. Karam and Andrew B. Watson",
1307
      title="{JPEG2000} Encoding With Perceptual Distortion Control",
1308
      journal="{IEEE} Transactions on Image Processing",
1309
      volume=15,
1310
      number=7,
1311
      pages="1763--1778",
1312
      month=Jul,
1313
      year=2006
1314
    }*/
1315
#if 0
1316
  la=_luma+4*_luma_avg;
1317
  lb=4*_luma+_luma_avg;
1318
#else
1319
  /*Disable luminance masking.*/
1320
2.76M
  la=lb=1;
1321
2.76M
#endif
1322
2.76M
  activity_sum=0;
1323
13.8M
  for(bi=0;bi<4;bi++){
1324
11.0M
    unsigned a;
1325
11.0M
    unsigned b;
1326
11.0M
    activity_sum+=_activity[bi];
1327
    /*Apply activity masking.*/
1328
11.0M
    a=_activity[bi]+4*_activity_avg;
1329
11.0M
    b=4*_activity[bi]+_activity_avg;
1330
11.0M
    d=OC_RD_SCALE(b,1);
1331
    /*And luminance masking.*/
1332
11.0M
    d=(a+(d>>1))/d;
1333
11.0M
    _rd_scale[bi]=(d*la+(lb>>1))/lb;
1334
    /*And now the inverse.*/
1335
11.0M
    d=OC_MAXI(OC_RD_ISCALE(a,1),1);
1336
11.0M
    d=(b+(d>>1))/d;
1337
11.0M
    _rd_iscale[bi]=(d*lb+(la>>1))/la;
1338
11.0M
  }
1339
  /*Now compute scaling factors for chroma blocks.
1340
    We start by finding the two smallest iscales from the luma blocks.*/
1341
2.76M
  bi_min=_rd_iscale[1]<_rd_iscale[0];
1342
2.76M
  bi_min2=1-bi_min;
1343
8.30M
  for(bi=2;bi<4;bi++){
1344
5.53M
    if(_rd_iscale[bi]<_rd_iscale[bi_min]){
1345
444k
      bi_min2=bi_min;
1346
444k
      bi_min=bi;
1347
444k
    }
1348
5.09M
    else if(_rd_iscale[bi]<_rd_iscale[bi_min2])bi_min2=bi;
1349
5.53M
  }
1350
  /*If the minimum iscale is less than 1.0, use the second smallest instead,
1351
     and force the value to at least 1.0 (inflating chroma is a waste).*/
1352
2.76M
  if(_rd_iscale[bi_min]<(1<<OC_RD_ISCALE_BITS))bi_min=bi_min2;
1353
2.76M
  d=OC_MINI(_rd_scale[bi_min],1<<OC_RD_SCALE_BITS);
1354
2.76M
  _rd_scale[4]=OC_RD_SCALE(d,_chroma_rd_scale[0]);
1355
2.76M
  d=OC_MAXI(_rd_iscale[bi_min],1<<OC_RD_ISCALE_BITS);
1356
2.76M
  _rd_iscale[4]=OC_RD_ISCALE(d,_chroma_rd_scale[1]);
1357
2.76M
  return activity_sum;
1358
2.76M
}
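A worked note on the equivalence claimed in the comment above: minimizing
D*rd_scale+lambda*R and minimizing D+rd_iscale*lambda*R rank candidates
identically within a single block whenever rd_iscale is (approximately) the
reciprocal of rd_scale, since dividing the first form by the positive
constant rd_scale cannot change which candidate is smallest.  The sketch
below illustrates this in floating point; it is not the library's fixed-point
code, and the (D,R) pairs are invented for the example.

#include <stdio.h>

static double cost_scaled(double d,double r,double lambda,double scale){
  return d*scale+lambda*r;
}

static double cost_iscaled(double d,double r,double lambda,double iscale){
  return d+iscale*lambda*r;
}

int main(void){
  double lambda=0.85;
  double scale=1.7;
  double iscale=1.0/scale;
  /*Two hypothetical (D,R) candidates for the same block.*/
  double d0=120,r0=40;
  double d1=90,r1=65;
  int pick_scaled=
   cost_scaled(d1,r1,lambda,scale)<cost_scaled(d0,r0,lambda,scale);
  int pick_iscaled=
   cost_iscaled(d1,r1,lambda,iscale)<cost_iscaled(d0,r0,lambda,iscale);
  /*Both forms select the same candidate.*/
  printf("same choice: %s\n",pick_scaled==pick_iscaled?"yes":"no");
  return 0;
}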
1359
1360
static int oc_mb_intra_satd(oc_enc_ctx *_enc,unsigned _mbi,
1361
308k
 unsigned _frag_satd[12]){
1362
308k
  const unsigned char   *src;
1363
308k
  const ptrdiff_t       *frag_buf_offs;
1364
308k
  const ptrdiff_t       *sb_map;
1365
308k
  const oc_mb_map_plane *mb_map;
1366
308k
  const unsigned char   *map_idxs;
1367
308k
  int                    map_nidxs;
1368
308k
  int                    mapii;
1369
308k
  int                    mapi;
1370
308k
  int                    ystride;
1371
308k
  int                    pli;
1372
308k
  int                    bi;
1373
308k
  ptrdiff_t              fragi;
1374
308k
  ptrdiff_t              frag_offs;
1375
308k
  unsigned               luma;
1376
308k
  int                    dc;
1377
308k
  frag_buf_offs=_enc->state.frag_buf_offs;
1378
308k
  sb_map=_enc->state.sb_maps[_mbi>>2][_mbi&3];
1379
308k
  src=_enc->state.ref_frame_data[OC_FRAME_IO];
1380
308k
  ystride=_enc->state.ref_ystride[0];
1381
308k
  luma=0;
1382
1.54M
  for(bi=0;bi<4;bi++){
1383
1.23M
    fragi=sb_map[bi];
1384
1.23M
    frag_offs=frag_buf_offs[fragi];
1385
1.23M
    _frag_satd[bi]=oc_enc_frag_intra_satd(_enc,&dc,src+frag_offs,ystride);
1386
1.23M
    luma+=dc;
1387
1.23M
  }
1388
308k
  mb_map=(const oc_mb_map_plane *)_enc->state.mb_maps[_mbi];
1389
308k
  map_idxs=OC_MB_MAP_IDXS[_enc->state.info.pixel_fmt];
1390
308k
  map_nidxs=OC_MB_MAP_NIDXS[_enc->state.info.pixel_fmt];
1391
  /*Note: This assumes ref_ystride[1]==ref_ystride[2].*/
1392
308k
  ystride=_enc->state.ref_ystride[1];
1393
995k
  for(mapii=4;mapii<map_nidxs;mapii++){
1394
686k
    mapi=map_idxs[mapii];
1395
686k
    pli=mapi>>2;
1396
686k
    bi=mapi&3;
1397
686k
    fragi=mb_map[pli][bi];
1398
686k
    frag_offs=frag_buf_offs[fragi];
1399
686k
    _frag_satd[mapii]=oc_enc_frag_intra_satd(_enc,&dc,src+frag_offs,ystride);
1400
686k
  }
1401
308k
  return luma;
1402
308k
}
1403
1404
/*Select luma block-level quantizers for a MB in an INTRA frame.*/
1405
static unsigned oc_analyze_intra_mb_luma(oc_enc_ctx *_enc,
1406
2.45M
 const oc_qii_state *_qs,unsigned _mbi,const unsigned _rd_scale[4]){
1407
2.45M
  const unsigned char *src;
1408
2.45M
  const ptrdiff_t     *frag_buf_offs;
1409
2.45M
  const oc_sb_map     *sb_maps;
1410
2.45M
  oc_fragment         *frags;
1411
2.45M
  ptrdiff_t            frag_offs;
1412
2.45M
  ptrdiff_t            fragi;
1413
2.45M
  oc_qii_state         qs[4][3];
1414
2.45M
  unsigned             cost[4][3];
1415
2.45M
  unsigned             ssd[4][3];
1416
2.45M
  unsigned             rate[4][3];
1417
2.45M
  int                  prev[3][3];
1418
2.45M
  unsigned             satd;
1419
2.45M
  int                  dc;
1420
2.45M
  unsigned             best_cost;
1421
2.45M
  unsigned             best_ssd;
1422
2.45M
  unsigned             best_rate;
1423
2.45M
  int                  best_qii;
1424
2.45M
  int                  qii;
1425
2.45M
  int                  lambda;
1426
2.45M
  int                  ystride;
1427
2.45M
  int                  nqis;
1428
2.45M
  int                  bi;
1429
2.45M
  frag_buf_offs=_enc->state.frag_buf_offs;
1430
2.45M
  sb_maps=(const oc_sb_map *)_enc->state.sb_maps;
1431
2.45M
  src=_enc->state.ref_frame_data[OC_FRAME_IO];
1432
2.45M
  ystride=_enc->state.ref_ystride[0];
1433
2.45M
  fragi=sb_maps[_mbi>>2][_mbi&3][0];
1434
2.45M
  frag_offs=frag_buf_offs[fragi];
1435
2.45M
  if(_enc->sp_level<OC_SP_LEVEL_NOSATD){
1436
2.45M
    satd=oc_enc_frag_intra_satd(_enc,&dc,src+frag_offs,ystride);
1437
2.45M
  }
1438
0
  else{
1439
0
    satd=oc_enc_frag_intra_sad(_enc,src+frag_offs,ystride);
1440
0
  }
1441
2.45M
  nqis=_enc->state.nqis;
1442
2.45M
  lambda=_enc->lambda;
1443
6.58M
  for(qii=0;qii<nqis;qii++){
1444
4.12M
    oc_qii_state_advance(qs[0]+qii,_qs,qii);
1445
4.12M
    rate[0][qii]=oc_dct_cost2(_enc,ssd[0]+qii,qii,0,0,satd)
1446
4.12M
     +(qs[0][qii].bits-_qs->bits<<OC_BIT_SCALE);
1447
4.12M
    ssd[0][qii]=OC_RD_SCALE(ssd[0][qii],_rd_scale[0]);
1448
4.12M
    cost[0][qii]=OC_MODE_RD_COST(ssd[0][qii],rate[0][qii],lambda);
1449
4.12M
  }
1450
9.83M
  for(bi=1;bi<4;bi++){
1451
7.37M
    fragi=sb_maps[_mbi>>2][_mbi&3][bi];
1452
7.37M
    frag_offs=frag_buf_offs[fragi];
1453
7.37M
    if(_enc->sp_level<OC_SP_LEVEL_NOSATD){
1454
7.37M
      satd=oc_enc_frag_intra_satd(_enc,&dc,src+frag_offs,ystride);
1455
7.37M
    }
1456
0
    else{
1457
0
      satd=oc_enc_frag_intra_sad(_enc,src+frag_offs,ystride);
1458
0
    }
1459
19.7M
    for(qii=0;qii<nqis;qii++){
1460
12.3M
      oc_qii_state qt[3];
1461
12.3M
      unsigned     cur_ssd;
1462
12.3M
      unsigned     cur_rate;
1463
12.3M
      int          best_qij;
1464
12.3M
      int          qij;
1465
12.3M
      oc_qii_state_advance(qt+0,qs[bi-1]+0,qii);
1466
12.3M
      cur_rate=oc_dct_cost2(_enc,&cur_ssd,qii,0,0,satd);
1467
12.3M
      cur_ssd=OC_RD_SCALE(cur_ssd,_rd_scale[bi]);
1468
12.3M
      best_ssd=ssd[bi-1][0]+cur_ssd;
1469
12.3M
      best_rate=rate[bi-1][0]+cur_rate
1470
12.3M
       +(qt[0].bits-qs[bi-1][0].bits<<OC_BIT_SCALE);
1471
12.3M
      best_cost=OC_MODE_RD_COST(best_ssd,best_rate,lambda);
1472
12.3M
      best_qij=0;
1473
27.1M
      for(qij=1;qij<nqis;qij++){
1474
14.7M
        unsigned chain_ssd;
1475
14.7M
        unsigned chain_rate;
1476
14.7M
        unsigned chain_cost;
1477
14.7M
        oc_qii_state_advance(qt+qij,qs[bi-1]+qij,qii);
1478
14.7M
        chain_ssd=ssd[bi-1][qij]+cur_ssd;
1479
14.7M
        chain_rate=rate[bi-1][qij]+cur_rate
1480
14.7M
         +(qt[qij].bits-qs[bi-1][qij].bits<<OC_BIT_SCALE);
1481
14.7M
        chain_cost=OC_MODE_RD_COST(chain_ssd,chain_rate,lambda);
1482
14.7M
        if(chain_cost<best_cost){
1483
6.24M
          best_cost=chain_cost;
1484
6.24M
          best_ssd=chain_ssd;
1485
6.24M
          best_rate=chain_rate;
1486
6.24M
          best_qij=qij;
1487
6.24M
        }
1488
14.7M
      }
1489
12.3M
      *(qs[bi]+qii)=*(qt+best_qij);
1490
12.3M
      cost[bi][qii]=best_cost;
1491
12.3M
      ssd[bi][qii]=best_ssd;
1492
12.3M
      rate[bi][qii]=best_rate;
1493
12.3M
      prev[bi-1][qii]=best_qij;
1494
12.3M
    }
1495
7.37M
  }
1496
2.45M
  best_qii=0;
1497
2.45M
  best_cost=cost[3][0];
1498
4.12M
  for(qii=1;qii<nqis;qii++){
1499
1.67M
    if(cost[3][qii]<best_cost){
1500
539k
      best_cost=cost[3][qii];
1501
539k
      best_qii=qii;
1502
539k
    }
1503
1.67M
  }
1504
2.45M
  frags=_enc->state.frags;
1505
9.83M
  for(bi=3;;){
1506
9.83M
    fragi=sb_maps[_mbi>>2][_mbi&3][bi];
1507
9.83M
    frags[fragi].qii=best_qii;
1508
9.83M
    if(bi--<=0)break;
1509
7.37M
    best_qii=prev[bi][best_qii];
1510
7.37M
  }
1511
2.45M
  return best_cost;
1512
2.45M
}
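The function above is a small Viterbi-style dynamic program: for each luma
block it keeps the best-cost path ending in each qii choice, records
backpointers in prev[][], and finally walks the backpointers to assign a qii
to every block.  The following self-contained sketch shows the same search
shape; local_cost() and flag_cost() are hypothetical stand-ins for
oc_dct_cost2() and the qii-flag bit accounting (the real transition cost
depends on the run-length coder state, not just the previous choice).

#include <limits.h>
#include <stdio.h>

#define NBLOCKS 4
#define NQIS    3

/*Hypothetical per-block cost of choosing quantizer index qii.*/
static unsigned local_cost(int bi,int qii){
  return (unsigned)((bi+1)*(qii*qii+1));
}

/*Hypothetical cost of signaling qii given the previous block's choice.*/
static unsigned flag_cost(int prev_qii,int qii){
  return prev_qii==qii?0:2;
}

static unsigned best_qii_path(int nqis,int out_qii[NBLOCKS]){
  unsigned cost[NBLOCKS][NQIS];
  int      prev[NBLOCKS][NQIS];
  unsigned total;
  int      bi;
  int      qii;
  int      qij;
  int      best;
  for(qii=0;qii<nqis;qii++)cost[0][qii]=local_cost(0,qii);
  for(bi=1;bi<NBLOCKS;bi++){
    for(qii=0;qii<nqis;qii++){
      cost[bi][qii]=UINT_MAX;
      for(qij=0;qij<nqis;qij++){
        unsigned c;
        c=cost[bi-1][qij]+local_cost(bi,qii)+flag_cost(qij,qii);
        if(c<cost[bi][qii]){
          cost[bi][qii]=c;
          prev[bi][qii]=qij;
        }
      }
    }
  }
  /*Pick the cheapest final state, then backtrack.*/
  best=0;
  for(qii=1;qii<nqis;qii++)if(cost[NBLOCKS-1][qii]<cost[NBLOCKS-1][best])best=qii;
  total=cost[NBLOCKS-1][best];
  for(bi=NBLOCKS;bi-->0;){
    out_qii[bi]=best;
    if(bi>0)best=prev[bi][best];
  }
  return total;
}

int main(void){
  int      qii[NBLOCKS];
  int      bi;
  unsigned c;
  c=best_qii_path(NQIS,qii);
  printf("cost=%u qii=",c);
  for(bi=0;bi<NBLOCKS;bi++)printf("%d",qii[bi]);
  printf("\n");
  return 0;
}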
1513
1514
/*Select a block-level quantizer for a single chroma block in an INTRA frame.*/
1515
static unsigned oc_analyze_intra_chroma_block(oc_enc_ctx *_enc,
1516
8.73M
 const oc_qii_state *_qs,int _pli,ptrdiff_t _fragi,unsigned _rd_scale){
1517
8.73M
  const unsigned char *src;
1518
8.73M
  oc_fragment         *frags;
1519
8.73M
  ptrdiff_t            frag_offs;
1520
8.73M
  oc_qii_state         qt[3];
1521
8.73M
  unsigned             cost[3];
1522
8.73M
  unsigned             satd;
1523
8.73M
  int                  dc;
1524
8.73M
  unsigned             best_cost;
1525
8.73M
  int                  best_qii;
1526
8.73M
  int                  qii;
1527
8.73M
  int                  lambda;
1528
8.73M
  int                  ystride;
1529
8.73M
  int                  nqis;
1530
8.73M
  src=_enc->state.ref_frame_data[OC_FRAME_IO];
1531
8.73M
  ystride=_enc->state.ref_ystride[_pli];
1532
8.73M
  frag_offs=_enc->state.frag_buf_offs[_fragi];
1533
8.73M
  if(_enc->sp_level<OC_SP_LEVEL_NOSATD){
1534
8.73M
    satd=oc_enc_frag_intra_satd(_enc,&dc,src+frag_offs,ystride);
1535
8.73M
  }
1536
0
  else{
1537
0
    satd=oc_enc_frag_intra_sad(_enc,src+frag_offs,ystride);
1538
0
  }
1539
  /*Most chroma blocks have no AC coefficients to speak of anyway, so it's not
1540
     worth spending the bits to change the AC quantizer.
1541
    TODO: This may be worth revisiting when we separate out DC and AC
1542
     predictions from SATD.*/
1543
#if 0
1544
  nqis=_enc->state.nqis;
1545
#else
1546
8.73M
  nqis=1;
1547
8.73M
#endif
1548
8.73M
  lambda=_enc->lambda;
1549
8.73M
  best_qii=0;
1550
17.4M
  for(qii=0;qii<nqis;qii++){
1551
8.73M
    unsigned cur_rate;
1552
8.73M
    unsigned cur_ssd;
1553
8.73M
    oc_qii_state_advance(qt+qii,_qs,qii);
1554
8.73M
    cur_rate=oc_dct_cost2(_enc,&cur_ssd,qii,_pli,0,satd)
1555
8.73M
     +(qt[qii].bits-_qs->bits<<OC_BIT_SCALE);
1556
8.73M
    cur_ssd=OC_RD_SCALE(cur_ssd,_rd_scale);
1557
8.73M
    cost[qii]=OC_MODE_RD_COST(cur_ssd,cur_rate,lambda);
1558
8.73M
  }
1559
8.73M
  best_cost=cost[0];
1560
8.73M
  for(qii=1;qii<nqis;qii++){
1561
0
    if(cost[qii]<best_cost){
1562
0
      best_cost=cost[qii];
1563
0
      best_qii=qii;
1564
0
    }
1565
0
  }
1566
8.73M
  frags=_enc->state.frags;
1567
8.73M
  frags[_fragi].qii=best_qii;
1568
8.73M
  return best_cost;
1569
8.73M
}
1570
1571
static void oc_enc_mb_transform_quantize_intra_luma(oc_enc_ctx *_enc,
1572
 oc_enc_pipeline_state *_pipe,unsigned _mbi,
1573
2.45M
 const unsigned _rd_scale[4],const unsigned _rd_iscale[4]){
1574
  /*Worst case token stack usage for 4 fragments.*/
1575
2.45M
  oc_token_checkpoint  stack[64*4];
1576
2.45M
  oc_token_checkpoint *stackptr;
1577
2.45M
  const oc_sb_map     *sb_maps;
1578
2.45M
  oc_fragment         *frags;
1579
2.45M
  ptrdiff_t           *coded_fragis;
1580
2.45M
  ptrdiff_t            ncoded_fragis;
1581
2.45M
  ptrdiff_t            fragi;
1582
2.45M
  int                  bi;
1583
2.45M
  sb_maps=(const oc_sb_map *)_enc->state.sb_maps;
1584
2.45M
  frags=_enc->state.frags;
1585
2.45M
  coded_fragis=_pipe->coded_fragis[0];
1586
2.45M
  ncoded_fragis=_pipe->ncoded_fragis[0];
1587
2.45M
  stackptr=stack;
1588
12.2M
  for(bi=0;bi<4;bi++){
1589
9.83M
    fragi=sb_maps[_mbi>>2][_mbi&3][bi];
1590
9.83M
    frags[fragi].refi=OC_FRAME_SELF;
1591
9.83M
    frags[fragi].mb_mode=OC_MODE_INTRA;
1592
9.83M
    oc_enc_block_transform_quantize(_enc,_pipe,0,fragi,
1593
9.83M
     _rd_scale[bi],_rd_iscale[bi],NULL,NULL,&stackptr);
1594
9.83M
    coded_fragis[ncoded_fragis++]=fragi;
1595
9.83M
  }
1596
2.45M
  _pipe->ncoded_fragis[0]=ncoded_fragis;
1597
2.45M
}
1598
1599
static void oc_enc_sb_transform_quantize_intra_chroma(oc_enc_ctx *_enc,
1600
344k
 oc_enc_pipeline_state *_pipe,int _pli,int _sbi_start,int _sbi_end){
1601
344k
  const ogg_uint16_t *mcu_rd_scale;
1602
344k
  const ogg_uint16_t *mcu_rd_iscale;
1603
344k
  const oc_sb_map    *sb_maps;
1604
344k
  ptrdiff_t          *coded_fragis;
1605
344k
  ptrdiff_t           ncoded_fragis;
1606
344k
  ptrdiff_t           froffset;
1607
344k
  int                 sbi;
1608
344k
  mcu_rd_scale=(const ogg_uint16_t *)_enc->mcu_rd_scale;
1609
344k
  mcu_rd_iscale=(const ogg_uint16_t *)_enc->mcu_rd_iscale;
1610
344k
  sb_maps=(const oc_sb_map *)_enc->state.sb_maps;
1611
344k
  coded_fragis=_pipe->coded_fragis[_pli];
1612
344k
  ncoded_fragis=_pipe->ncoded_fragis[_pli];
1613
344k
  froffset=_pipe->froffset[_pli];
1614
1.78M
  for(sbi=_sbi_start;sbi<_sbi_end;sbi++){
1615
    /*Worst case token stack usage for 1 fragment.*/
1616
1.44M
    oc_token_checkpoint stack[64];
1617
1.44M
    int                 quadi;
1618
1.44M
    int                 bi;
1619
28.8M
    for(quadi=0;quadi<4;quadi++)for(bi=0;bi<4;bi++){
1620
23.1M
      ptrdiff_t fragi;
1621
23.1M
      fragi=sb_maps[sbi][quadi][bi];
1622
23.1M
      if(fragi>=0){
1623
8.73M
        oc_token_checkpoint *stackptr;
1624
8.73M
        unsigned             rd_scale;
1625
8.73M
        unsigned             rd_iscale;
1626
8.73M
        rd_scale=mcu_rd_scale[fragi-froffset];
1627
8.73M
        rd_iscale=mcu_rd_iscale[fragi-froffset];
1628
8.73M
        oc_analyze_intra_chroma_block(_enc,_pipe->qs+_pli,_pli,fragi,rd_scale);
1629
8.73M
        stackptr=stack;
1630
8.73M
        oc_enc_block_transform_quantize(_enc,_pipe,_pli,fragi,
1631
8.73M
         rd_scale,rd_iscale,NULL,NULL,&stackptr);
1632
8.73M
        coded_fragis[ncoded_fragis++]=fragi;
1633
8.73M
      }
1634
23.1M
    }
1635
1.44M
  }
1636
344k
  _pipe->ncoded_fragis[_pli]=ncoded_fragis;
1637
344k
}
1638
1639
/*Analysis stage for an INTRA frame.*/
1640
26.1k
void oc_enc_analyze_intra(oc_enc_ctx *_enc,int _recode){
1641
26.1k
  ogg_int64_t             activity_sum;
1642
26.1k
  ogg_int64_t             luma_sum;
1643
26.1k
  unsigned                activity_avg;
1644
26.1k
  unsigned                luma_avg;
1645
26.1k
  const ogg_uint16_t     *chroma_rd_scale;
1646
26.1k
  ogg_uint16_t           *mcu_rd_scale;
1647
26.1k
  ogg_uint16_t           *mcu_rd_iscale;
1648
26.1k
  const unsigned char    *map_idxs;
1649
26.1k
  int                     nmap_idxs;
1650
26.1k
  oc_sb_flags            *sb_flags;
1651
26.1k
  signed char            *mb_modes;
1652
26.1k
  const oc_mb_map        *mb_maps;
1653
26.1k
  const oc_sb_map        *sb_maps;
1654
26.1k
  oc_fragment            *frags;
1655
26.1k
  unsigned                stripe_sby;
1656
26.1k
  unsigned                mcu_nvsbs;
1657
26.1k
  int                     notstart;
1658
26.1k
  int                     notdone;
1659
26.1k
  int                     refi;
1660
26.1k
  int                     pli;
1661
26.1k
  _enc->state.frame_type=OC_INTRA_FRAME;
1662
26.1k
  oc_enc_tokenize_start(_enc);
1663
26.1k
  oc_enc_pipeline_init(_enc,&_enc->pipe);
1664
26.1k
  oc_enc_mode_rd_init(_enc);
1665
26.1k
  activity_sum=luma_sum=0;
1666
26.1k
  activity_avg=_enc->activity_avg;
1667
26.1k
  luma_avg=OC_CLAMPI(90<<8,_enc->luma_avg,160<<8);
1668
26.1k
  chroma_rd_scale=_enc->chroma_rd_scale[OC_INTRA_FRAME][_enc->state.qis[0]];
1669
26.1k
  mcu_rd_scale=_enc->mcu_rd_scale;
1670
26.1k
  mcu_rd_iscale=_enc->mcu_rd_iscale;
1671
  /*Choose MVs and MB modes and quantize and code luma.
1672
    Must be done in Hilbert order.*/
1673
26.1k
  map_idxs=OC_MB_MAP_IDXS[_enc->state.info.pixel_fmt];
1674
26.1k
  nmap_idxs=OC_MB_MAP_NIDXS[_enc->state.info.pixel_fmt];
1675
26.1k
  _enc->state.ncoded_fragis[0]=0;
1676
26.1k
  _enc->state.ncoded_fragis[1]=0;
1677
26.1k
  _enc->state.ncoded_fragis[2]=0;
1678
26.1k
  sb_flags=_enc->state.sb_flags;
1679
26.1k
  mb_modes=_enc->state.mb_modes;
1680
26.1k
  mb_maps=(const oc_mb_map *)_enc->state.mb_maps;
1681
26.1k
  sb_maps=(const oc_sb_map *)_enc->state.sb_maps;
1682
26.1k
  frags=_enc->state.frags;
1683
26.1k
  notstart=0;
1684
26.1k
  notdone=1;
1685
26.1k
  mcu_nvsbs=_enc->mcu_nvsbs;
1686
198k
  for(stripe_sby=0;notdone;stripe_sby+=mcu_nvsbs){
1687
172k
    ptrdiff_t cfroffset;
1688
172k
    unsigned  sbi;
1689
172k
    unsigned  sbi_end;
1690
172k
    notdone=oc_enc_pipeline_set_stripe(_enc,&_enc->pipe,stripe_sby);
1691
172k
    sbi_end=_enc->pipe.sbi_end[0];
1692
172k
    cfroffset=_enc->pipe.froffset[1];
1693
1.31M
    for(sbi=_enc->pipe.sbi0[0];sbi<sbi_end;sbi++){
1694
1.13M
      int quadi;
1695
      /*Mode addressing is through Y plane, always 4 MB per SB.*/
1696
5.69M
      for(quadi=0;quadi<4;quadi++)if(sb_flags[sbi].quad_valid&1<<quadi){
1697
2.45M
        unsigned  activity[4];
1698
2.45M
        unsigned  rd_scale[5];
1699
2.45M
        unsigned  rd_iscale[5];
1700
2.45M
        unsigned  luma;
1701
2.45M
        unsigned  mbi;
1702
2.45M
        int       mapii;
1703
2.45M
        int       mapi;
1704
2.45M
        int       bi;
1705
2.45M
        ptrdiff_t fragi;
1706
2.45M
        mbi=sbi<<2|quadi;
1707
        /*Activity masking.*/
1708
2.45M
        if(_enc->sp_level<OC_SP_LEVEL_FAST_ANALYSIS){
1709
2.45M
          luma=oc_mb_activity(_enc,mbi,activity);
1710
2.45M
        }
1711
0
        else{
1712
0
          unsigned intra_satd[12];
1713
0
          luma=oc_mb_intra_satd(_enc,mbi,intra_satd);
1714
0
          oc_mb_activity_fast(_enc,mbi,activity,intra_satd);
1715
0
          for(bi=0;bi<4;bi++)frags[sb_maps[mbi>>2][mbi&3][bi]].qii=0;
1716
0
        }
1717
2.45M
        activity_sum+=oc_mb_masking(rd_scale,rd_iscale,
1718
2.45M
         chroma_rd_scale,activity,activity_avg,luma,luma_avg);
1719
2.45M
        luma_sum+=luma;
1720
        /*Motion estimation:
1721
          We do a basic 1MV search for all macroblocks, coded or not,
1722
           keyframe or not, unless we aren't using motion estimation at all.*/
1723
2.45M
        if(!_recode&&_enc->state.curframe_num>0&&
1724
7.00k
         _enc->sp_level<OC_SP_LEVEL_NOMC&&_enc->keyframe_frequency_force>1){
1725
435
          oc_mcenc_search(_enc,mbi);
1726
435
        }
1727
2.45M
        if(_enc->sp_level<OC_SP_LEVEL_FAST_ANALYSIS){
1728
2.45M
          oc_analyze_intra_mb_luma(_enc,_enc->pipe.qs+0,mbi,rd_scale);
1729
2.45M
        }
1730
2.45M
        mb_modes[mbi]=OC_MODE_INTRA;
1731
2.45M
        oc_enc_mb_transform_quantize_intra_luma(_enc,&_enc->pipe,
1732
2.45M
         mbi,rd_scale,rd_iscale);
1733
        /*Propagate final MB mode and MVs to the chroma blocks.*/
1734
11.1M
        for(mapii=4;mapii<nmap_idxs;mapii++){
1735
8.73M
          mapi=map_idxs[mapii];
1736
8.73M
          pli=mapi>>2;
1737
8.73M
          bi=mapi&3;
1738
8.73M
          fragi=mb_maps[mbi][pli][bi];
1739
8.73M
          frags[fragi].refi=OC_FRAME_SELF;
1740
8.73M
          frags[fragi].mb_mode=OC_MODE_INTRA;
1741
8.73M
        }
1742
        /*Save masking scale factors for chroma blocks.*/
1743
6.82M
        for(mapii=4;mapii<(nmap_idxs-4>>1)+4;mapii++){
1744
4.36M
          mapi=map_idxs[mapii];
1745
4.36M
          bi=mapi&3;
1746
4.36M
          fragi=mb_maps[mbi][1][bi];
1747
4.36M
          mcu_rd_scale[fragi-cfroffset]=(ogg_uint16_t)rd_scale[4];
1748
4.36M
          mcu_rd_iscale[fragi-cfroffset]=(ogg_uint16_t)rd_iscale[4];
1749
4.36M
        }
1750
2.45M
      }
1751
1.13M
    }
1752
172k
    oc_enc_pipeline_finish_mcu_plane(_enc,&_enc->pipe,0,notstart,notdone);
1753
    /*Code chroma planes.*/
1754
516k
    for(pli=1;pli<3;pli++){
1755
344k
      oc_enc_sb_transform_quantize_intra_chroma(_enc,&_enc->pipe,
1756
344k
       pli,_enc->pipe.sbi0[pli],_enc->pipe.sbi_end[pli]);
1757
344k
      oc_enc_pipeline_finish_mcu_plane(_enc,&_enc->pipe,pli,notstart,notdone);
1758
344k
    }
1759
172k
    notstart=1;
1760
172k
  }
1761
  /*Compute the average block activity and MB luma score for the frame.*/
1762
26.1k
  _enc->activity_avg=OC_MAXI(OC_ACTIVITY_AVG_MIN,
1763
26.1k
   (unsigned)((activity_sum+(_enc->state.fplanes[0].nfrags>>1))/
1764
26.1k
   _enc->state.fplanes[0].nfrags));
1765
26.1k
  _enc->luma_avg=(unsigned)((luma_sum+(_enc->state.nmbs>>1))/_enc->state.nmbs);
1766
  /*Finish filling in the reference frame borders.*/
1767
26.1k
  refi=_enc->state.ref_frame_idx[OC_FRAME_SELF];
1768
104k
  for(pli=0;pli<3;pli++)oc_state_borders_fill_caps(&_enc->state,refi,pli);
1769
26.1k
  _enc->state.ntotal_coded_fragis=_enc->state.nfrags;
1770
26.1k
}
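The two frame averages computed near the end of the function above use the
(sum+(n>>1))/n idiom, which is ordinary round-to-nearest unsigned division:
with sum=7 and n=2, (7+(2>>1))/2==4, where truncating division would give 3.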
1771
1772
1773
1774
/*Cost information about a MB mode.*/
1775
struct oc_mode_choice{
1776
  unsigned      cost;
1777
  unsigned      ssd;
1778
  unsigned      rate;
1779
  unsigned      overhead;
1780
  unsigned char qii[12];
1781
};
1782
1783
1784
1785
4.19M
static void oc_mode_set_cost(oc_mode_choice *_modec,int _lambda){
1786
4.19M
  _modec->cost=OC_MODE_RD_COST(_modec->ssd,
1787
4.19M
   _modec->rate+_modec->overhead,_lambda);
1788
4.19M
}
1789
1790
/*A set of skip SSD's to use to disable early skipping.*/
1791
static const unsigned OC_NOSKIP[12]={
1792
  UINT_MAX,UINT_MAX,UINT_MAX,UINT_MAX,
1793
  UINT_MAX,UINT_MAX,UINT_MAX,UINT_MAX,
1794
  UINT_MAX,UINT_MAX,UINT_MAX,UINT_MAX
1795
};
1796
1797
/*The estimated number of bits used by a coded chroma block to specify the AC
1798
   quantizer.
1799
  TODO: Currently this is just 0.5*log2(3) (estimating about 50% compression);
1800
   measurements suggest this is in the right ballpark, but it varies somewhat
1801
   with lambda.*/
1802
7.12M
#define OC_CHROMA_QII_RATE ((0xCAE00D1DU>>31-OC_BIT_SCALE)+1>>1)
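The magic constant here is log2(3) in Q31 fixed point: 0xCAE00D1D/2^31 is
approximately 1.5849625007, so the right shift by 31-OC_BIT_SCALE leaves
log2(3) with OC_BIT_SCALE fractional bits, and the final +1>>1 halves it
with rounding to produce the 0.5*log2(3) estimate described in the comment.
A standalone check (not part of the library; compile with -lm):

#include <math.h>
#include <stdio.h>

int main(void){
  printf("%.10f\n",0xCAE00D1DU/2147483648.0); /*prints ~1.5849625007*/
  printf("%.10f\n",log2(3.0));                /*prints  1.5849625007*/
  return 0;
}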
1803
1804
static void oc_analyze_mb_mode_luma(oc_enc_ctx *_enc,
1805
 oc_mode_choice *_modec,const oc_fr_state *_fr,const oc_qii_state *_qs,
1806
 const unsigned _frag_satd[12],const unsigned _skip_ssd[12],
1807
3.20M
 const unsigned _rd_scale[4],int _qti){
1808
3.20M
  oc_fr_state  fr;
1809
3.20M
  oc_qii_state qs;
1810
3.20M
  unsigned     ssd;
1811
3.20M
  unsigned     rate;
1812
3.20M
  unsigned     satd;
1813
3.20M
  unsigned     best_ssd;
1814
3.20M
  unsigned     best_rate;
1815
3.20M
  int          best_fri;
1816
3.20M
  int          best_qii;
1817
3.20M
  int          lambda;
1818
3.20M
  int          nqis;
1819
3.20M
  int          nskipped;
1820
3.20M
  int          bi;
1821
3.20M
  lambda=_enc->lambda;
1822
3.20M
  nqis=_enc->state.nqis;
1823
  /*We could do a trellis optimization here, but we don't make final skip
1824
     decisions until after transform+quantization, so the result wouldn't be
1825
     optimal anyway.
1826
    Instead we just use a greedy approach; for most SATD values, the
1827
     differences between the qiis are large enough to drown out the cost to
1828
     code the flags.*/
1829
3.20M
  *&fr=*_fr;
1830
3.20M
  *&qs=*_qs;
1831
3.20M
  ssd=rate=nskipped=0;
1832
16.0M
  for(bi=0;bi<4;bi++){
1833
12.8M
    oc_fr_state  ft[2];
1834
12.8M
    oc_qii_state qt[3];
1835
12.8M
    unsigned     best_cost;
1836
12.8M
    unsigned     cur_cost;
1837
12.8M
    unsigned     cur_ssd;
1838
12.8M
    unsigned     cur_rate;
1839
12.8M
    unsigned     cur_overhead;
1840
12.8M
    int          qii;
1841
12.8M
    satd=_frag_satd[bi];
1842
12.8M
    *(ft+0)=*&fr;
1843
12.8M
    oc_fr_code_block(ft+0);
1844
12.8M
    cur_overhead=ft[0].bits-fr.bits;
1845
12.8M
    best_rate=oc_dct_cost2(_enc,&best_ssd,0,0,_qti,satd)
1846
12.8M
     +(cur_overhead<<OC_BIT_SCALE);
1847
12.8M
    if(nqis>1){
1848
6.20M
      oc_qii_state_advance(qt+0,&qs,0);
1849
6.20M
      best_rate+=qt[0].bits-qs.bits<<OC_BIT_SCALE;
1850
6.20M
    }
1851
12.8M
    best_ssd=OC_RD_SCALE(best_ssd,_rd_scale[bi]);
1852
12.8M
    best_cost=OC_MODE_RD_COST(ssd+best_ssd,rate+best_rate,lambda);
1853
12.8M
    best_fri=0;
1854
12.8M
    best_qii=0;
1855
22.0M
    for(qii=1;qii<nqis;qii++){
1856
9.19M
      oc_qii_state_advance(qt+qii,&qs,qii);
1857
9.19M
      cur_rate=oc_dct_cost2(_enc,&cur_ssd,qii,0,_qti,satd)
1858
9.19M
       +(cur_overhead+qt[qii].bits-qs.bits<<OC_BIT_SCALE);
1859
9.19M
      cur_ssd=OC_RD_SCALE(cur_ssd,_rd_scale[bi]);
1860
9.19M
      cur_cost=OC_MODE_RD_COST(ssd+cur_ssd,rate+cur_rate,lambda);
1861
9.19M
      if(cur_cost<best_cost){
1862
2.89M
        best_cost=cur_cost;
1863
2.89M
        best_ssd=cur_ssd;
1864
2.89M
        best_rate=cur_rate;
1865
2.89M
        best_qii=qii;
1866
2.89M
      }
1867
9.19M
    }
1868
12.8M
    if(_skip_ssd[bi]<(UINT_MAX>>OC_BIT_SCALE+2)&&nskipped<3){
1869
6.19M
      *(ft+1)=*&fr;
1870
6.19M
      oc_fr_skip_block(ft+1);
1871
6.19M
      cur_overhead=ft[1].bits-fr.bits<<OC_BIT_SCALE;
1872
6.19M
      cur_ssd=_skip_ssd[bi]<<OC_BIT_SCALE;
1873
6.19M
      cur_cost=OC_MODE_RD_COST(ssd+cur_ssd,rate+cur_overhead,lambda);
1874
6.19M
      if(cur_cost<=best_cost){
1875
2.29M
        best_ssd=cur_ssd;
1876
2.29M
        best_rate=cur_overhead;
1877
2.29M
        best_fri=1;
1878
2.29M
        best_qii+=4;
1879
2.29M
      }
1880
6.19M
    }
1881
12.8M
    rate+=best_rate;
1882
12.8M
    ssd+=best_ssd;
1883
12.8M
    *&fr=*(ft+best_fri);
1884
12.8M
    if(best_fri==0)*&qs=*(qt+best_qii);
1885
2.29M
    else nskipped++;
1886
12.8M
    _modec->qii[bi]=best_qii;
1887
12.8M
  }
1888
3.20M
  _modec->ssd=ssd;
1889
3.20M
  _modec->rate=rate;
1890
3.20M
}
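Note the packing convention used here (and mirrored in
oc_analyze_mb_mode_chroma below): a provisional skip decision is recorded by
adding 4 to the block's qii, so a stored value with qii>=nqis marks a block
expected to be skipped; oc_cost_inter4mv below relies on exactly that test
when zeroing the MVs of skipped blocks.  A tiny sketch of the convention,
with hypothetical helper names:

/*Hypothetical helpers illustrating the qii/skip packing; valid because
   nqis is at most 3, so qii+4 always compares >=nqis.*/
#define QII_SKIP_FLAG 4

static int pack_qii(int qii,int skipped){
  return skipped?qii+QII_SKIP_FLAG:qii;
}

static int qii_is_skipped(int packed_qii,int nqis){
  return packed_qii>=nqis;
}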
1891
1892
static void oc_analyze_mb_mode_chroma(oc_enc_ctx *_enc,
1893
 oc_mode_choice *_modec,const oc_fr_state *_fr,const oc_qii_state *_qs,
1894
 const unsigned _frag_satd[12],const unsigned _skip_ssd[12],
1895
3.20M
 unsigned _rd_scale,int _qti){
1896
3.20M
  unsigned ssd;
1897
3.20M
  unsigned rate;
1898
3.20M
  unsigned satd;
1899
3.20M
  unsigned best_ssd;
1900
3.20M
  unsigned best_rate;
1901
3.20M
  int      best_qii;
1902
3.20M
  unsigned cur_cost;
1903
3.20M
  unsigned cur_ssd;
1904
3.20M
  unsigned cur_rate;
1905
3.20M
  int      lambda;
1906
3.20M
  int      nblocks;
1907
3.20M
  int      nqis;
1908
3.20M
  int      pli;
1909
3.20M
  int      bi;
1910
3.20M
  int      qii;
1911
3.20M
  lambda=_enc->lambda;
1912
  /*Most chroma blocks have no AC coefficients to speak of anyway, so it's not
1913
     worth spending the bits to change the AC quantizer.
1914
    TODO: This may be worth revisiting when we separate out DC and AC
1915
     predictions from SATD.*/
1916
#if 0
1917
  nqis=_enc->state.nqis;
1918
#else
1919
3.20M
  nqis=1;
1920
3.20M
#endif
1921
3.20M
  ssd=_modec->ssd;
1922
3.20M
  rate=_modec->rate;
1923
  /*Because (except in 4:4:4 mode) we aren't considering chroma blocks in coded
1924
     order, we assume a constant overhead for coded block and qii flags.*/
1925
3.20M
  nblocks=OC_MB_MAP_NIDXS[_enc->state.info.pixel_fmt];
1926
3.20M
  nblocks=(nblocks-4>>1)+4;
1927
3.20M
  bi=4;
1928
9.60M
  for(pli=1;pli<3;pli++){
1929
13.5M
    for(;bi<nblocks;bi++){
1930
7.12M
      unsigned best_cost;
1931
7.12M
      satd=_frag_satd[bi];
1932
7.12M
      best_rate=oc_dct_cost2(_enc,&best_ssd,0,pli,_qti,satd)
1933
7.12M
       +OC_CHROMA_QII_RATE;
1934
7.12M
      best_ssd=OC_RD_SCALE(best_ssd,_rd_scale);
1935
7.12M
      best_cost=OC_MODE_RD_COST(ssd+best_ssd,rate+best_rate,lambda);
1936
7.12M
      best_qii=0;
1937
7.12M
      for(qii=1;qii<nqis;qii++){
1938
0
        cur_rate=oc_dct_cost2(_enc,&cur_ssd,qii,pli,_qti,satd)
1939
0
         +OC_CHROMA_QII_RATE;
1940
0
        cur_ssd=OC_RD_SCALE(cur_ssd,_rd_scale);
1941
0
        cur_cost=OC_MODE_RD_COST(ssd+cur_ssd,rate+cur_rate,lambda);
1942
0
        if(cur_cost<best_cost){
1943
0
          best_cost=cur_cost;
1944
0
          best_ssd=cur_ssd;
1945
0
          best_rate=cur_rate;
1946
0
          best_qii=qii;
1947
0
        }
1948
0
      }
1949
7.12M
      if(_skip_ssd[bi]<(UINT_MAX>>OC_BIT_SCALE+2)){
1950
4.98M
        cur_ssd=_skip_ssd[bi]<<OC_BIT_SCALE;
1951
4.98M
        cur_cost=OC_MODE_RD_COST(ssd+cur_ssd,rate,lambda);
1952
4.98M
        if(cur_cost<=best_cost){
1953
2.69M
          best_ssd=cur_ssd;
1954
2.69M
          best_rate=0;
1955
2.69M
          best_qii+=4;
1956
2.69M
        }
1957
4.98M
      }
1958
7.12M
      rate+=best_rate;
1959
7.12M
      ssd+=best_ssd;
1960
7.12M
      _modec->qii[bi]=best_qii;
1961
7.12M
    }
1962
6.40M
    nblocks=(nblocks-4<<1)+4;
1963
6.40M
  }
1964
3.20M
  _modec->ssd=ssd;
1965
3.20M
  _modec->rate=rate;
1966
3.20M
}
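The nblocks arithmetic above walks the chroma half of the macro block map
one plane at a time: OC_MB_MAP_NIDXS is 6 for 4:2:0, 8 for 4:2:2, and 12 for
4:4:4, so for 4:2:2, ((8-4)>>1)+4==6 makes the first pass cover Cb indices
4..5, and ((6-4)<<1)+4==8 then extends the limit so the second pass covers
Cr indices 6..7.  A standalone sketch printing all three cases:

#include <stdio.h>

int main(void){
  /*Mapped block counts for 4:2:0, 4:2:2, and 4:4:4 (OC_MB_MAP_NIDXS).*/
  static const int map_nidxs[3]={6,8,12};
  int fmt;
  for(fmt=0;fmt<3;fmt++){
    int nblocks;
    int cb_end;
    nblocks=((map_nidxs[fmt]-4)>>1)+4; /*limit after the Cb pass*/
    cb_end=nblocks;
    nblocks=((nblocks-4)<<1)+4;        /*limit after the Cr pass*/
    printf("nidxs=%2d  Cb: 4..%d  Cr: %d..%d\n",
     map_nidxs[fmt],cb_end-1,cb_end,nblocks-1);
  }
  return 0;
}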
1967
1968
static void oc_skip_cost(oc_enc_ctx *_enc,oc_enc_pipeline_state *_pipe,
1969
308k
 unsigned _mbi,const unsigned _rd_scale[4],unsigned _ssd[12]){
1970
308k
  const unsigned char   *src;
1971
308k
  const unsigned char   *ref;
1972
308k
  int                    ystride;
1973
308k
  const oc_fragment     *frags;
1974
308k
  const ptrdiff_t       *frag_buf_offs;
1975
308k
  const ptrdiff_t       *sb_map;
1976
308k
  const oc_mb_map_plane *mb_map;
1977
308k
  const unsigned char   *map_idxs;
1978
308k
  oc_mv                 *mvs;
1979
308k
  int                    map_nidxs;
1980
308k
  unsigned               uncoded_ssd;
1981
308k
  int                    mapii;
1982
308k
  int                    mapi;
1983
308k
  int                    pli;
1984
308k
  int                    bi;
1985
308k
  ptrdiff_t              fragi;
1986
308k
  ptrdiff_t              frag_offs;
1987
308k
  int                    borderi;
1988
308k
  src=_enc->state.ref_frame_data[OC_FRAME_IO];
1989
308k
  ref=_enc->state.ref_frame_data[OC_FRAME_PREV];
1990
308k
  ystride=_enc->state.ref_ystride[0];
1991
308k
  frags=_enc->state.frags;
1992
308k
  frag_buf_offs=_enc->state.frag_buf_offs;
1993
308k
  sb_map=_enc->state.sb_maps[_mbi>>2][_mbi&3];
1994
308k
  mvs=_enc->mb_info[_mbi].block_mv;
1995
1.54M
  for(bi=0;bi<4;bi++){
1996
1.23M
    fragi=sb_map[bi];
1997
1.23M
    borderi=frags[fragi].borderi;
1998
1.23M
    frag_offs=frag_buf_offs[fragi];
1999
1.23M
    if(borderi<0){
2000
719k
      uncoded_ssd=oc_enc_frag_ssd(_enc,src+frag_offs,ref+frag_offs,ystride);
2001
719k
    }
2002
516k
    else{
2003
516k
      uncoded_ssd=oc_enc_frag_border_ssd(_enc,
2004
516k
       src+frag_offs,ref+frag_offs,ystride,_enc->state.borders[borderi].mask);
2005
516k
    }
2006
    /*Scale to match DCT domain and RD.*/
2007
1.23M
    uncoded_ssd=OC_RD_SKIP_SCALE(uncoded_ssd,_rd_scale[bi]);
2008
    /*Motion is a special case; if there is more than a full-pixel motion
2009
       against the prior frame, penalize skipping.
2010
      TODO: The factor of two here is a kludge, but it tested out better than a
2011
       hard limit.*/
2012
1.23M
    if(mvs[bi]!=0)uncoded_ssd*=2;
2013
1.23M
    _pipe->skip_ssd[0][fragi-_pipe->froffset[0]]=_ssd[bi]=uncoded_ssd;
2014
1.23M
  }
2015
308k
  mb_map=(const oc_mb_map_plane *)_enc->state.mb_maps[_mbi];
2016
308k
  map_nidxs=OC_MB_MAP_NIDXS[_enc->state.info.pixel_fmt];
2017
308k
  map_idxs=OC_MB_MAP_IDXS[_enc->state.info.pixel_fmt];
2018
308k
  map_nidxs=(map_nidxs-4>>1)+4;
2019
308k
  mapii=4;
2020
308k
  mvs=_enc->mb_info[_mbi].unref_mv;
2021
926k
  for(pli=1;pli<3;pli++){
2022
617k
    ystride=_enc->state.ref_ystride[pli];
2023
1.30M
    for(;mapii<map_nidxs;mapii++){
2024
686k
      mapi=map_idxs[mapii];
2025
686k
      bi=mapi&3;
2026
686k
      fragi=mb_map[pli][bi];
2027
686k
      borderi=frags[fragi].borderi;
2028
686k
      frag_offs=frag_buf_offs[fragi];
2029
686k
      if(borderi<0){
2030
385k
        uncoded_ssd=oc_enc_frag_ssd(_enc,src+frag_offs,ref+frag_offs,ystride);
2031
385k
      }
2032
300k
      else{
2033
300k
        uncoded_ssd=oc_enc_frag_border_ssd(_enc,
2034
300k
         src+frag_offs,ref+frag_offs,ystride,_enc->state.borders[borderi].mask);
2035
300k
      }
2036
      /*Scale to match DCT domain and RD.*/
2037
686k
      uncoded_ssd=OC_RD_SKIP_SCALE(uncoded_ssd,_rd_scale[4]);
2038
      /*Motion is a special case; if there is more than a full-pixel motion
2039
         against the prior frame, penalize skipping.
2040
        TODO: The factor of two here is a kludge, but it tested out better than
2041
         a hard limit.*/
2042
686k
      if(mvs[OC_FRAME_PREV]!=0)uncoded_ssd*=2;
2043
686k
      _pipe->skip_ssd[pli][fragi-_pipe->froffset[pli]]=_ssd[mapii]=uncoded_ssd;
2044
686k
    }
2045
617k
    map_nidxs=(map_nidxs-4<<1)+4;
2046
617k
  }
2047
308k
}
2048
2049
2050
static void oc_cost_intra(oc_enc_ctx *_enc,oc_mode_choice *_modec,
2051
 unsigned _mbi,const oc_fr_state *_fr,const oc_qii_state *_qs,
2052
 const unsigned _frag_satd[12],const unsigned _skip_ssd[12],
2053
617k
 const unsigned _rd_scale[5]){
2054
617k
  oc_analyze_mb_mode_luma(_enc,_modec,_fr,_qs,_frag_satd,_skip_ssd,_rd_scale,0);
2055
617k
  oc_analyze_mb_mode_chroma(_enc,_modec,_fr,_qs,
2056
617k
   _frag_satd,_skip_ssd,_rd_scale[4],0);
2057
617k
  _modec->overhead=
2058
617k
   oc_mode_scheme_chooser_cost(&_enc->chooser,OC_MODE_INTRA)<<OC_BIT_SCALE;
2059
617k
  oc_mode_set_cost(_modec,_enc->lambda);
2060
617k
}
2061
2062
static void oc_cost_inter(oc_enc_ctx *_enc,oc_mode_choice *_modec,
2063
 unsigned _mbi,int _mb_mode,oc_mv _mv,
2064
 const oc_fr_state *_fr,const oc_qii_state *_qs,
2065
2.22M
 const unsigned _skip_ssd[12],const unsigned _rd_scale[5]){
2066
2.22M
  unsigned               frag_satd[12];
2067
2.22M
  const unsigned char   *src;
2068
2.22M
  const unsigned char   *ref;
2069
2.22M
  int                    ystride;
2070
2.22M
  const ptrdiff_t       *frag_buf_offs;
2071
2.22M
  const ptrdiff_t       *sb_map;
2072
2.22M
  const oc_mb_map_plane *mb_map;
2073
2.22M
  const unsigned char   *map_idxs;
2074
2.22M
  int                    map_nidxs;
2075
2.22M
  int                    mapii;
2076
2.22M
  int                    mapi;
2077
2.22M
  int                    mv_offs[2];
2078
2.22M
  int                    pli;
2079
2.22M
  int                    bi;
2080
2.22M
  ptrdiff_t              fragi;
2081
2.22M
  ptrdiff_t              frag_offs;
2082
2.22M
  int                    dc;
2083
2.22M
  src=_enc->state.ref_frame_data[OC_FRAME_IO];
2084
2.22M
  ref=_enc->state.ref_frame_data[OC_FRAME_FOR_MODE(_mb_mode)];
2085
2.22M
  ystride=_enc->state.ref_ystride[0];
2086
2.22M
  frag_buf_offs=_enc->state.frag_buf_offs;
2087
2.22M
  sb_map=_enc->state.sb_maps[_mbi>>2][_mbi&3];
2088
2.22M
  _modec->rate=_modec->ssd=0;
2089
2.22M
  if(oc_state_get_mv_offsets(&_enc->state,mv_offs,0,_mv)>1){
2090
2.29M
    for(bi=0;bi<4;bi++){
2091
1.83M
      fragi=sb_map[bi];
2092
1.83M
      frag_offs=frag_buf_offs[fragi];
2093
1.83M
      if(_enc->sp_level<OC_SP_LEVEL_NOSATD){
2094
1.83M
        frag_satd[bi]=oc_enc_frag_satd2(_enc,&dc,src+frag_offs,
2095
1.83M
         ref+frag_offs+mv_offs[0],ref+frag_offs+mv_offs[1],ystride);
2096
1.83M
        frag_satd[bi]+=abs(dc);
2097
1.83M
      }
2098
0
      else{
2099
0
        frag_satd[bi]=oc_enc_frag_sad2_thresh(_enc,src+frag_offs,
2100
0
         ref+frag_offs+mv_offs[0],ref+frag_offs+mv_offs[1],ystride,UINT_MAX);
2101
0
      }
2102
1.83M
    }
2103
458k
  }
2104
1.76M
  else{
2105
8.84M
    for(bi=0;bi<4;bi++){
2106
7.07M
      fragi=sb_map[bi];
2107
7.07M
      frag_offs=frag_buf_offs[fragi];
2108
7.07M
      if(_enc->sp_level<OC_SP_LEVEL_NOSATD){
2109
7.07M
        frag_satd[bi]=oc_enc_frag_satd(_enc,&dc,src+frag_offs,
2110
7.07M
         ref+frag_offs+mv_offs[0],ystride);
2111
7.07M
        frag_satd[bi]+=abs(dc);
2112
7.07M
      }
2113
0
      else{
2114
0
        frag_satd[bi]=oc_enc_frag_sad(_enc,src+frag_offs,
2115
0
         ref+frag_offs+mv_offs[0],ystride);
2116
0
      }
2117
7.07M
    }
2118
1.76M
  }
2119
2.22M
  mb_map=(const oc_mb_map_plane *)_enc->state.mb_maps[_mbi];
2120
2.22M
  map_idxs=OC_MB_MAP_IDXS[_enc->state.info.pixel_fmt];
2121
2.22M
  map_nidxs=OC_MB_MAP_NIDXS[_enc->state.info.pixel_fmt];
2122
  /*Note: This assumes ref_ystride[1]==ref_ystride[2].*/
2123
2.22M
  ystride=_enc->state.ref_ystride[1];
2124
2.22M
  if(oc_state_get_mv_offsets(&_enc->state,mv_offs,1,_mv)>1){
2125
2.60M
    for(mapii=4;mapii<map_nidxs;mapii++){
2126
1.77M
      mapi=map_idxs[mapii];
2127
1.77M
      pli=mapi>>2;
2128
1.77M
      bi=mapi&3;
2129
1.77M
      fragi=mb_map[pli][bi];
2130
1.77M
      frag_offs=frag_buf_offs[fragi];
2131
1.77M
      if(_enc->sp_level<OC_SP_LEVEL_NOSATD){
2132
1.77M
        frag_satd[mapii]=oc_enc_frag_satd2(_enc,&dc,src+frag_offs,
2133
1.77M
         ref+frag_offs+mv_offs[0],ref+frag_offs+mv_offs[1],ystride);
2134
1.77M
        frag_satd[mapii]+=abs(dc);
2135
1.77M
      }
2136
0
      else{
2137
0
        frag_satd[mapii]=oc_enc_frag_sad2_thresh(_enc,src+frag_offs,
2138
0
         ref+frag_offs+mv_offs[0],ref+frag_offs+mv_offs[1],ystride,UINT_MAX);
2139
0
      }
2140
1.77M
    }
2141
832k
  }
2142
1.39M
  else{
2143
4.57M
    for(mapii=4;mapii<map_nidxs;mapii++){
2144
3.18M
      mapi=map_idxs[mapii];
2145
3.18M
      pli=mapi>>2;
2146
3.18M
      bi=mapi&3;
2147
3.18M
      fragi=mb_map[pli][bi];
2148
3.18M
      frag_offs=frag_buf_offs[fragi];
2149
3.18M
      if(_enc->sp_level<OC_SP_LEVEL_NOSATD){
2150
3.18M
        frag_satd[mapii]=oc_enc_frag_satd(_enc,&dc,src+frag_offs,
2151
3.18M
         ref+frag_offs+mv_offs[0],ystride);
2152
3.18M
        frag_satd[mapii]+=abs(dc);
2153
3.18M
      }
2154
0
      else{
2155
0
        frag_satd[mapii]=oc_enc_frag_sad(_enc,src+frag_offs,
2156
0
         ref+frag_offs+mv_offs[0],ystride);
2157
0
      }
2158
3.18M
    }
2159
1.39M
  }
2160
2.22M
  oc_analyze_mb_mode_luma(_enc,_modec,_fr,_qs,frag_satd,_skip_ssd,_rd_scale,1);
2161
2.22M
  oc_analyze_mb_mode_chroma(_enc,_modec,_fr,_qs,
2162
2.22M
   frag_satd,_skip_ssd,_rd_scale[4],1);
2163
2.22M
  _modec->overhead=
2164
2.22M
   oc_mode_scheme_chooser_cost(&_enc->chooser,_mb_mode)<<OC_BIT_SCALE;
2165
2.22M
  oc_mode_set_cost(_modec,_enc->lambda);
2166
2.22M
}
2167
2168
static void oc_cost_inter_nomv(oc_enc_ctx *_enc,oc_mode_choice *_modec,
2169
 unsigned _mbi,int _mb_mode,const oc_fr_state *_fr,const oc_qii_state *_qs,
2170
617k
 const unsigned _skip_ssd[12],const unsigned _rd_scale[5]){
2171
617k
  oc_cost_inter(_enc,_modec,_mbi,_mb_mode,0,_fr,_qs,_skip_ssd,_rd_scale);
2172
617k
}
2173
2174
static int oc_cost_inter1mv(oc_enc_ctx *_enc,oc_mode_choice *_modec,
2175
 unsigned _mbi,int _mb_mode,oc_mv _mv,
2176
 const oc_fr_state *_fr,const oc_qii_state *_qs,const unsigned _skip_ssd[12],
2177
992k
 const unsigned _rd_scale[5]){
2178
992k
  int bits0;
2179
992k
  oc_cost_inter(_enc,_modec,_mbi,_mb_mode,_mv,_fr,_qs,_skip_ssd,_rd_scale);
2180
992k
  bits0=OC_MV_BITS[0][OC_MV_X(_mv)+31]+OC_MV_BITS[0][OC_MV_Y(_mv)+31];
2181
992k
  _modec->overhead+=OC_MINI(_enc->mv_bits[0]+bits0,_enc->mv_bits[1]+12)
2182
992k
   -OC_MINI(_enc->mv_bits[0],_enc->mv_bits[1])<<OC_BIT_SCALE;
2183
992k
  oc_mode_set_cost(_modec,_enc->lambda);
2184
992k
  return bits0;
2185
992k
}
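The overhead adjustment above charges a candidate MV its marginal cost: the
frame will ultimately use whichever MV coding scheme is cheaper overall (the
VLC scheme tallied in mv_bits[0], or the fixed 6+6-bit raw scheme tallied in
mv_bits[1], 12 bits per MV), so a new MV costs only the increase in the
running minimum of the two totals.  A minimal sketch of that accounting,
with a hypothetical function name:

/*Marginal bit cost of adding one MV, given the running totals for the
   VLC scheme and the raw 12-bit scheme; mirrors the OC_MINI expressions
   above (before the OC_BIT_SCALE shift).*/
static int mv_marginal_bits(int vlc_total,int raw_total,int vlc_bits){
  int before;
  int after_vlc;
  int after_raw;
  before=vlc_total<raw_total?vlc_total:raw_total;
  after_vlc=vlc_total+vlc_bits;
  after_raw=raw_total+12;
  return (after_vlc<after_raw?after_vlc:after_raw)-before;
}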
2186
2187
/*A mapping from oc_mb_map (raster) ordering to oc_sb_map (Hilbert) ordering.*/
2188
static const unsigned char OC_MB_PHASE[4][4]={
2189
  {0,1,3,2},{0,3,1,2},{0,3,1,2},{2,3,1,0}
2190
};
2191
2192
static void oc_cost_inter4mv(oc_enc_ctx *_enc,oc_mode_choice *_modec,
2193
 unsigned _mbi,oc_mv _mv[4],const oc_fr_state *_fr,const oc_qii_state *_qs,
2194
357k
 const unsigned _skip_ssd[12],const unsigned _rd_scale[5]){
2195
357k
  unsigned               frag_satd[12];
2196
357k
  oc_mv                  lbmvs[4];
2197
357k
  oc_mv                  cbmvs[4];
2198
357k
  const unsigned char   *src;
2199
357k
  const unsigned char   *ref;
2200
357k
  int                    ystride;
2201
357k
  const ptrdiff_t       *frag_buf_offs;
2202
357k
  oc_mv                 *frag_mvs;
2203
357k
  const oc_mb_map_plane *mb_map;
2204
357k
  const unsigned char   *map_idxs;
2205
357k
  int                    map_nidxs;
2206
357k
  int                    nqis;
2207
357k
  int                    mapii;
2208
357k
  int                    mapi;
2209
357k
  int                    mv_offs[2];
2210
357k
  int                    pli;
2211
357k
  int                    bi;
2212
357k
  ptrdiff_t              fragi;
2213
357k
  ptrdiff_t              frag_offs;
2214
357k
  int                    bits0;
2215
357k
  int                    bits1;
2216
357k
  unsigned               satd;
2217
357k
  int                    dc;
2218
357k
  src=_enc->state.ref_frame_data[OC_FRAME_IO];
2219
357k
  ref=_enc->state.ref_frame_data[OC_FRAME_PREV];
2220
357k
  ystride=_enc->state.ref_ystride[0];
2221
357k
  frag_buf_offs=_enc->state.frag_buf_offs;
2222
357k
  frag_mvs=_enc->state.frag_mvs;
2223
357k
  mb_map=(const oc_mb_map_plane *)_enc->state.mb_maps[_mbi];
2224
357k
  _modec->rate=_modec->ssd=0;
2225
1.78M
  for(bi=0;bi<4;bi++){
2226
1.43M
    fragi=mb_map[0][bi];
2227
    /*Save the block MVs as the current ones while we're here; we'll replace
2228
       them if we don't ultimately choose 4MV mode.*/
2229
1.43M
    frag_mvs[fragi]=_mv[bi];
2230
1.43M
    frag_offs=frag_buf_offs[fragi];
2231
1.43M
    if(oc_state_get_mv_offsets(&_enc->state,mv_offs,0,_mv[bi])>1){
2232
168k
      satd=oc_enc_frag_satd2(_enc,&dc,src+frag_offs,
2233
168k
       ref+frag_offs+mv_offs[0],ref+frag_offs+mv_offs[1],ystride);
2234
168k
    }
2235
1.26M
    else{
2236
1.26M
      satd=oc_enc_frag_satd(_enc,&dc,src+frag_offs,
2237
1.26M
       ref+frag_offs+mv_offs[0],ystride);
2238
1.26M
    }
2239
1.43M
    frag_satd[OC_MB_PHASE[_mbi&3][bi]]=satd+abs(dc);
2240
1.43M
  }
2241
357k
  oc_analyze_mb_mode_luma(_enc,_modec,_fr,_qs,frag_satd,
2242
357k
   _enc->vp3_compatible?OC_NOSKIP:_skip_ssd,_rd_scale,1);
2243
  /*Figure out which blocks are being skipped and give them (0,0) MVs.*/
2244
357k
  bits0=0;
2245
357k
  bits1=0;
2246
357k
  nqis=_enc->state.nqis;
2247
1.78M
  for(bi=0;bi<4;bi++){
2248
1.43M
    if(_modec->qii[OC_MB_PHASE[_mbi&3][bi]]>=nqis)lbmvs[bi]=0;
2249
1.16M
    else{
2250
1.16M
      lbmvs[bi]=_mv[bi];
2251
1.16M
      bits0+=OC_MV_BITS[0][OC_MV_X(_mv[bi])+31]
2252
1.16M
       +OC_MV_BITS[0][OC_MV_Y(_mv[bi])+31];
2253
1.16M
      bits1+=12;
2254
1.16M
    }
2255
1.43M
  }
2256
357k
  (*OC_SET_CHROMA_MVS_TABLE[_enc->state.info.pixel_fmt])(cbmvs,lbmvs);
2257
357k
  map_idxs=OC_MB_MAP_IDXS[_enc->state.info.pixel_fmt];
2258
357k
  map_nidxs=OC_MB_MAP_NIDXS[_enc->state.info.pixel_fmt];
2259
  /*Note: This assumes ref_ystride[1]==ref_ystride[2].*/
2260
357k
  ystride=_enc->state.ref_ystride[1];
2261
1.15M
  for(mapii=4;mapii<map_nidxs;mapii++){
2262
801k
    mapi=map_idxs[mapii];
2263
801k
    pli=mapi>>2;
2264
801k
    bi=mapi&3;
2265
801k
    fragi=mb_map[pli][bi];
2266
801k
    frag_offs=frag_buf_offs[fragi];
2267
    /*TODO: We could save half these calls by re-using the results for the Cb
2268
       and Cr planes; is it worth it?*/
2269
801k
    if(oc_state_get_mv_offsets(&_enc->state,mv_offs,pli,cbmvs[bi])>1){
2270
514k
      satd=oc_enc_frag_satd2(_enc,&dc,src+frag_offs,
2271
514k
       ref+frag_offs+mv_offs[0],ref+frag_offs+mv_offs[1],ystride);
2272
514k
    }
2273
286k
    else{
2274
286k
      satd=oc_enc_frag_satd(_enc,&dc,src+frag_offs,
2275
286k
       ref+frag_offs+mv_offs[0],ystride);
2276
286k
    }
2277
801k
    frag_satd[mapii]=satd+abs(dc);
2278
801k
  }
2279
357k
  oc_analyze_mb_mode_chroma(_enc,_modec,_fr,_qs,
2280
357k
   frag_satd,_skip_ssd,_rd_scale[4],1);
2281
357k
  _modec->overhead=
2282
357k
   oc_mode_scheme_chooser_cost(&_enc->chooser,OC_MODE_INTER_MV_FOUR)
2283
357k
   +OC_MINI(_enc->mv_bits[0]+bits0,_enc->mv_bits[1]+bits1)
2284
357k
   -OC_MINI(_enc->mv_bits[0],_enc->mv_bits[1])<<OC_BIT_SCALE;
2285
357k
  oc_mode_set_cost(_modec,_enc->lambda);
2286
357k
}
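The OC_MB_PHASE table defined above oc_cost_inter4mv is needed because that
function visits luma blocks in raster (mb_map) order while frag_satd[] must
be filled in the Hilbert (sb_map) order that oc_analyze_mb_mode_luma
expects; the row is selected by the macro block's phase (_mbi&3) within its
super block.  A trivial standalone check that each row is a permutation of
{0,1,2,3}:

#include <stdio.h>

static const unsigned char OC_MB_PHASE[4][4]={
  {0,1,3,2},{0,3,1,2},{0,3,1,2},{2,3,1,0}
};

int main(void){
  int phase;
  for(phase=0;phase<4;phase++){
    int seen=0;
    int bi;
    for(bi=0;bi<4;bi++)seen|=1<<OC_MB_PHASE[phase][bi];
    printf("phase %d: %s\n",phase,seen==0xF?"permutation":"broken");
  }
  return 0;
}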
2287
2288
39.1k
int oc_enc_analyze_inter(oc_enc_ctx *_enc,int _allow_keyframe,int _recode){
2289
39.1k
  oc_set_chroma_mvs_func  set_chroma_mvs;
2290
39.1k
  oc_qii_state            intra_luma_qs;
2291
39.1k
  oc_mv                   last_mv;
2292
39.1k
  oc_mv                   prior_mv;
2293
39.1k
  ogg_int64_t             interbits;
2294
39.1k
  ogg_int64_t             intrabits;
2295
39.1k
  ogg_int64_t             activity_sum;
2296
39.1k
  ogg_int64_t             luma_sum;
2297
39.1k
  unsigned                activity_avg;
2298
39.1k
  unsigned                luma_avg;
2299
39.1k
  const ogg_uint16_t     *chroma_rd_scale;
2300
39.1k
  ogg_uint16_t           *mcu_rd_scale;
2301
39.1k
  ogg_uint16_t           *mcu_rd_iscale;
2302
39.1k
  const unsigned char    *map_idxs;
2303
39.1k
  int                     nmap_idxs;
2304
39.1k
  unsigned               *coded_mbis;
2305
39.1k
  unsigned               *uncoded_mbis;
2306
39.1k
  size_t                  ncoded_mbis;
2307
39.1k
  size_t                  nuncoded_mbis;
2308
39.1k
  oc_sb_flags            *sb_flags;
2309
39.1k
  signed char            *mb_modes;
2310
39.1k
  const oc_sb_map        *sb_maps;
2311
39.1k
  const oc_mb_map        *mb_maps;
2312
39.1k
  oc_mb_enc_info         *embs;
2313
39.1k
  oc_fragment            *frags;
2314
39.1k
  oc_mv                  *frag_mvs;
2315
39.1k
  unsigned                stripe_sby;
2316
39.1k
  unsigned                mcu_nvsbs;
2317
39.1k
  int                     notstart;
2318
39.1k
  int                     notdone;
2319
39.1k
  unsigned                sbi;
2320
39.1k
  unsigned                sbi_end;
2321
39.1k
  int                     refi;
2322
39.1k
  int                     pli;
2323
39.1k
  int                     sp_level;
2324
39.1k
  sp_level=_enc->sp_level;
2325
39.1k
  set_chroma_mvs=OC_SET_CHROMA_MVS_TABLE[_enc->state.info.pixel_fmt];
2326
39.1k
  _enc->state.frame_type=OC_INTER_FRAME;
2327
39.1k
  oc_mode_scheme_chooser_reset(&_enc->chooser);
2328
39.1k
  oc_enc_tokenize_start(_enc);
2329
39.1k
  oc_enc_pipeline_init(_enc,&_enc->pipe);
2330
39.1k
  oc_enc_mode_rd_init(_enc);
2331
39.1k
  if(_allow_keyframe)oc_qii_state_init(&intra_luma_qs);
2332
39.1k
  _enc->mv_bits[0]=_enc->mv_bits[1]=0;
2333
39.1k
  interbits=intrabits=0;
2334
39.1k
  activity_sum=luma_sum=0;
2335
39.1k
  activity_avg=_enc->activity_avg;
2336
39.1k
  luma_avg=OC_CLAMPI(90<<8,_enc->luma_avg,160<<8);
2337
39.1k
  chroma_rd_scale=_enc->chroma_rd_scale[OC_INTER_FRAME][_enc->state.qis[0]];
2338
39.1k
  mcu_rd_scale=_enc->mcu_rd_scale;
2339
39.1k
  mcu_rd_iscale=_enc->mcu_rd_iscale;
2340
39.1k
  last_mv=prior_mv=0;
2341
  /*Choose MVs and MB modes and quantize and code luma.
2342
    Must be done in Hilbert order.*/
2343
39.1k
  map_idxs=OC_MB_MAP_IDXS[_enc->state.info.pixel_fmt];
2344
39.1k
  nmap_idxs=OC_MB_MAP_NIDXS[_enc->state.info.pixel_fmt];
2345
39.1k
  coded_mbis=_enc->coded_mbis;
2346
39.1k
  uncoded_mbis=coded_mbis+_enc->state.nmbs;
2347
39.1k
  ncoded_mbis=0;
2348
39.1k
  nuncoded_mbis=0;
2349
39.1k
  _enc->state.ncoded_fragis[0]=0;
2350
39.1k
  _enc->state.ncoded_fragis[1]=0;
2351
39.1k
  _enc->state.ncoded_fragis[2]=0;
2352
39.1k
  sb_flags=_enc->state.sb_flags;
2353
39.1k
  mb_modes=_enc->state.mb_modes;
2354
39.1k
  sb_maps=(const oc_sb_map *)_enc->state.sb_maps;
2355
39.1k
  mb_maps=(const oc_mb_map *)_enc->state.mb_maps;
2356
39.1k
  embs=_enc->mb_info;
2357
39.1k
  frags=_enc->state.frags;
2358
39.1k
  frag_mvs=_enc->state.frag_mvs;
2359
39.1k
  notstart=0;
2360
39.1k
  notdone=1;
2361
39.1k
  mcu_nvsbs=_enc->mcu_nvsbs;
2362
80.1k
  for(stripe_sby=0;notdone;stripe_sby+=mcu_nvsbs){
2363
41.0k
    ptrdiff_t cfroffset;
2364
41.0k
    notdone=oc_enc_pipeline_set_stripe(_enc,&_enc->pipe,stripe_sby);
2365
41.0k
    sbi_end=_enc->pipe.sbi_end[0];
2366
41.0k
    cfroffset=_enc->pipe.froffset[1];
2367
141k
    for(sbi=_enc->pipe.sbi0[0];sbi<sbi_end;sbi++){
2368
100k
      int quadi;
2369
      /*Mode addressing is through Y plane, always 4 MB per SB.*/
2370
500k
      for(quadi=0;quadi<4;quadi++)if(sb_flags[sbi].quad_valid&1<<quadi){
2371
308k
        oc_mode_choice modes[8];
2372
308k
        unsigned       activity[4];
2373
308k
        unsigned       rd_scale[5];
2374
308k
        unsigned       rd_iscale[5];
2375
308k
        unsigned       skip_ssd[12];
2376
308k
        unsigned       intra_satd[12];
2377
308k
        unsigned       luma;
2378
308k
        int            mb_mv_bits_0;
2379
308k
        int            mb_gmv_bits_0;
2380
308k
        int            inter_mv_pref;
2381
308k
        int            mb_mode;
2382
308k
        int            refi;
2383
308k
        int            mv;
2384
308k
        unsigned       mbi;
2385
308k
        int            mapii;
2386
308k
        int            mapi;
2387
308k
        int            bi;
2388
308k
        ptrdiff_t      fragi;
2389
308k
        mbi=sbi<<2|quadi;
2390
308k
        luma=oc_mb_intra_satd(_enc,mbi,intra_satd);
2391
        /*Activity masking.*/
2392
308k
        if(sp_level<OC_SP_LEVEL_FAST_ANALYSIS){
2393
308k
          oc_mb_activity(_enc,mbi,activity);
2394
308k
        }
2395
0
        else oc_mb_activity_fast(_enc,mbi,activity,intra_satd);
2396
308k
        luma_sum+=luma;
2397
308k
        activity_sum+=oc_mb_masking(rd_scale,rd_iscale,
2398
308k
         chroma_rd_scale,activity,activity_avg,luma,luma_avg);
2399
        /*Motion estimation:
2400
          We always do a basic 1MV search for all macroblocks, coded or not,
2401
           keyframe or not.*/
2402
308k
        if(!_recode&&sp_level<OC_SP_LEVEL_NOMC)oc_mcenc_search(_enc,mbi);
2403
308k
        mv=0;
2404
        /*Find the block choice with the lowest estimated coding cost.
2405
          If a Cb or Cr block is coded but no Y' block from the same macro
2406
           block is, then the mode MUST be OC_MODE_INTER_NOMV.
2407
          This is the default state to which the mode data structure is
2408
           initialised in encoder and decoder at the start of each frame.*/
2409
        /*Block coding cost is estimated from correlated SATD metrics.*/
2410
        /*At this point, all blocks that are in frame are still marked coded.*/
2411
308k
        if(!_recode){
2412
222k
          embs[mbi].unref_mv[OC_FRAME_GOLD]=
2413
222k
           embs[mbi].analysis_mv[0][OC_FRAME_GOLD];
2414
222k
          embs[mbi].unref_mv[OC_FRAME_PREV]=
2415
222k
           embs[mbi].analysis_mv[0][OC_FRAME_PREV];
2416
222k
          embs[mbi].refined=0;
2417
222k
        }
2418
        /*Estimate the cost of coding this MB in a keyframe.*/
2419
308k
        if(_allow_keyframe){
2420
308k
          oc_cost_intra(_enc,modes+OC_MODE_INTRA,mbi,
2421
308k
           _enc->pipe.fr+0,&intra_luma_qs,intra_satd,OC_NOSKIP,rd_scale);
2422
308k
          intrabits+=modes[OC_MODE_INTRA].rate;
2423
1.54M
          for(bi=0;bi<4;bi++){
2424
1.23M
            oc_qii_state_advance(&intra_luma_qs,&intra_luma_qs,
2425
1.23M
             modes[OC_MODE_INTRA].qii[bi]);
2426
1.23M
          }
2427
308k
        }
2428
        /*Estimate the cost in a delta frame for various modes.*/
2429
308k
        oc_skip_cost(_enc,&_enc->pipe,mbi,rd_scale,skip_ssd);
2430
308k
        if(sp_level<OC_SP_LEVEL_NOMC){
2431
308k
          oc_cost_inter_nomv(_enc,modes+OC_MODE_INTER_NOMV,mbi,
2432
308k
           OC_MODE_INTER_NOMV,_enc->pipe.fr+0,_enc->pipe.qs+0,
2433
308k
           skip_ssd,rd_scale);
2434
308k
          oc_cost_intra(_enc,modes+OC_MODE_INTRA,mbi,
2435
308k
           _enc->pipe.fr+0,_enc->pipe.qs+0,intra_satd,skip_ssd,rd_scale);
2436
308k
          mb_mv_bits_0=oc_cost_inter1mv(_enc,modes+OC_MODE_INTER_MV,mbi,
2437
308k
           OC_MODE_INTER_MV,embs[mbi].unref_mv[OC_FRAME_PREV],
2438
308k
           _enc->pipe.fr+0,_enc->pipe.qs+0,skip_ssd,rd_scale);
2439
308k
          oc_cost_inter(_enc,modes+OC_MODE_INTER_MV_LAST,mbi,
2440
308k
           OC_MODE_INTER_MV_LAST,last_mv,_enc->pipe.fr+0,_enc->pipe.qs+0,
2441
308k
           skip_ssd,rd_scale);
2442
308k
          oc_cost_inter(_enc,modes+OC_MODE_INTER_MV_LAST2,mbi,
2443
308k
           OC_MODE_INTER_MV_LAST2,prior_mv,_enc->pipe.fr+0,_enc->pipe.qs+0,
2444
308k
           skip_ssd,rd_scale);
2445
308k
          oc_cost_inter_nomv(_enc,modes+OC_MODE_GOLDEN_NOMV,mbi,
2446
308k
           OC_MODE_GOLDEN_NOMV,_enc->pipe.fr+0,_enc->pipe.qs+0,
2447
308k
           skip_ssd,rd_scale);
2448
308k
          mb_gmv_bits_0=oc_cost_inter1mv(_enc,modes+OC_MODE_GOLDEN_MV,mbi,
2449
308k
           OC_MODE_GOLDEN_MV,embs[mbi].unref_mv[OC_FRAME_GOLD],
2450
308k
           _enc->pipe.fr+0,_enc->pipe.qs+0,skip_ssd,rd_scale);
2451
          /*The explicit MV modes (2,6,7) have not yet gone through halfpel
2452
             refinement.
2453
            We choose the explicit MV mode that's already furthest ahead on
2454
             R-D cost and refine only that one.
2455
            We have to be careful to remember which ones we've refined so that
2456
             we don't refine it again if we re-encode this frame.*/
2457
308k
          inter_mv_pref=_enc->lambda*3;
2458
308k
          if(sp_level<OC_SP_LEVEL_FAST_ANALYSIS){
2459
308k
            oc_cost_inter4mv(_enc,modes+OC_MODE_INTER_MV_FOUR,mbi,
2460
308k
             embs[mbi].block_mv,_enc->pipe.fr+0,_enc->pipe.qs+0,
2461
308k
             skip_ssd,rd_scale);
2462
308k
          }
2463
0
          else{
2464
0
            modes[OC_MODE_INTER_MV_FOUR].cost=UINT_MAX;
2465
0
          }
2466
308k
          if(modes[OC_MODE_INTER_MV_FOUR].cost<modes[OC_MODE_INTER_MV].cost&&
2467
65.5k
           modes[OC_MODE_INTER_MV_FOUR].cost<modes[OC_MODE_GOLDEN_MV].cost){
2468
49.0k
            if(!(embs[mbi].refined&0x80)){
2469
34.7k
              oc_mcenc_refine4mv(_enc,mbi);
2470
34.7k
              embs[mbi].refined|=0x80;
2471
34.7k
            }
2472
49.0k
            oc_cost_inter4mv(_enc,modes+OC_MODE_INTER_MV_FOUR,mbi,
2473
49.0k
             embs[mbi].ref_mv,_enc->pipe.fr+0,_enc->pipe.qs+0,
2474
49.0k
             skip_ssd,rd_scale);
2475
49.0k
          }
2476
259k
          else if(modes[OC_MODE_GOLDEN_MV].cost+inter_mv_pref<
2477
259k
           modes[OC_MODE_INTER_MV].cost){
2478
65.5k
            if(!(embs[mbi].refined&0x40)){
2479
53.9k
              oc_mcenc_refine1mv(_enc,mbi,OC_FRAME_GOLD);
2480
53.9k
              embs[mbi].refined|=0x40;
2481
53.9k
            }
2482
65.5k
            mb_gmv_bits_0=oc_cost_inter1mv(_enc,modes+OC_MODE_GOLDEN_MV,mbi,
2483
65.5k
             OC_MODE_GOLDEN_MV,embs[mbi].analysis_mv[0][OC_FRAME_GOLD],
2484
65.5k
             _enc->pipe.fr+0,_enc->pipe.qs+0,skip_ssd,rd_scale);
2485
65.5k
          }
2486
308k
          if(!(embs[mbi].refined&0x04)){
2487
222k
            oc_mcenc_refine1mv(_enc,mbi,OC_FRAME_PREV);
2488
222k
            embs[mbi].refined|=0x04;
2489
222k
          }
2490
308k
          mb_mv_bits_0=oc_cost_inter1mv(_enc,modes+OC_MODE_INTER_MV,mbi,
2491
308k
           OC_MODE_INTER_MV,embs[mbi].analysis_mv[0][OC_FRAME_PREV],
2492
308k
           _enc->pipe.fr+0,_enc->pipe.qs+0,skip_ssd,rd_scale);
2493
          /*Finally, pick the mode with the cheapest estimated R-D cost.*/
2494
308k
          mb_mode=OC_MODE_INTER_NOMV;
2495
308k
          if(modes[OC_MODE_INTRA].cost<modes[OC_MODE_INTER_NOMV].cost){
2496
243k
            mb_mode=OC_MODE_INTRA;
2497
243k
          }
2498
308k
          if(modes[OC_MODE_INTER_MV_LAST].cost<modes[mb_mode].cost){
2499
34.8k
            mb_mode=OC_MODE_INTER_MV_LAST;
2500
34.8k
          }
2501
308k
          if(modes[OC_MODE_INTER_MV_LAST2].cost<modes[mb_mode].cost){
2502
8.26k
            mb_mode=OC_MODE_INTER_MV_LAST2;
2503
8.26k
          }
2504
308k
          if(modes[OC_MODE_GOLDEN_NOMV].cost<modes[mb_mode].cost){
2505
6.29k
            mb_mode=OC_MODE_GOLDEN_NOMV;
2506
6.29k
          }
2507
308k
          if(modes[OC_MODE_GOLDEN_MV].cost<modes[mb_mode].cost){
2508
25.6k
            mb_mode=OC_MODE_GOLDEN_MV;
2509
25.6k
          }
2510
308k
          if(modes[OC_MODE_INTER_MV_FOUR].cost<modes[mb_mode].cost){
2511
14.0k
            mb_mode=OC_MODE_INTER_MV_FOUR;
2512
14.0k
          }
2513
          /*We prefer OC_MODE_INTER_MV, but not over LAST and LAST2.*/
2514
308k
          if(mb_mode==OC_MODE_INTER_MV_LAST||mb_mode==OC_MODE_INTER_MV_LAST2){
2515
30.5k
            inter_mv_pref=0;
2516
30.5k
          }
2517
308k
          if(modes[OC_MODE_INTER_MV].cost<modes[mb_mode].cost+inter_mv_pref){
2518
24.5k
            mb_mode=OC_MODE_INTER_MV;
2519
24.5k
          }
2520
308k
        }
2521
0
        else{
2522
0
          oc_cost_inter_nomv(_enc,modes+OC_MODE_INTER_NOMV,mbi,
2523
0
           OC_MODE_INTER_NOMV,_enc->pipe.fr+0,_enc->pipe.qs+0,
2524
0
           skip_ssd,rd_scale);
2525
0
          oc_cost_intra(_enc,modes+OC_MODE_INTRA,mbi,
2526
0
           _enc->pipe.fr+0,_enc->pipe.qs+0,intra_satd,skip_ssd,rd_scale);
2527
0
          oc_cost_inter_nomv(_enc,modes+OC_MODE_GOLDEN_NOMV,mbi,
2528
0
           OC_MODE_GOLDEN_NOMV,_enc->pipe.fr+0,_enc->pipe.qs+0,
2529
0
           skip_ssd,rd_scale);
2530
0
          mb_mode=OC_MODE_INTER_NOMV;
2531
0
          if(modes[OC_MODE_INTRA].cost<modes[OC_MODE_INTER_NOMV].cost){
2532
0
            mb_mode=OC_MODE_INTRA;
2533
0
          }
2534
0
          if(modes[OC_MODE_GOLDEN_NOMV].cost<modes[mb_mode].cost){
2535
0
            mb_mode=OC_MODE_GOLDEN_NOMV;
2536
0
          }
2537
0
          mb_mv_bits_0=mb_gmv_bits_0=0;
2538
0
        }
2539
308k
        mb_modes[mbi]=mb_mode;
2540
        /*Propagate the MVs to the luma blocks.*/
2541
308k
        if(mb_mode!=OC_MODE_INTER_MV_FOUR){
2542
300k
          switch(mb_mode){
2543
24.5k
            case OC_MODE_INTER_MV:{
2544
24.5k
              mv=embs[mbi].analysis_mv[0][OC_FRAME_PREV];
2545
24.5k
            }break;
2546
21.9k
            case OC_MODE_INTER_MV_LAST:mv=last_mv;break;
2547
4.66k
            case OC_MODE_INTER_MV_LAST2:mv=prior_mv;break;
2548
15.3k
            case OC_MODE_GOLDEN_MV:{
2549
15.3k
              mv=embs[mbi].analysis_mv[0][OC_FRAME_GOLD];
2550
15.3k
            }break;
2551
300k
          }
2552
1.50M
          for(bi=0;bi<4;bi++){
2553
1.20M
            fragi=mb_maps[mbi][0][bi];
2554
1.20M
            frag_mvs[fragi]=mv;
2555
1.20M
          }
2556
300k
        }
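        /*The single-MV modes above shared one vector across all four luma
           fragments; 4MV macro blocks were skipped because their per-block
           vectors were already stored in frag_mvs during the 4MV analysis,
           as the back-out code below relies on.*/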
2557
1.54M
        for(bi=0;bi<4;bi++){
2558
1.23M
          fragi=sb_maps[mbi>>2][mbi&3][bi];
2559
1.23M
          frags[fragi].qii=modes[mb_mode].qii[bi];
2560
1.23M
        }
2561
308k
        if(oc_enc_mb_transform_quantize_inter_luma(_enc,&_enc->pipe,mbi,
2562
308k
         modes[mb_mode].overhead>>OC_BIT_SCALE,rd_scale,rd_iscale)>0){
2563
237k
          int orig_mb_mode;
2564
237k
          orig_mb_mode=mb_mode;
2565
237k
          mb_mode=mb_modes[mbi];
2566
237k
          refi=OC_FRAME_FOR_MODE(mb_mode);
2567
237k
          switch(mb_mode){
2568
10.6k
            case OC_MODE_INTER_MV:{
2569
10.6k
              prior_mv=last_mv;
2570
              /*If we're backing out from 4MV, find the MV we're actually
2571
                 using.*/
2572
10.6k
              if(orig_mb_mode==OC_MODE_INTER_MV_FOUR){
2573
707
                for(bi=0;;bi++){
2574
707
                  fragi=mb_maps[mbi][0][bi];
2575
707
                  if(frags[fragi].coded){
2576
203
                    mv=last_mv=frag_mvs[fragi];
2577
203
                    break;
2578
203
                  }
2579
707
                }
2580
203
                mb_mv_bits_0=OC_MV_BITS[0][OC_MV_X(mv)+31]
2581
203
                 +OC_MV_BITS[0][OC_MV_Y(mv)+31];
2582
203
              }
2583
              /*Otherwise we used the original analysis MV.*/
2584
10.4k
              else last_mv=embs[mbi].analysis_mv[0][OC_FRAME_PREV];
2585
10.6k
              _enc->mv_bits[0]+=mb_mv_bits_0;
2586
10.6k
              _enc->mv_bits[1]+=12;
2587
10.6k
            }break;
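            /*mv_bits[0] tallies the frame's MV cost under the
               variable-length scheme and mv_bits[1] under the fixed-length
               scheme (presumably 6 bits per component, hence the 12); the
               cheaper total decides which scheme is written to the
               bitstream.*/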
2588
3.42k
            case OC_MODE_INTER_MV_LAST2:{
2589
3.42k
              oc_mv tmp_mv;
2590
3.42k
              tmp_mv=prior_mv;
2591
3.42k
              prior_mv=last_mv;
2592
3.42k
              last_mv=tmp_mv;
2593
3.42k
            }break;
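            /*The block reused prior_mv, the next-to-last MV in coding order;
               the swap makes it the most recent one and keeps the two-deep
               MV history in the proper order.*/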
2594
7.16k
            case OC_MODE_GOLDEN_MV:{
2595
7.16k
              _enc->mv_bits[0]+=mb_gmv_bits_0;
2596
7.16k
              _enc->mv_bits[1]+=12;
2597
7.16k
            }break;
2598
6.16k
            case OC_MODE_INTER_MV_FOUR:{
2599
6.16k
              oc_mv lbmvs[4];
2600
6.16k
              oc_mv cbmvs[4];
2601
6.16k
              prior_mv=last_mv;
2602
30.8k
              for(bi=0;bi<4;bi++){
2603
24.6k
                fragi=mb_maps[mbi][0][bi];
2604
24.6k
                if(frags[fragi].coded){
2605
23.5k
                  lbmvs[bi]=last_mv=frag_mvs[fragi];
2606
23.5k
                  _enc->mv_bits[0]+=OC_MV_BITS[0][OC_MV_X(last_mv)+31]
2607
23.5k
                   +OC_MV_BITS[0][OC_MV_Y(last_mv)+31];
2608
23.5k
                  _enc->mv_bits[1]+=12;
2609
23.5k
                }
2610
                /*Replace the block MVs for not-coded blocks with (0,0).*/
2611
1.05k
                else lbmvs[bi]=0;
2612
24.6k
              }
2613
6.16k
              (*set_chroma_mvs)(cbmvs,lbmvs);
2614
19.6k
              for(mapii=4;mapii<nmap_idxs;mapii++){
2615
13.5k
                mapi=map_idxs[mapii];
2616
13.5k
                pli=mapi>>2;
2617
13.5k
                bi=mapi&3;
2618
13.5k
                fragi=mb_maps[mbi][pli][bi];
2619
13.5k
                frags[fragi].qii=modes[OC_MODE_INTER_MV_FOUR].qii[mapii];
2620
13.5k
                frags[fragi].refi=refi;
2621
13.5k
                frags[fragi].mb_mode=mb_mode;
2622
13.5k
                frag_mvs[fragi]=cbmvs[bi];
2623
13.5k
              }
2624
6.16k
            }break;
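            /*set_chroma_mvs (selected earlier from the pixel format) derives
               the chroma-block vectors from the four luma vectors; for 4:2:0
               this is presumably a rounded average.
              The chroma qii/refi/mode/MV propagation happens right here for
               4MV because the generic propagation below skips this mode.*/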
2625
237k
          }
2626
237k
          coded_mbis[ncoded_mbis++]=mbi;
2627
237k
          oc_mode_scheme_chooser_update(&_enc->chooser,mb_mode);
2628
237k
          interbits+=modes[mb_mode].rate+modes[mb_mode].overhead;
2629
237k
        }
2630
70.8k
        else{
2631
70.8k
          *(uncoded_mbis-++nuncoded_mbis)=mbi;
2632
70.8k
          mb_mode=OC_MODE_INTER_NOMV;
2633
70.8k
          refi=OC_FRAME_PREV;
2634
70.8k
          mv=0;
2635
70.8k
        }
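        /*Summing up the two branches: coded MBs are appended to coded_mbis,
           their mode updates the mode-scheme chooser, and rate plus overhead
           accrues to interbits for the key-frame check below; MBs whose
           blocks were all skipped are written from the tail of the same
           array (uncoded_mbis grows downward) and reconstruct as
           OC_MODE_INTER_NOMV with a zero MV.*/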
2636
        /*Propagate final MB mode and MVs to the chroma blocks.
2637
          This has already been done for 4MV mode, since it requires individual
2638
           block motion vectors.*/
2639
308k
        if(mb_mode!=OC_MODE_INTER_MV_FOUR){
2640
975k
          for(mapii=4;mapii<nmap_idxs;mapii++){
2641
673k
            mapi=map_idxs[mapii];
2642
673k
            pli=mapi>>2;
2643
673k
            bi=mapi&3;
2644
673k
            fragi=mb_maps[mbi][pli][bi];
2645
            /*If we switched from 4MV mode to INTER_MV mode, then the qii
2646
               values won't have been chosen with the right MV, but it's
2647
               probably not worth re-estimating them.*/
2648
673k
            frags[fragi].qii=modes[mb_mode].qii[mapii];
2649
673k
            frags[fragi].refi=refi;
2650
673k
            frags[fragi].mb_mode=mb_mode;
2651
673k
            frag_mvs[fragi]=mv;
2652
673k
          }
2653
302k
        }
2654
        /*Save masking scale factors for chroma blocks.*/
2655
652k
        for(mapii=4;mapii<(nmap_idxs-4>>1)+4;mapii++){
2656
343k
          mapi=map_idxs[mapii];
2657
343k
          bi=mapi&3;
2658
343k
          fragi=mb_maps[mbi][1][bi];
2659
343k
          mcu_rd_scale[fragi-cfroffset]=(ogg_uint16_t)rd_scale[4];
2660
343k
          mcu_rd_iscale[fragi-cfroffset]=(ogg_uint16_t)rd_iscale[4];
2661
343k
        }
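        /*Only the plane-1 (Cb) map entries are visited: the loop bound stops
           half-way through the chroma map indices, and Cb and Cr share the
           same geometry, so one entry per chroma position suffices.
          rd_scale[4] is presumably the macro block's aggregate chroma
           masking factor from the activity-masking step.*/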
2662
308k
      }
2663
100k
      oc_fr_state_flush_sb(_enc->pipe.fr+0);
2664
100k
      sb_flags[sbi].coded_fully=_enc->pipe.fr[0].sb_full;
2665
100k
      sb_flags[sbi].coded_partially=_enc->pipe.fr[0].sb_partial;
2666
100k
    }
2667
41.0k
    oc_enc_pipeline_finish_mcu_plane(_enc,&_enc->pipe,0,notstart,notdone);
2668
    /*Code chroma planes.*/
2669
123k
    for(pli=1;pli<3;pli++){
2670
82.0k
      oc_enc_sb_transform_quantize_inter_chroma(_enc,&_enc->pipe,
2671
82.0k
       pli,_enc->pipe.sbi0[pli],_enc->pipe.sbi_end[pli]);
2672
82.0k
      oc_enc_pipeline_finish_mcu_plane(_enc,&_enc->pipe,pli,notstart,notdone);
2673
82.0k
    }
2674
41.0k
    notstart=1;
2675
41.0k
  }
2676
  /*Update the average block activity and MB luma score for the frame.
2677
    We could use a Bessel follower here, but fast reaction is probably almost
2678
     always best.*/
2679
39.1k
  _enc->activity_avg=OC_MAXI(OC_ACTIVITY_AVG_MIN,
2680
39.1k
   (unsigned)((activity_sum+(_enc->state.fplanes[0].nfrags>>1))/
2681
39.1k
   _enc->state.fplanes[0].nfrags));
2682
39.1k
  _enc->luma_avg=(unsigned)((luma_sum+(_enc->state.nmbs>>1))/_enc->state.nmbs);
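  /*Adding half the divisor before dividing rounds to nearest instead of
     truncating: e.g., (10+2)/4==3, where plain 10/4 would give 2.*/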
2683
  /*Finish filling in the reference frame borders.*/
2684
39.1k
  refi=_enc->state.ref_frame_idx[OC_FRAME_SELF];
2685
156k
  for(pli=0;pli<3;pli++)oc_state_borders_fill_caps(&_enc->state,refi,pli);
2686
  /*Finish adding flagging overhead costs to inter bit counts to determine if
2687
     we should have coded a key frame instead.*/
2688
39.1k
  if(_allow_keyframe){
2689
    /*Technically the chroma plane counts are over-estimations, because they
2690
       don't account for continuing runs from the luma planes, but the
2691
       inaccuracy is small.
2692
      We don't need to add the luma plane coding flag costs, because they are
2693
       already included in the MB rate estimates.*/
2694
117k
    for(pli=1;pli<3;pli++)interbits+=_enc->pipe.fr[pli].bits<<OC_BIT_SCALE;
2695
39.1k
    if(interbits>intrabits)return 1;
2696
39.1k
  }
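  /*Returning nonzero tells the caller to re-code the frame as a key frame:
     the estimated inter cost, coded-block flagging included, exceeded the
     intra estimate.
    Both totals are in the same bits<<OC_BIT_SCALE fixed-point units.*/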
2697
21.8k
  _enc->ncoded_mbis=ncoded_mbis;
2698
  /*Compact the coded fragment list.*/
2699
21.8k
  {
2700
21.8k
    ptrdiff_t ncoded_fragis;
2701
21.8k
    ncoded_fragis=_enc->state.ncoded_fragis[0];
2702
65.6k
    for(pli=1;pli<3;pli++){
2703
43.7k
      memmove(_enc->state.coded_fragis+ncoded_fragis,
2704
43.7k
       _enc->state.coded_fragis+_enc->state.fplanes[pli].froffset,
2705
43.7k
       _enc->state.ncoded_fragis[pli]*sizeof(*_enc->state.coded_fragis));
2706
43.7k
      ncoded_fragis+=_enc->state.ncoded_fragis[pli];
2707
43.7k
    }
2708
21.8k
    _enc->state.ntotal_coded_fragis=ncoded_fragis;
2709
21.8k
  }
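  /*Each plane's coded-fragment list was built at that plane's offset within
     coded_fragis, leaving gaps where fragments went uncoded; the memmoves
     above pack planes 1 and 2 down against plane 0 so later passes can walk
     a single contiguous list of ntotal_coded_fragis entries.*/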
2710
21.8k
  return 0;
2711
39.1k
}