h264.c
Go to the documentation of this file.
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
28 #include "libavutil/imgutils.h"
29 #include "internal.h"
30 #include "cabac.h"
31 #include "cabac_functions.h"
32 #include "dsputil.h"
33 #include "avcodec.h"
34 #include "mpegvideo.h"
35 #include "h264.h"
36 #include "h264data.h"
37 #include "h264_mvpred.h"
38 #include "golomb.h"
39 #include "mathops.h"
40 #include "rectangle.h"
41 #include "thread.h"
42 #include "vdpau_internal.h"
43 #include "libavutil/avassert.h"
44 
45 // #undef NDEBUG
46 #include <assert.h>
47 
/* Per-macroblock sample counts indexed by chroma_format_idc
 * (0 = mono, 1 = 4:2:0, 2 = 4:2:2, 3 = 4:4:4). */
const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
49 
50 static const uint8_t rem6[QP_MAX_NUM + 1] = {
51  0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
52  3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
53  0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
54 };
55 
56 static const uint8_t div6[QP_MAX_NUM + 1] = {
57  0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
58  3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
59  7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
60 };
61 
63 #if CONFIG_H264_DXVA2_HWACCEL
65 #endif
66 #if CONFIG_H264_VAAPI_HWACCEL
68 #endif
69 #if CONFIG_H264_VDA_HWACCEL
71 #endif
74 };
75 
81 {
82  MpegEncContext *const s = &h->s;
83  static const int8_t top[12] = {
84  -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
85  };
86  static const int8_t left[12] = {
87  0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
88  };
89  int i;
90 
91  if (!(h->top_samples_available & 0x8000)) {
92  for (i = 0; i < 4; i++) {
93  int status = top[h->intra4x4_pred_mode_cache[scan8[0] + i]];
94  if (status < 0) {
96  "top block unavailable for requested intra4x4 mode %d at %d %d\n",
97  status, s->mb_x, s->mb_y);
98  return -1;
99  } else if (status) {
100  h->intra4x4_pred_mode_cache[scan8[0] + i] = status;
101  }
102  }
103  }
104 
105  if ((h->left_samples_available & 0x8888) != 0x8888) {
106  static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
107  for (i = 0; i < 4; i++)
108  if (!(h->left_samples_available & mask[i])) {
109  int status = left[h->intra4x4_pred_mode_cache[scan8[0] + 8 * i]];
110  if (status < 0) {
112  "left block unavailable for requested intra4x4 mode %d at %d %d\n",
113  status, s->mb_x, s->mb_y);
114  return -1;
115  } else if (status) {
116  h->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
117  }
118  }
119  }
120 
121  return 0;
122 } // FIXME cleanup like ff_h264_check_intra_pred_mode
123 
128 int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
129 {
130  MpegEncContext *const s = &h->s;
131  static const int8_t top[4] = { LEFT_DC_PRED8x8, 1, -1, -1 };
132  static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
133 
134  if (mode > 3U) {
136  "out of range intra chroma pred mode at %d %d\n",
137  s->mb_x, s->mb_y);
138  return -1;
139  }
140 
141  if (!(h->top_samples_available & 0x8000)) {
142  mode = top[mode];
143  if (mode < 0) {
145  "top block unavailable for requested intra mode at %d %d\n",
146  s->mb_x, s->mb_y);
147  return -1;
148  }
149  }
150 
151  if ((h->left_samples_available & 0x8080) != 0x8080) {
152  mode = left[mode];
153  if (is_chroma && (h->left_samples_available & 0x8080)) {
154  // mad cow disease mode, aka MBAFF + constrained_intra_pred
155  mode = ALZHEIMER_DC_L0T_PRED8x8 +
156  (!(h->left_samples_available & 0x8000)) +
157  2 * (mode == DC_128_PRED8x8);
158  }
159  if (mode < 0) {
161  "left block unavailable for requested intra mode at %d %d\n",
162  s->mb_x, s->mb_y);
163  return -1;
164  }
165  }
166 
167  return mode;
168 }
169 
171  int *dst_length, int *consumed, int length)
172 {
173  int i, si, di;
174  uint8_t *dst;
175  int bufidx;
176 
177  // src[0]&0x80; // forbidden bit
178  h->nal_ref_idc = src[0] >> 5;
179  h->nal_unit_type = src[0] & 0x1F;
180 
181  src++;
182  length--;
183 
184 #define STARTCODE_TEST \
185  if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
186  if (src[i + 2] != 3) { \
187  /* startcode, so we must be past the end */ \
188  length = i; \
189  } \
190  break; \
191  }
192 #if HAVE_FAST_UNALIGNED
193 #define FIND_FIRST_ZERO \
194  if (i > 0 && !src[i]) \
195  i--; \
196  while (src[i]) \
197  i++
198 #if HAVE_FAST_64BIT
199  for (i = 0; i + 1 < length; i += 9) {
200  if (!((~AV_RN64A(src + i) &
201  (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
202  0x8000800080008080ULL))
203  continue;
204  FIND_FIRST_ZERO;
206  i -= 7;
207  }
208 #else
209  for (i = 0; i + 1 < length; i += 5) {
210  if (!((~AV_RN32A(src + i) &
211  (AV_RN32A(src + i) - 0x01000101U)) &
212  0x80008080U))
213  continue;
214  FIND_FIRST_ZERO;
216  i -= 3;
217  }
218 #endif
219 #else
220  for (i = 0; i + 1 < length; i += 2) {
221  if (src[i])
222  continue;
223  if (i > 0 && src[i - 1] == 0)
224  i--;
226  }
227 #endif
228 
229  if (i >= length - 1) { // no escaped 0
230  *dst_length = length;
231  *consumed = length + 1; // +1 for the header
232  return src;
233  }
234 
235  // use second escape buffer for inter data
236  bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0;
237  av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx],
239  dst = h->rbsp_buffer[bufidx];
240 
241  if (dst == NULL)
242  return NULL;
243 
244  memcpy(dst, src, i);
245  si = di = i;
246  while (si + 2 < length) {
247  // remove escapes (very rare 1:2^22)
248  if (src[si + 2] > 3) {
249  dst[di++] = src[si++];
250  dst[di++] = src[si++];
251  } else if (src[si] == 0 && src[si + 1] == 0) {
252  if (src[si + 2] == 3) { // escape
253  dst[di++] = 0;
254  dst[di++] = 0;
255  si += 3;
256  continue;
257  } else // next start code
258  goto nsc;
259  }
260 
261  dst[di++] = src[si++];
262  }
263  while (si < length)
264  dst[di++] = src[si++];
265 nsc:
266 
267  memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
268 
269  *dst_length = di;
270  *consumed = si + 1; // +1 for the header
271  /* FIXME store exact number of bits in the getbitcontext
272  * (it is needed for decoding) */
273  return dst;
274 }
275 
280 static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
281 {
282  int v = *src;
283  int r;
284 
285  tprintf(h->s.avctx, "rbsp trailing %X\n", v);
286 
287  for (r = 1; r < 9; r++) {
288  if (v & 1)
289  return r;
290  v >>= 1;
291  }
292  return 0;
293 }
294 
295 static inline int get_lowest_part_list_y(H264Context *h, Picture *pic, int n,
296  int height, int y_offset, int list)
297 {
298  int raw_my = h->mv_cache[list][scan8[n]][1];
299  int filter_height_up = (raw_my & 3) ? 2 : 0;
300  int filter_height_down = (raw_my & 3) ? 3 : 0;
301  int full_my = (raw_my >> 2) + y_offset;
302  int top = full_my - filter_height_up;
303  int bottom = full_my + filter_height_down + height;
304 
305  return FFMAX(abs(top), bottom);
306 }
307 
308 static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
309  int height, int y_offset, int list0,
310  int list1, int *nrefs)
311 {
312  MpegEncContext *const s = &h->s;
313  int my;
314 
315  y_offset += 16 * (s->mb_y >> MB_FIELD);
316 
317  if (list0) {
318  int ref_n = h->ref_cache[0][scan8[n]];
319  Picture *ref = &h->ref_list[0][ref_n];
320 
321  // Error resilience puts the current picture in the ref list.
322  // Don't try to wait on these as it will cause a deadlock.
323  // Fields can wait on each other, though.
324  if (ref->f.thread_opaque != s->current_picture.f.thread_opaque ||
325  (ref->f.reference & 3) != s->picture_structure) {
326  my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
327  if (refs[0][ref_n] < 0)
328  nrefs[0] += 1;
329  refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
330  }
331  }
332 
333  if (list1) {
334  int ref_n = h->ref_cache[1][scan8[n]];
335  Picture *ref = &h->ref_list[1][ref_n];
336 
337  if (ref->f.thread_opaque != s->current_picture.f.thread_opaque ||
338  (ref->f.reference & 3) != s->picture_structure) {
339  my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
340  if (refs[1][ref_n] < 0)
341  nrefs[1] += 1;
342  refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
343  }
344  }
345 }
346 
353 {
354  MpegEncContext *const s = &h->s;
355  const int mb_xy = h->mb_xy;
356  const int mb_type = s->current_picture.f.mb_type[mb_xy];
357  int refs[2][48];
358  int nrefs[2] = { 0 };
359  int ref, list;
360 
361  memset(refs, -1, sizeof(refs));
362 
363  if (IS_16X16(mb_type)) {
364  get_lowest_part_y(h, refs, 0, 16, 0,
365  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
366  } else if (IS_16X8(mb_type)) {
367  get_lowest_part_y(h, refs, 0, 8, 0,
368  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
369  get_lowest_part_y(h, refs, 8, 8, 8,
370  IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
371  } else if (IS_8X16(mb_type)) {
372  get_lowest_part_y(h, refs, 0, 16, 0,
373  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
374  get_lowest_part_y(h, refs, 4, 16, 0,
375  IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
376  } else {
377  int i;
378 
379  assert(IS_8X8(mb_type));
380 
381  for (i = 0; i < 4; i++) {
382  const int sub_mb_type = h->sub_mb_type[i];
383  const int n = 4 * i;
384  int y_offset = (i & 2) << 2;
385 
386  if (IS_SUB_8X8(sub_mb_type)) {
387  get_lowest_part_y(h, refs, n, 8, y_offset,
388  IS_DIR(sub_mb_type, 0, 0),
389  IS_DIR(sub_mb_type, 0, 1),
390  nrefs);
391  } else if (IS_SUB_8X4(sub_mb_type)) {
392  get_lowest_part_y(h, refs, n, 4, y_offset,
393  IS_DIR(sub_mb_type, 0, 0),
394  IS_DIR(sub_mb_type, 0, 1),
395  nrefs);
396  get_lowest_part_y(h, refs, n + 2, 4, y_offset + 4,
397  IS_DIR(sub_mb_type, 0, 0),
398  IS_DIR(sub_mb_type, 0, 1),
399  nrefs);
400  } else if (IS_SUB_4X8(sub_mb_type)) {
401  get_lowest_part_y(h, refs, n, 8, y_offset,
402  IS_DIR(sub_mb_type, 0, 0),
403  IS_DIR(sub_mb_type, 0, 1),
404  nrefs);
405  get_lowest_part_y(h, refs, n + 1, 8, y_offset,
406  IS_DIR(sub_mb_type, 0, 0),
407  IS_DIR(sub_mb_type, 0, 1),
408  nrefs);
409  } else {
410  int j;
411  assert(IS_SUB_4X4(sub_mb_type));
412  for (j = 0; j < 4; j++) {
413  int sub_y_offset = y_offset + 2 * (j & 2);
414  get_lowest_part_y(h, refs, n + j, 4, sub_y_offset,
415  IS_DIR(sub_mb_type, 0, 0),
416  IS_DIR(sub_mb_type, 0, 1),
417  nrefs);
418  }
419  }
420  }
421  }
422 
423  for (list = h->list_count - 1; list >= 0; list--)
424  for (ref = 0; ref < 48 && nrefs[list]; ref++) {
425  int row = refs[list][ref];
426  if (row >= 0) {
427  Picture *ref_pic = &h->ref_list[list][ref];
428  int ref_field = ref_pic->f.reference - 1;
429  int ref_field_picture = ref_pic->field_picture;
430  int pic_height = 16 * s->mb_height >> ref_field_picture;
431 
432  row <<= MB_MBAFF;
433  nrefs[list]--;
434 
435  if (!FIELD_PICTURE && ref_field_picture) { // frame referencing two fields
436  ff_thread_await_progress(&ref_pic->f,
437  FFMIN((row >> 1) - !(row & 1),
438  pic_height - 1),
439  1);
440  ff_thread_await_progress(&ref_pic->f,
441  FFMIN((row >> 1), pic_height - 1),
442  0);
443  } else if (FIELD_PICTURE && !ref_field_picture) { // field referencing one field of a frame
444  ff_thread_await_progress(&ref_pic->f,
445  FFMIN(row * 2 + ref_field,
446  pic_height - 1),
447  0);
448  } else if (FIELD_PICTURE) {
449  ff_thread_await_progress(&ref_pic->f,
450  FFMIN(row, pic_height - 1),
451  ref_field);
452  } else {
453  ff_thread_await_progress(&ref_pic->f,
454  FFMIN(row, pic_height - 1),
455  0);
456  }
457  }
458  }
459 }
460 
462  int n, int square, int height,
463  int delta, int list,
464  uint8_t *dest_y, uint8_t *dest_cb,
465  uint8_t *dest_cr,
466  int src_x_offset, int src_y_offset,
467  qpel_mc_func *qpix_op,
468  h264_chroma_mc_func chroma_op,
469  int pixel_shift, int chroma_idc)
470 {
471  MpegEncContext *const s = &h->s;
472  const int mx = h->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
473  int my = h->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
474  const int luma_xy = (mx & 3) + ((my & 3) << 2);
475  int offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
476  uint8_t *src_y = pic->f.data[0] + offset;
477  uint8_t *src_cb, *src_cr;
478  int extra_width = h->emu_edge_width;
479  int extra_height = h->emu_edge_height;
480  int emu = 0;
481  const int full_mx = mx >> 2;
482  const int full_my = my >> 2;
483  const int pic_width = 16 * s->mb_width;
484  const int pic_height = 16 * s->mb_height >> MB_FIELD;
485  int ysh;
486 
487  if (mx & 7)
488  extra_width -= 3;
489  if (my & 7)
490  extra_height -= 3;
491 
492  if (full_mx < 0 - extra_width ||
493  full_my < 0 - extra_height ||
494  full_mx + 16 /*FIXME*/ > pic_width + extra_width ||
495  full_my + 16 /*FIXME*/ > pic_height + extra_height) {
497  src_y - (2 << pixel_shift) - 2 * h->mb_linesize,
498  h->mb_linesize,
499  16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
500  full_my - 2, pic_width, pic_height);
501  src_y = s->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
502  emu = 1;
503  }
504 
505  qpix_op[luma_xy](dest_y, src_y, h->mb_linesize); // FIXME try variable height perhaps?
506  if (!square)
507  qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize);
508 
509  if (CONFIG_GRAY && s->flags & CODEC_FLAG_GRAY)
510  return;
511 
512  if (chroma_idc == 3 /* yuv444 */) {
513  src_cb = pic->f.data[1] + offset;
514  if (emu) {
516  src_cb - (2 << pixel_shift) - 2 * h->mb_linesize,
517  h->mb_linesize,
518  16 + 5, 16 + 5 /*FIXME*/,
519  full_mx - 2, full_my - 2,
520  pic_width, pic_height);
521  src_cb = s->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
522  }
523  qpix_op[luma_xy](dest_cb, src_cb, h->mb_linesize); // FIXME try variable height perhaps?
524  if (!square)
525  qpix_op[luma_xy](dest_cb + delta, src_cb + delta, h->mb_linesize);
526 
527  src_cr = pic->f.data[2] + offset;
528  if (emu) {
530  src_cr - (2 << pixel_shift) - 2 * h->mb_linesize,
531  h->mb_linesize,
532  16 + 5, 16 + 5 /*FIXME*/,
533  full_mx - 2, full_my - 2,
534  pic_width, pic_height);
535  src_cr = s->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
536  }
537  qpix_op[luma_xy](dest_cr, src_cr, h->mb_linesize); // FIXME try variable height perhaps?
538  if (!square)
539  qpix_op[luma_xy](dest_cr + delta, src_cr + delta, h->mb_linesize);
540  return;
541  }
542 
543  ysh = 3 - (chroma_idc == 2 /* yuv422 */);
544  if (chroma_idc == 1 /* yuv420 */ && MB_FIELD) {
545  // chroma offset when predicting from a field of opposite parity
546  my += 2 * ((s->mb_y & 1) - (pic->f.reference - 1));
547  emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
548  }
549 
550  src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) +
551  (my >> ysh) * h->mb_uvlinesize;
552  src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) +
553  (my >> ysh) * h->mb_uvlinesize;
554 
555  if (emu) {
557  9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
558  pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
559  src_cb = s->edge_emu_buffer;
560  }
561  chroma_op(dest_cb, src_cb, h->mb_uvlinesize,
562  height >> (chroma_idc == 1 /* yuv420 */),
563  mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
564 
565  if (emu) {
567  9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
568  pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
569  src_cr = s->edge_emu_buffer;
570  }
571  chroma_op(dest_cr, src_cr, h->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */),
572  mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
573 }
574 
575 static av_always_inline void mc_part_std(H264Context *h, int n, int square,
576  int height, int delta,
577  uint8_t *dest_y, uint8_t *dest_cb,
578  uint8_t *dest_cr,
579  int x_offset, int y_offset,
580  qpel_mc_func *qpix_put,
581  h264_chroma_mc_func chroma_put,
582  qpel_mc_func *qpix_avg,
583  h264_chroma_mc_func chroma_avg,
584  int list0, int list1,
585  int pixel_shift, int chroma_idc)
586 {
587  MpegEncContext *const s = &h->s;
588  qpel_mc_func *qpix_op = qpix_put;
589  h264_chroma_mc_func chroma_op = chroma_put;
590 
591  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
592  if (chroma_idc == 3 /* yuv444 */) {
593  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
594  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
595  } else if (chroma_idc == 2 /* yuv422 */) {
596  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
597  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
598  } else { /* yuv420 */
599  dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
600  dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
601  }
602  x_offset += 8 * s->mb_x;
603  y_offset += 8 * (s->mb_y >> MB_FIELD);
604 
605  if (list0) {
606  Picture *ref = &h->ref_list[0][h->ref_cache[0][scan8[n]]];
607  mc_dir_part(h, ref, n, square, height, delta, 0,
608  dest_y, dest_cb, dest_cr, x_offset, y_offset,
609  qpix_op, chroma_op, pixel_shift, chroma_idc);
610 
611  qpix_op = qpix_avg;
612  chroma_op = chroma_avg;
613  }
614 
615  if (list1) {
616  Picture *ref = &h->ref_list[1][h->ref_cache[1][scan8[n]]];
617  mc_dir_part(h, ref, n, square, height, delta, 1,
618  dest_y, dest_cb, dest_cr, x_offset, y_offset,
619  qpix_op, chroma_op, pixel_shift, chroma_idc);
620  }
621 }
622 
624  int height, int delta,
625  uint8_t *dest_y, uint8_t *dest_cb,
626  uint8_t *dest_cr,
627  int x_offset, int y_offset,
628  qpel_mc_func *qpix_put,
629  h264_chroma_mc_func chroma_put,
630  h264_weight_func luma_weight_op,
631  h264_weight_func chroma_weight_op,
632  h264_biweight_func luma_weight_avg,
633  h264_biweight_func chroma_weight_avg,
634  int list0, int list1,
635  int pixel_shift, int chroma_idc)
636 {
637  MpegEncContext *const s = &h->s;
638  int chroma_height;
639 
640  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
641  if (chroma_idc == 3 /* yuv444 */) {
642  chroma_height = height;
643  chroma_weight_avg = luma_weight_avg;
644  chroma_weight_op = luma_weight_op;
645  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
646  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
647  } else if (chroma_idc == 2 /* yuv422 */) {
648  chroma_height = height;
649  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
650  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
651  } else { /* yuv420 */
652  chroma_height = height >> 1;
653  dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
654  dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
655  }
656  x_offset += 8 * s->mb_x;
657  y_offset += 8 * (s->mb_y >> MB_FIELD);
658 
659  if (list0 && list1) {
660  /* don't optimize for luma-only case, since B-frames usually
661  * use implicit weights => chroma too. */
662  uint8_t *tmp_cb = h->bipred_scratchpad;
663  uint8_t *tmp_cr = h->bipred_scratchpad + (16 << pixel_shift);
664  uint8_t *tmp_y = h->bipred_scratchpad + 16 * h->mb_uvlinesize;
665  int refn0 = h->ref_cache[0][scan8[n]];
666  int refn1 = h->ref_cache[1][scan8[n]];
667 
668  mc_dir_part(h, &h->ref_list[0][refn0], n, square, height, delta, 0,
669  dest_y, dest_cb, dest_cr,
670  x_offset, y_offset, qpix_put, chroma_put,
671  pixel_shift, chroma_idc);
672  mc_dir_part(h, &h->ref_list[1][refn1], n, square, height, delta, 1,
673  tmp_y, tmp_cb, tmp_cr,
674  x_offset, y_offset, qpix_put, chroma_put,
675  pixel_shift, chroma_idc);
676 
677  if (h->use_weight == 2) {
678  int weight0 = h->implicit_weight[refn0][refn1][s->mb_y & 1];
679  int weight1 = 64 - weight0;
680  luma_weight_avg(dest_y, tmp_y, h->mb_linesize,
681  height, 5, weight0, weight1, 0);
682  chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize,
683  chroma_height, 5, weight0, weight1, 0);
684  chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize,
685  chroma_height, 5, weight0, weight1, 0);
686  } else {
687  luma_weight_avg(dest_y, tmp_y, h->mb_linesize, height,
689  h->luma_weight[refn0][0][0],
690  h->luma_weight[refn1][1][0],
691  h->luma_weight[refn0][0][1] +
692  h->luma_weight[refn1][1][1]);
693  chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, chroma_height,
695  h->chroma_weight[refn0][0][0][0],
696  h->chroma_weight[refn1][1][0][0],
697  h->chroma_weight[refn0][0][0][1] +
698  h->chroma_weight[refn1][1][0][1]);
699  chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, chroma_height,
701  h->chroma_weight[refn0][0][1][0],
702  h->chroma_weight[refn1][1][1][0],
703  h->chroma_weight[refn0][0][1][1] +
704  h->chroma_weight[refn1][1][1][1]);
705  }
706  } else {
707  int list = list1 ? 1 : 0;
708  int refn = h->ref_cache[list][scan8[n]];
709  Picture *ref = &h->ref_list[list][refn];
710  mc_dir_part(h, ref, n, square, height, delta, list,
711  dest_y, dest_cb, dest_cr, x_offset, y_offset,
712  qpix_put, chroma_put, pixel_shift, chroma_idc);
713 
714  luma_weight_op(dest_y, h->mb_linesize, height,
716  h->luma_weight[refn][list][0],
717  h->luma_weight[refn][list][1]);
718  if (h->use_weight_chroma) {
719  chroma_weight_op(dest_cb, h->mb_uvlinesize, chroma_height,
721  h->chroma_weight[refn][list][0][0],
722  h->chroma_weight[refn][list][0][1]);
723  chroma_weight_op(dest_cr, h->mb_uvlinesize, chroma_height,
725  h->chroma_weight[refn][list][1][0],
726  h->chroma_weight[refn][list][1][1]);
727  }
728  }
729 }
730 
732  int pixel_shift, int chroma_idc)
733 {
734  /* fetch pixels for estimated mv 4 macroblocks ahead
735  * optimized for 64byte cache lines */
736  MpegEncContext *const s = &h->s;
737  const int refn = h->ref_cache[list][scan8[0]];
738  if (refn >= 0) {
739  const int mx = (h->mv_cache[list][scan8[0]][0] >> 2) + 16 * s->mb_x + 8;
740  const int my = (h->mv_cache[list][scan8[0]][1] >> 2) + 16 * s->mb_y;
741  uint8_t **src = h->ref_list[list][refn].f.data;
742  int off = (mx << pixel_shift) +
743  (my + (s->mb_x & 3) * 4) * h->mb_linesize +
744  (64 << pixel_shift);
745  s->vdsp.prefetch(src[0] + off, s->linesize, 4);
746  if (chroma_idc == 3 /* yuv444 */) {
747  s->vdsp.prefetch(src[1] + off, s->linesize, 4);
748  s->vdsp.prefetch(src[2] + off, s->linesize, 4);
749  } else {
750  off = ((mx >> 1) << pixel_shift) +
751  ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize +
752  (64 << pixel_shift);
753  s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
754  }
755  }
756 }
757 
758 static void free_tables(H264Context *h, int free_rbsp)
759 {
760  int i;
761  H264Context *hx;
762 
765  av_freep(&h->cbp_table);
766  av_freep(&h->mvd_table[0]);
767  av_freep(&h->mvd_table[1]);
768  av_freep(&h->direct_table);
771  h->slice_table = NULL;
772  av_freep(&h->list_counts);
773 
774  av_freep(&h->mb2b_xy);
775  av_freep(&h->mb2br_xy);
776 
777  for (i = 0; i < MAX_THREADS; i++) {
778  hx = h->thread_context[i];
779  if (!hx)
780  continue;
781  av_freep(&hx->top_borders[1]);
782  av_freep(&hx->top_borders[0]);
784  if (free_rbsp) {
785  av_freep(&hx->rbsp_buffer[1]);
786  av_freep(&hx->rbsp_buffer[0]);
787  hx->rbsp_buffer_size[0] = 0;
788  hx->rbsp_buffer_size[1] = 0;
789  }
790  if (i)
791  av_freep(&h->thread_context[i]);
792  }
793 }
794 
796 {
797  int i, j, q, x;
798  const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
799 
800  for (i = 0; i < 6; i++) {
801  h->dequant8_coeff[i] = h->dequant8_buffer[i];
802  for (j = 0; j < i; j++)
803  if (!memcmp(h->pps.scaling_matrix8[j], h->pps.scaling_matrix8[i],
804  64 * sizeof(uint8_t))) {
805  h->dequant8_coeff[i] = h->dequant8_buffer[j];
806  break;
807  }
808  if (j < i)
809  continue;
810 
811  for (q = 0; q < max_qp + 1; q++) {
812  int shift = div6[q];
813  int idx = rem6[q];
814  for (x = 0; x < 64; x++)
815  h->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] =
816  ((uint32_t)dequant8_coeff_init[idx][dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] *
817  h->pps.scaling_matrix8[i][x]) << shift;
818  }
819  }
820 }
821 
823 {
824  int i, j, q, x;
825  const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
826  for (i = 0; i < 6; i++) {
827  h->dequant4_coeff[i] = h->dequant4_buffer[i];
828  for (j = 0; j < i; j++)
829  if (!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i],
830  16 * sizeof(uint8_t))) {
831  h->dequant4_coeff[i] = h->dequant4_buffer[j];
832  break;
833  }
834  if (j < i)
835  continue;
836 
837  for (q = 0; q < max_qp + 1; q++) {
838  int shift = div6[q] + 2;
839  int idx = rem6[q];
840  for (x = 0; x < 16; x++)
841  h->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] =
842  ((uint32_t)dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] *
843  h->pps.scaling_matrix4[i][x]) << shift;
844  }
845  }
846 }
847 
849 {
850  int i, x;
852  if (h->pps.transform_8x8_mode)
854  if (h->sps.transform_bypass) {
855  for (i = 0; i < 6; i++)
856  for (x = 0; x < 16; x++)
857  h->dequant4_coeff[i][0][x] = 1 << 6;
859  for (i = 0; i < 6; i++)
860  for (x = 0; x < 64; x++)
861  h->dequant8_coeff[i][0][x] = 1 << 6;
862  }
863 }
864 
866 {
867  MpegEncContext *const s = &h->s;
868  const int big_mb_num = s->mb_stride * (s->mb_height + 1);
869  const int row_mb_num = s->mb_stride * 2 * s->avctx->thread_count;
870  int x, y;
871 
873  row_mb_num * 8 * sizeof(uint8_t), fail)
875  big_mb_num * 48 * sizeof(uint8_t), fail)
877  (big_mb_num + s->mb_stride) * sizeof(*h->slice_table_base), fail)
879  big_mb_num * sizeof(uint16_t), fail)
881  big_mb_num * sizeof(uint8_t), fail)
883  16 * row_mb_num * sizeof(uint8_t), fail);
885  16 * row_mb_num * sizeof(uint8_t), fail);
887  4 * big_mb_num * sizeof(uint8_t), fail);
889  big_mb_num * sizeof(uint8_t), fail)
890 
891  memset(h->slice_table_base, -1,
892  (big_mb_num + s->mb_stride) * sizeof(*h->slice_table_base));
893  h->slice_table = h->slice_table_base + s->mb_stride * 2 + 1;
894 
896  big_mb_num * sizeof(uint32_t), fail);
898  big_mb_num * sizeof(uint32_t), fail);
899  for (y = 0; y < s->mb_height; y++)
900  for (x = 0; x < s->mb_width; x++) {
901  const int mb_xy = x + y * s->mb_stride;
902  const int b_xy = 4 * x + 4 * y * h->b_stride;
903 
904  h->mb2b_xy[mb_xy] = b_xy;
905  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * s->mb_stride)));
906  }
907 
908  if (!h->dequant4_coeff[0])
910 
911  return 0;
912 
913 fail:
914  free_tables(h, 1);
915  return -1;
916 }
917 
921 static void clone_tables(H264Context *dst, H264Context *src, int i)
922 {
923  MpegEncContext *const s = &src->s;
924  dst->intra4x4_pred_mode = src->intra4x4_pred_mode + i * 8 * 2 * s->mb_stride;
925  dst->non_zero_count = src->non_zero_count;
926  dst->slice_table = src->slice_table;
927  dst->cbp_table = src->cbp_table;
928  dst->mb2b_xy = src->mb2b_xy;
929  dst->mb2br_xy = src->mb2br_xy;
931  dst->mvd_table[0] = src->mvd_table[0] + i * 8 * 2 * s->mb_stride;
932  dst->mvd_table[1] = src->mvd_table[1] + i * 8 * 2 * s->mb_stride;
933  dst->direct_table = src->direct_table;
934  dst->list_counts = src->list_counts;
935  dst->bipred_scratchpad = NULL;
936  ff_h264_pred_init(&dst->hpc, src->s.codec_id, src->sps.bit_depth_luma,
937  src->sps.chroma_format_idc);
938 }
939 
944 static int context_init(H264Context *h)
945 {
947  h->s.mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
949  h->s.mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
950 
951  h->ref_cache[0][scan8[5] + 1] =
952  h->ref_cache[0][scan8[7] + 1] =
953  h->ref_cache[0][scan8[13] + 1] =
954  h->ref_cache[1][scan8[5] + 1] =
955  h->ref_cache[1][scan8[7] + 1] =
956  h->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
957 
958  return 0;
959 
960 fail:
961  return -1; // free_tables will clean up for us
962 }
963 
964 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
965  int parse_extradata);
966 
968 {
969  MpegEncContext *const s = &h->s;
970 
971  s->width = s->avctx->width;
972  s->height = s->avctx->height;
973  s->codec_id = s->avctx->codec->id;
974 
975  ff_h264dsp_init(&h->h264dsp, 8, 1);
976  ff_h264_pred_init(&h->hpc, s->codec_id, 8, 1);
977 
978  h->dequant_coeff_pps = -1;
979  s->unrestricted_mv = 1;
980 
981  /* needed so that IDCT permutation is known early */
982  ff_dsputil_init(&s->dsp, s->avctx);
983  ff_videodsp_init(&s->vdsp, 8);
984 
985  memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
986  memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
987 }
988 
990 {
991  AVCodecContext *avctx = h->s.avctx;
992 
993  if (avctx->extradata[0] == 1) {
994  int i, cnt, nalsize;
995  unsigned char *p = avctx->extradata;
996 
997  h->is_avc = 1;
998 
999  if (avctx->extradata_size < 7) {
1000  av_log(avctx, AV_LOG_ERROR, "avcC too short\n");
1001  return -1;
1002  }
1003  /* sps and pps in the avcC always have length coded with 2 bytes,
1004  * so put a fake nal_length_size = 2 while parsing them */
1005  h->nal_length_size = 2;
1006  // Decode sps from avcC
1007  cnt = *(p + 5) & 0x1f; // Number of sps
1008  p += 6;
1009  for (i = 0; i < cnt; i++) {
1010  nalsize = AV_RB16(p) + 2;
1011  if (p - avctx->extradata + nalsize > avctx->extradata_size)
1012  return -1;
1013  if (decode_nal_units(h, p, nalsize, 1) < 0) {
1014  av_log(avctx, AV_LOG_ERROR,
1015  "Decoding sps %d from avcC failed\n", i);
1016  return -1;
1017  }
1018  p += nalsize;
1019  }
1020  // Decode pps from avcC
1021  cnt = *(p++); // Number of pps
1022  for (i = 0; i < cnt; i++) {
1023  nalsize = AV_RB16(p) + 2;
1024  if (p - avctx->extradata + nalsize > avctx->extradata_size)
1025  return -1;
1026  if (decode_nal_units(h, p, nalsize, 1) < 0) {
1027  av_log(avctx, AV_LOG_ERROR,
1028  "Decoding pps %d from avcC failed\n", i);
1029  return -1;
1030  }
1031  p += nalsize;
1032  }
1033  // Now store right nal length size, that will be used to parse all other nals
1034  h->nal_length_size = (avctx->extradata[4] & 0x03) + 1;
1035  } else {
1036  h->is_avc = 0;
1037  if (decode_nal_units(h, avctx->extradata, avctx->extradata_size, 1) < 0)
1038  return -1;
1039  }
1040  return 0;
1041 }
1042 
1044 {
1045  H264Context *h = avctx->priv_data;
1046  MpegEncContext *const s = &h->s;
1047  int i;
1048 
1050 
1051  s->avctx = avctx;
1052  common_init(h);
1053 
1054  s->out_format = FMT_H264;
1055  s->workaround_bugs = avctx->workaround_bugs;
1056 
1057  /* set defaults */
1058  // s->decode_mb = ff_h263_decode_mb;
1059  s->quarter_sample = 1;
1060  if (!avctx->has_b_frames)
1061  s->low_delay = 1;
1062 
1064 
1066 
1067  h->pixel_shift = 0;
1068  h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8;
1069 
1070  h->thread_context[0] = h;
1071  h->outputed_poc = h->next_outputed_poc = INT_MIN;
1072  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1073  h->last_pocs[i] = INT_MIN;
1074  h->prev_poc_msb = 1 << 16;
1075  h->x264_build = -1;
1076  ff_h264_reset_sei(h);
1077  if (avctx->codec_id == AV_CODEC_ID_H264) {
1078  if (avctx->ticks_per_frame == 1)
1079  s->avctx->time_base.den *= 2;
1080  avctx->ticks_per_frame = 2;
1081  }
1082 
1083  if (avctx->extradata_size > 0 && avctx->extradata &&
1085  return -1;
1086 
1090  s->low_delay = 0;
1091  }
1092 
1093  return 0;
1094 }
1095 
1096 #define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b) + (size))))
1097 
1098 static void copy_picture_range(Picture **to, Picture **from, int count,
1099  MpegEncContext *new_base,
1100  MpegEncContext *old_base)
1101 {
1102  int i;
1103 
1104  for (i = 0; i < count; i++) {
1105  assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
1106  IN_RANGE(from[i], old_base->picture,
1107  sizeof(Picture) * old_base->picture_count) ||
1108  !from[i]));
1109  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
1110  }
1111 }
1112 
/**
 * Copy SPS/PPS parameter-set slot arrays between two decoder contexts.
 *
 * For each of the count slots: free the destination when the source slot is
 * empty, allocate a destination buffer when only the source slot is set,
 * then copy size bytes.
 *
 * Fix: the original memcpy'd into to[i] without checking that av_malloc()
 * succeeded, dereferencing NULL on allocation failure. On failure the slot
 * is now left NULL and the copy is skipped (the slot is treated as absent).
 */
static void copy_parameter_set(void **to, void **from, int count, int size)
{
    int i;

    for (i = 0; i < count; i++) {
        if (to[i] && !from[i])
            av_freep(&to[i]);
        else if (from[i] && !to[i])
            to[i] = av_malloc(size);

        /* guard against av_malloc() failure above */
        if (from[i] && to[i])
            memcpy(to[i], from[i], size);
    }
}
1127 
/* Frame-thread copy init: a freshly copied worker context must not share
 * SPS/PPS pointers with the source thread, so clear them and force the
 * MPEG context to be re-initialized in this thread.
 * NOTE(review): the signature line is omitted by this listing. */
1129 {
1130  H264Context *h = avctx->priv_data;
1131 
1132  if (!avctx->internal->is_copy)
1133  return 0;
1134  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
1135  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
1136 
1137  h->s.context_initialized = 0;
1138 
1139  return 0;
1140 }
1141 
/* Bulk-copy the struct members of `from` into `to`, from start_field up to
 * (but not including) end_field; relies on the members being contiguous in
 * declaration order. */
1142 #define copy_fields(to, from, start_field, end_field) \
1143  memcpy(&to->start_field, &from->start_field, \
1144  (char *)&to->end_field - (char *)&to->start_field)
1145 
1146 static int h264_slice_header_init(H264Context *, int);
1147 
1149 
1151  const AVCodecContext *src)
1152 {
/* Frame-threading state transfer: copy everything a worker thread needs
 * from the source context (h1/s1) into the destination (h/s): dimensions,
 * SPS/PPS, dequant tables, POC state and reference lists.
 * NOTE(review): this listing omits hyperlinked lines (gaps in the embedded
 * numbering), so some statements are not visible here. */
1153  H264Context *h = dst->priv_data, *h1 = src->priv_data;
1154  MpegEncContext *const s = &h->s, *const s1 = &h1->s;
1155  int inited = s->context_initialized, err;
1156  int i;
1157 
1158  if (dst == src || !s1->context_initialized)
1159  return 0;
1160 
/* source stream geometry or format changed: re-init this context */
1161  if (inited &&
1162  (s->width != s1->width ||
1163  s->height != s1->height ||
1164  s->mb_width != s1->mb_width ||
1165  s->mb_height != s1->mb_height ||
1166  h->sps.bit_depth_luma != h1->sps.bit_depth_luma ||
1167  h->sps.chroma_format_idc != h1->sps.chroma_format_idc ||
1168  h->sps.colorspace != h1->sps.colorspace)) {
1169 
1171 
1172  s->width = s1->width;
1173  s->height = s1->height;
1174  s->mb_height = s1->mb_height;
1175  h->b_stride = h1->b_stride;
1176 
1177  if ((err = h264_slice_header_init(h, 1)) < 0) {
1178  av_log(h->s.avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
1179  return err;
1180  }
1181  h->context_reinitialized = 1;
1182 
1183  /* update linesize on resize for h264. The h264 decoder doesn't
1184  * necessarily call ff_MPV_frame_start in the new thread */
1185  s->linesize = s1->linesize;
1186  s->uvlinesize = s1->uvlinesize;
1187 
1188  /* copy block_offset since frame_start may not be called */
1189  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
1191  }
1192 
1193  err = ff_mpeg_update_thread_context(dst, src);
1194  if (err)
1195  return err;
1196 
/* first update of a fresh copy: take over everything wholesale */
1197  if (!inited) {
1198  for (i = 0; i < MAX_SPS_COUNT; i++)
1199  av_freep(h->sps_buffers + i);
1200 
1201  for (i = 0; i < MAX_PPS_COUNT; i++)
1202  av_freep(h->pps_buffers + i);
1203 
1204  // copy all fields after MpegEnc
1205  memcpy(&h->s + 1, &h1->s + 1,
1206  sizeof(H264Context) - sizeof(MpegEncContext));
1207  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
1208  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
1209  if (ff_h264_alloc_tables(h) < 0) {
1210  av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
1211  return AVERROR(ENOMEM);
1212  }
1213  context_init(h);
1214 
/* the memcpy above duplicated h1's pointers; reset the per-thread ones */
1215  for (i = 0; i < 2; i++) {
1216  h->rbsp_buffer[i] = NULL;
1217  h->rbsp_buffer_size[i] = 0;
1218  }
1219  h->bipred_scratchpad = NULL;
1220 
1221  h->thread_context[0] = h;
1222 
1223  s->dsp.clear_blocks(h->mb);
1224  s->dsp.clear_blocks(h->mb + (24 * 16 << h->pixel_shift));
1225  }
1226 
1227  /* frame_start may not be called for the next thread (if it's decoding
1228  * a bottom field) so this has to be allocated here */
1229  if (!h->bipred_scratchpad)
1230  h->bipred_scratchpad = av_malloc(16 * 6 * s->linesize);
1231 
1232  // extradata/NAL handling
1233  h->is_avc = h1->is_avc;
1234 
1235  // SPS/PPS
1236  copy_parameter_set((void **)h->sps_buffers, (void **)h1->sps_buffers,
1237  MAX_SPS_COUNT, sizeof(SPS));
1238  h->sps = h1->sps;
1239  copy_parameter_set((void **)h->pps_buffers, (void **)h1->pps_buffers,
1240  MAX_PPS_COUNT, sizeof(PPS));
1241  h->pps = h1->pps;
1242 
1243  // Dequantization matrices
1244  // FIXME these are big - can they be only copied when PPS changes?
1245  copy_fields(h, h1, dequant4_buffer, dequant4_coeff);
1246 
/* the dequant*_coeff pointers alias the buffers; rebase them onto h's */
1247  for (i = 0; i < 6; i++)
1248  h->dequant4_coeff[i] = h->dequant4_buffer[0] +
1249  (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]);
1250 
1251  for (i = 0; i < 6; i++)
1252  h->dequant8_coeff[i] = h->dequant8_buffer[0] +
1253  (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]);
1254 
1255  h->dequant_coeff_pps = h1->dequant_coeff_pps;
1256 
1257  // POC timing
1258  copy_fields(h, h1, poc_lsb, redundant_pic_count);
1259 
1260  // reference lists
1261  copy_fields(h, h1, ref_count, list_count);
1262  copy_fields(h, h1, ref_list, intra_gb);
1263  copy_fields(h, h1, short_ref, cabac_init_idc);
1264 
/* Picture pointers must be rebased from s1's picture array onto s's */
1265  copy_picture_range(h->short_ref, h1->short_ref, 32, s, s1);
1266  copy_picture_range(h->long_ref, h1->long_ref, 32, s, s1);
1267  copy_picture_range(h->delayed_pic, h1->delayed_pic,
1268  MAX_DELAYED_PIC_COUNT + 2, s, s1);
1269 
1270  h->last_slice_type = h1->last_slice_type;
1271 
1272  if (!s->current_picture_ptr)
1273  return 0;
1274 
1275  if (!s->droppable) {
1277  h->prev_poc_msb = h->poc_msb;
1278  h->prev_poc_lsb = h->poc_lsb;
1279  }
1281  h->prev_frame_num = h->frame_num;
1283 
1284  return err;
1285 }
1286 
/* Per-frame start: begin a new frame via ff_MPV_frame_start, precompute the
 * per-block destination offsets for luma/chroma, and reset slice bookkeeping.
 * NOTE(review): the signature line and some statements are omitted by this
 * listing (gaps in the embedded numbering). */
1288 {
1289  MpegEncContext *const s = &h->s;
1290  int i;
1291  const int pixel_shift = h->pixel_shift;
1292 
1293  h->next_output_pic = NULL;
1294 
1295  if (ff_MPV_frame_start(s, s->avctx) < 0)
1296  return -1;
1297  ff_er_frame_start(s);
1298  /*
1299  * ff_MPV_frame_start uses pict_type to derive key_frame.
1300  * This is incorrect for H.264; IDR markings must be used.
1301  * Zero here; IDR markings per slice in frame or fields are ORed in later.
1302  * See decode_nal_units().
1303  */
1306 
1307  assert(s->linesize && s->uvlinesize);
1308 
/* block_offset[]: pixel offset of each 4x4 block inside the macroblock,
 * derived from the scan8 layout; the [48+] variants use doubled stride
 * (field/MBAFF addressing) */
1309  for (i = 0; i < 16; i++) {
1310  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * s->linesize * ((scan8[i] - scan8[0]) >> 3);
1311  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * s->linesize * ((scan8[i] - scan8[0]) >> 3);
1312  }
1313  for (i = 0; i < 16; i++) {
1314  h->block_offset[16 + i] =
1315  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * s->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1316  h->block_offset[48 + 16 + i] =
1317  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * s->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1318  }
1319 
1320  /* can't be in alloc_tables because linesize isn't known there.
1321  * FIXME: redo bipred weight to not require extra buffer? */
1322  for (i = 0; i < s->slice_context_count; i++)
1323  if (h->thread_context[i] && !h->thread_context[i]->bipred_scratchpad)
1324  h->thread_context[i]->bipred_scratchpad = av_malloc(16 * 6 * s->linesize);
1325 
1326  /* Some macroblocks can be accessed before they're available in case
1327  * of lost slices, MBAFF or threading. */
1328  memset(h->slice_table, -1,
1329  (s->mb_height * s->mb_stride - 1) * sizeof(*h->slice_table));
1330 
1331  // s->decode = (s->flags & CODEC_FLAG_PSNR) || !s->encoding ||
1332  // s->current_picture.f.reference /* || h->contains_intra */ || 1;
1333 
1334  /* We mark the current picture as non-reference after allocating it, so
1335  * that if we break out due to an error it can be released automatically
1336  * in the next ff_MPV_frame_start().
1337  * SVQ3 as well as most other codecs have only last/next/current and thus
1338  * get released even with set reference, besides SVQ3 and others do not
1339  * mark frames as reference later "naturally". */
1340  if (s->codec_id != AV_CODEC_ID_SVQ3)
1342 
1344  s->current_picture_ptr->field_poc[1] = INT_MAX;
1345 
1346  assert(s->current_picture_ptr->long_ref == 0);
1347 
1348  return 0;
1349 }
1350 
/* Post-decode bookkeeping for the just-decoded picture: derive interlacing
 * and repeat flags from picture timing SEI, then reorder pictures from
 * coding order into display order (selecting h->next_output_pic).
 * NOTE(review): this listing omits hyperlinked lines — several switch case
 * labels and statements are not visible here. */
1359 static void decode_postinit(H264Context *h, int setup_finished)
1360 {
1361  MpegEncContext *const s = &h->s;
1362  Picture *out = s->current_picture_ptr;
1363  Picture *cur = s->current_picture_ptr;
1364  int i, pics, out_of_order, out_idx;
1365  int invalid = 0, cnt = 0;
1366 
1369 
1370  if (h->next_output_pic)
1371  return;
1372 
/* both field POCs must be known before the frame can be finalized */
1373  if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
1374  /* FIXME: if we have two PAFF fields in one packet, we can't start
1375  * the next thread here. If we have one field per packet, we can.
1376  * The check in decode_nal_units() is not good enough to find this
1377  * yet, so we assume the worst for now. */
1378  // if (setup_finished)
1379  // ff_thread_finish_setup(s->avctx);
1380  return;
1381  }
1382 
1383  cur->f.interlaced_frame = 0;
1384  cur->f.repeat_pict = 0;
1385 
1386  /* Signal interlacing information externally. */
1387  /* Prioritize picture timing SEI information over used
1388  * decoding process if it exists. */
1389 
1390  if (h->sps.pic_struct_present_flag) {
1391  switch (h->sei_pic_struct) {
1392  case SEI_PIC_STRUCT_FRAME:
1393  break;
1396  cur->f.interlaced_frame = 1;
1397  break;
1401  cur->f.interlaced_frame = 1;
1402  else
1403  // try to flag soft telecine progressive
1405  break;
1408  /* Signal the possibility of telecined film externally
1409  * (pic_struct 5,6). From these hints, let the applications
1410  * decide if they apply deinterlacing. */
1411  cur->f.repeat_pict = 1;
1412  break;
1414  // Force progressive here, doubling interlaced frame is a bad idea.
1415  cur->f.repeat_pict = 2;
1416  break;
1418  cur->f.repeat_pict = 4;
1419  break;
1420  }
1421 
1422  if ((h->sei_ct_type & 3) &&
1424  cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
1425  } else {
1426  /* Derive interlacing flag from used decoding process. */
1428  }
1430 
1431  if (cur->field_poc[0] != cur->field_poc[1]) {
1432  /* Derive top_field_first from field pocs. */
1433  cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
1434  } else {
1435  if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) {
1436  /* Use picture timing SEI information. Even if it is a
1437  * information of a past frame, better than nothing. */
1440  cur->f.top_field_first = 1;
1441  else
1442  cur->f.top_field_first = 0;
1443  } else {
1444  /* Most likely progressive */
1445  cur->f.top_field_first = 0;
1446  }
1447  }
1448 
1449  // FIXME do something with unavailable reference frames
1450 
1451  /* Sort B-frames into display order */
1452 
1456  s->low_delay = 0;
1457  }
1458 
1462  s->low_delay = 0;
1463  }
1464 
/* append cur to the delayed-picture list, pinning it as a reference */
1465  pics = 0;
1466  while (h->delayed_pic[pics])
1467  pics++;
1468 
1469  assert(pics <= MAX_DELAYED_PIC_COUNT);
1470 
1471  h->delayed_pic[pics++] = cur;
1472  if (cur->f.reference == 0)
1473  cur->f.reference = DELAYED_PIC_REF;
1474 
1475  /* Frame reordering. This code takes pictures from coding order and sorts
1476  * them by their incremental POC value into display order. It supports POC
1477  * gaps, MMCO reset codes and random resets.
1478  * A "display group" can start either with a IDR frame (f.key_frame = 1),
1479  * and/or can be closed down with a MMCO reset code. In sequences where
1480  * there is no delay, we can't detect that (since the frame was already
1481  * output to the user), so we also set h->mmco_reset to detect the MMCO
1482  * reset code.
1483  * FIXME: if we detect insufficient delays (as per s->avctx->has_b_frames),
1484  * we increase the delay between input and output. All frames affected by
1485  * the lag (e.g. those that should have been output before another frame
1486  * that we already returned to the user) will be dropped. This is a bug
1487  * that we will fix later. */
1488  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
1489  cnt += out->poc < h->last_pocs[i];
1490  invalid += out->poc == INT_MIN;
1491  }
1492  if (!h->mmco_reset && !cur->f.key_frame &&
1493  cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
1494  h->mmco_reset = 2;
1495  if (pics > 1)
1496  h->delayed_pic[pics - 2]->mmco_reset = 2;
1497  }
1498  if (h->mmco_reset || cur->f.key_frame) {
1499  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1500  h->last_pocs[i] = INT_MIN;
1501  cnt = 0;
1502  invalid = MAX_DELAYED_PIC_COUNT;
1503  }
/* pick the lowest-POC picture in the current display group */
1504  out = h->delayed_pic[0];
1505  out_idx = 0;
1506  for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
1507  h->delayed_pic[i] &&
1508  !h->delayed_pic[i - 1]->mmco_reset &&
1509  !h->delayed_pic[i]->f.key_frame;
1510  i++)
1511  if (h->delayed_pic[i]->poc < out->poc) {
1512  out = h->delayed_pic[i];
1513  out_idx = i;
1514  }
1515  if (s->avctx->has_b_frames == 0 &&
1516  (h->delayed_pic[0]->f.key_frame || h->mmco_reset))
1517  h->next_outputed_poc = INT_MIN;
1518  out_of_order = !out->f.key_frame && !h->mmco_reset &&
1519  (out->poc < h->next_outputed_poc);
1520 
1523  } else if (out_of_order && pics - 1 == s->avctx->has_b_frames &&
1524  s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
1525  if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
1526  s->avctx->has_b_frames = FFMAX(s->avctx->has_b_frames, cnt);
1527  }
1528  s->low_delay = 0;
1529  } else if (s->low_delay &&
1530  ((h->next_outputed_poc != INT_MIN &&
1531  out->poc > h->next_outputed_poc + 2) ||
1532  cur->f.pict_type == AV_PICTURE_TYPE_B)) {
1533  s->low_delay = 0;
1534  s->avctx->has_b_frames++;
1535  }
1536 
/* remove the chosen picture from the delayed list */
1537  if (pics > s->avctx->has_b_frames) {
1538  out->f.reference &= ~DELAYED_PIC_REF;
1539  // for frame threading, the owner must be the second field's thread or
1540  // else the first thread can release the picture and reuse it unsafely
1541  out->owner2 = s;
1542  for (i = out_idx; h->delayed_pic[i]; i++)
1543  h->delayed_pic[i] = h->delayed_pic[i + 1];
1544  }
1545  memmove(h->last_pocs, &h->last_pocs[1],
1546  sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1));
1547  h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc;
1548  if (!out_of_order && pics > s->avctx->has_b_frames) {
1549  h->next_output_pic = out;
1550  if (out->mmco_reset) {
1551  if (out_idx > 0) {
1552  h->next_outputed_poc = out->poc;
1553  h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset;
1554  } else {
1555  h->next_outputed_poc = INT_MIN;
1556  }
1557  } else {
1558  if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f.key_frame) {
1559  h->next_outputed_poc = INT_MIN;
1560  } else {
1561  h->next_outputed_poc = out->poc;
1562  }
1563  }
1564  h->mmco_reset = 0;
1565  } else {
1566  av_log(s->avctx, AV_LOG_DEBUG, "no picture\n");
1567  }
1568 
1569  if (setup_finished)
1571 }
1572 
1574  uint8_t *src_cb, uint8_t *src_cr,
1575  int linesize, int uvlinesize,
1576  int simple)
1577 {
/* Save the bottom row(s) of the current macroblock into h->top_borders so
 * intra prediction of the MB row below can use them after deblocking has
 * overwritten the frame data. Layout of top_borders: luma at offset 0,
 * cb/cr following, sizes depending on chroma format and pixel_shift.
 * NOTE(review): the first signature line is omitted by this listing. */
1578  MpegEncContext *const s = &h->s;
1579  uint8_t *top_border;
1580  int top_idx = 1;
1581  const int pixel_shift = h->pixel_shift;
1582  int chroma444 = CHROMA444;
1583  int chroma422 = CHROMA422;
1584 
1585  src_y -= linesize;
1586  src_cb -= uvlinesize;
1587  src_cr -= uvlinesize;
1588 
/* MBAFF: for the bottom MB of a pair, back up row 15 into slot 0 */
1589  if (!simple && FRAME_MBAFF) {
1590  if (s->mb_y & 1) {
1591  if (!MB_MBAFF) {
1592  top_border = h->top_borders[0][s->mb_x];
1593  AV_COPY128(top_border, src_y + 15 * linesize);
1594  if (pixel_shift)
1595  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
1596  if (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1597  if (chroma444) {
1598  if (pixel_shift) {
1599  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
1600  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
1601  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
1602  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
1603  } else {
1604  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
1605  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
1606  }
1607  } else if (chroma422) {
1608  if (pixel_shift) {
1609  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
1610  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
1611  } else {
1612  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
1613  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
1614  }
1615  } else {
1616  if (pixel_shift) {
1617  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
1618  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
1619  } else {
1620  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1621  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1622  }
1623  }
1624  }
1625  }
1626  } else if (MB_MBAFF) {
1627  top_idx = 0;
1628  } else
1629  return;
1630  }
1631 
1632  top_border = h->top_borders[top_idx][s->mb_x];
1633  /* There are two lines saved, the line above the top macroblock
1634  * of a pair, and the line above the bottom macroblock. */
1635  AV_COPY128(top_border, src_y + 16 * linesize);
1636  if (pixel_shift)
1637  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
1638 
1639  if (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1640  if (chroma444) {
1641  if (pixel_shift) {
1642  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
1643  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
1644  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
1645  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
1646  } else {
1647  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
1648  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
1649  }
1650  } else if (chroma422) {
1651  if (pixel_shift) {
1652  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
1653  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
1654  } else {
1655  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
1656  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
1657  }
1658  } else {
1659  if (pixel_shift) {
1660  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
1661  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
1662  } else {
1663  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
1664  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
1665  }
1666  }
1667  }
1668 }
1669 
1671  uint8_t *src_cb, uint8_t *src_cr,
1672  int linesize, int uvlinesize,
1673  int xchg, int chroma444,
1674  int simple, int pixel_shift)
1675 {
/* Swap (xchg=1) or copy the saved top-border samples with the row above the
 * current macroblock, so deblocked and pre-deblock pixels can be exchanged
 * around intra prediction. The XCHG macro below swaps or copies 8/16 bytes
 * depending on pixel_shift and its xchg argument.
 * NOTE(review): the first signature line is omitted by this listing. */
1676  MpegEncContext *const s = &h->s;
1677  int deblock_topleft;
1678  int deblock_top;
1679  int top_idx = 1;
1680  uint8_t *top_border_m1;
1681  uint8_t *top_border;
1682 
1683  if (!simple && FRAME_MBAFF) {
1684  if (s->mb_y & 1) {
1685  if (!MB_MBAFF)
1686  return;
1687  } else {
1688  top_idx = MB_MBAFF ? 0 : 1;
1689  }
1690  }
1691 
/* deblocking_filter==2 means "filter within slice only": check slice ids */
1692  if (h->deblocking_filter == 2) {
1693  deblock_topleft = h->slice_table[h->mb_xy - 1 - s->mb_stride] == h->slice_num;
1694  deblock_top = h->top_type;
1695  } else {
1696  deblock_topleft = (s->mb_x > 0);
1697  deblock_top = (s->mb_y > !!MB_FIELD);
1698  }
1699 
1700  src_y -= linesize + 1 + pixel_shift;
1701  src_cb -= uvlinesize + 1 + pixel_shift;
1702  src_cr -= uvlinesize + 1 + pixel_shift;
1703 
1704  top_border_m1 = h->top_borders[top_idx][s->mb_x - 1];
1705  top_border = h->top_borders[top_idx][s->mb_x];
1706 
1707 #define XCHG(a, b, xchg) \
1708  if (pixel_shift) { \
1709  if (xchg) { \
1710  AV_SWAP64(b + 0, a + 0); \
1711  AV_SWAP64(b + 8, a + 8); \
1712  } else { \
1713  AV_COPY128(b, a); \
1714  } \
1715  } else if (xchg) \
1716  AV_SWAP64(b, a); \
1717  else \
1718  AV_COPY64(b, a);
1719 
1720  if (deblock_top) {
1721  if (deblock_topleft) {
1722  XCHG(top_border_m1 + (8 << pixel_shift),
1723  src_y - (7 << pixel_shift), 1);
1724  }
1725  XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
1726  XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
1727  if (s->mb_x + 1 < s->mb_width) {
1728  XCHG(h->top_borders[top_idx][s->mb_x + 1],
1729  src_y + (17 << pixel_shift), 1);
1730  }
1731  }
1732  if (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1733  if (chroma444) {
1734  if (deblock_topleft) {
1735  XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
1736  XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
1737  }
1738  XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
1739  XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
1740  XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
1741  XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
1742  if (s->mb_x + 1 < s->mb_width) {
1743  XCHG(h->top_borders[top_idx][s->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
1744  XCHG(h->top_borders[top_idx][s->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
1745  }
1746  } else {
1747  if (deblock_top) {
1748  if (deblock_topleft) {
1749  XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
1750  XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
1751  }
1752  XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
1753  XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
1754  }
1755  }
1756  }
1757 }
1758 
1759 static av_always_inline int dctcoef_get(DCTELEM *mb, int high_bit_depth,
1760  int index)
1761 {
1762  if (high_bit_depth) {
1763  return AV_RN32A(((int32_t *)mb) + index);
1764  } else
1765  return AV_RN16A(mb + index);
1766 }
1767 
1768 static av_always_inline void dctcoef_set(DCTELEM *mb, int high_bit_depth,
1769  int index, int value)
1770 {
1771  if (high_bit_depth) {
1772  AV_WN32A(((int32_t *)mb) + index, value);
1773  } else
1774  AV_WN16A(mb + index, value);
1775 }
1776 
1778  int mb_type, int is_h264,
1779  int simple,
1780  int transform_bypass,
1781  int pixel_shift,
1782  int *block_offset,
1783  int linesize,
1784  uint8_t *dest_y, int p)
1785 {
/* Run intra prediction for one luma plane p and add the residual:
 * 4x4/8x8 intra modes per block, or a 16x16 mode plus DC dequant/IDCT.
 * NOTE(review): the first signature line and a few statements are omitted
 * by this listing (gaps in the embedded numbering). */
1786  MpegEncContext *const s = &h->s;
1787  void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
1788  void (*idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride);
1789  int i;
1790  int qscale = p == 0 ? s->qscale : h->chroma_qp[p - 1];
1791  block_offset += 16 * p;
1792  if (IS_INTRA4x4(mb_type)) {
1793  if (simple || !s->encoding) {
1794  if (IS_8x8DCT(mb_type)) {
1795  if (transform_bypass) {
1796  idct_dc_add =
1797  idct_add = s->dsp.add_pixels8;
1798  } else {
1799  idct_dc_add = h->h264dsp.h264_idct8_dc_add;
1801  }
1802  for (i = 0; i < 16; i += 4) {
1803  uint8_t *const ptr = dest_y + block_offset[i];
1804  const int dir = h->intra4x4_pred_mode_cache[scan8[i]];
/* profile_idc 244 (High 4:4:4) lossless: prediction-added bypass path */
1805  if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
1806  h->hpc.pred8x8l_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
1807  } else {
1808  const int nnz = h->non_zero_count_cache[scan8[i + p * 16]];
1809  h->hpc.pred8x8l[dir](ptr, (h->topleft_samples_available << i) & 0x8000,
1810  (h->topright_samples_available << i) & 0x4000, linesize);
1811  if (nnz) {
1812  if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
1813  idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
1814  else
1815  idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
1816  }
1817  }
1818  }
1819  } else {
1820  if (transform_bypass) {
1821  idct_dc_add =
1822  idct_add = s->dsp.add_pixels4;
1823  } else {
1824  idct_dc_add = h->h264dsp.h264_idct_dc_add;
1826  }
1827  for (i = 0; i < 16; i++) {
1828  uint8_t *const ptr = dest_y + block_offset[i];
1829  const int dir = h->intra4x4_pred_mode_cache[scan8[i]];
1830 
1831  if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
1832  h->hpc.pred4x4_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
1833  } else {
1834  uint8_t *topright;
1835  int nnz, tr;
1836  uint64_t tr_high;
/* top-right neighbours may be unavailable; replicate the last top pixel */
1837  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
1838  const int topright_avail = (h->topright_samples_available << i) & 0x8000;
1839  assert(s->mb_y || linesize <= block_offset[i]);
1840  if (!topright_avail) {
1841  if (pixel_shift) {
1842  tr_high = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
1843  topright = (uint8_t *)&tr_high;
1844  } else {
1845  tr = ptr[3 - linesize] * 0x01010101u;
1846  topright = (uint8_t *)&tr;
1847  }
1848  } else
1849  topright = ptr + (4 << pixel_shift) - linesize;
1850  } else
1851  topright = NULL;
1852 
1853  h->hpc.pred4x4[dir](ptr, topright, linesize);
1854  nnz = h->non_zero_count_cache[scan8[i + p * 16]];
1855  if (nnz) {
1856  if (is_h264) {
1857  if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
1858  idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
1859  else
1860  idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
1861  } else if (CONFIG_SVQ3_DECODER)
1862  ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize, qscale, 0);
1863  }
1864  }
1865  }
1866  }
1867  }
1868  } else {
1869  h->hpc.pred16x16[h->intra16x16_pred_mode](dest_y, linesize);
1870  if (is_h264) {
1872  if (!transform_bypass)
1873  h->h264dsp.h264_luma_dc_dequant_idct(h->mb + (p * 256 << pixel_shift),
1874  h->mb_luma_dc[p],
1875  h->dequant4_coeff[p][qscale][0]);
1876  else {
/* bypass: scatter DC values back to their raster positions */
1877  static const uint8_t dc_mapping[16] = {
1878  0 * 16, 1 * 16, 4 * 16, 5 * 16,
1879  2 * 16, 3 * 16, 6 * 16, 7 * 16,
1880  8 * 16, 9 * 16, 12 * 16, 13 * 16,
1881  10 * 16, 11 * 16, 14 * 16, 15 * 16 };
1882  for (i = 0; i < 16; i++)
1883  dctcoef_set(h->mb + (p * 256 << pixel_shift),
1884  pixel_shift, dc_mapping[i],
1885  dctcoef_get(h->mb_luma_dc[p],
1886  pixel_shift, i));
1887  }
1888  }
1889  } else if (CONFIG_SVQ3_DECODER)
1890  ff_svq3_luma_dc_dequant_idct_c(h->mb + p * 256,
1891  h->mb_luma_dc[p], qscale);
1892  }
1893 }
1894 
1896  int is_h264, int simple,
1897  int transform_bypass,
1898  int pixel_shift,
1899  int *block_offset,
1900  int linesize,
1901  uint8_t *dest_y, int p)
1902 {
/* Add the luma residual for non-intra4x4 macroblocks of plane p:
 * intra16x16 uses add16intra, inter uses idct(8)_add4/add16, with
 * pixel-add paths when the transform is bypassed (lossless).
 * NOTE(review): the first signature line and a few statements are omitted
 * by this listing. */
1903  MpegEncContext *const s = &h->s;
1904  void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
1905  int i;
1906  block_offset += 16 * p;
1907  if (!IS_INTRA4x4(mb_type)) {
1908  if (is_h264) {
1909  if (IS_INTRA16x16(mb_type)) {
1910  if (transform_bypass) {
1911  if (h->sps.profile_idc == 244 &&
1914  h->hpc.pred16x16_add[h->intra16x16_pred_mode](dest_y, block_offset,
1915  h->mb + (p * 256 << pixel_shift),
1916  linesize);
1917  } else {
1918  for (i = 0; i < 16; i++)
1919  if (h->non_zero_count_cache[scan8[i + p * 16]] ||
1920  dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
1921  s->dsp.add_pixels4(dest_y + block_offset[i],
1922  h->mb + (i * 16 + p * 256 << pixel_shift),
1923  linesize);
1924  }
1925  } else {
1926  h->h264dsp.h264_idct_add16intra(dest_y, block_offset,
1927  h->mb + (p * 256 << pixel_shift),
1928  linesize,
1929  h->non_zero_count_cache + p * 5 * 8);
1930  }
1931  } else if (h->cbp & 15) {
1932  if (transform_bypass) {
1933  const int di = IS_8x8DCT(mb_type) ? 4 : 1;
1934  idct_add = IS_8x8DCT(mb_type) ? s->dsp.add_pixels8
1935  : s->dsp.add_pixels4;
1936  for (i = 0; i < 16; i += di)
1937  if (h->non_zero_count_cache[scan8[i + p * 16]])
1938  idct_add(dest_y + block_offset[i],
1939  h->mb + (i * 16 + p * 256 << pixel_shift),
1940  linesize);
1941  } else {
1942  if (IS_8x8DCT(mb_type))
1943  h->h264dsp.h264_idct8_add4(dest_y, block_offset,
1944  h->mb + (p * 256 << pixel_shift),
1945  linesize,
1946  h->non_zero_count_cache + p * 5 * 8);
1947  else
1948  h->h264dsp.h264_idct_add16(dest_y, block_offset,
1949  h->mb + (p * 256 << pixel_shift),
1950  linesize,
1951  h->non_zero_count_cache + p * 5 * 8);
1952  }
1953  }
1954  } else if (CONFIG_SVQ3_DECODER) {
1955  for (i = 0; i < 16; i++)
1956  if (h->non_zero_count_cache[scan8[i + p * 16]] || h->mb[i * 16 + p * 256]) {
1957  // FIXME benchmark weird rule, & below
1958  uint8_t *const ptr = dest_y + block_offset[i];
1959  ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize,
1960  s->qscale, IS_INTRA(mb_type) ? 1 : 0);
1961  }
1962  }
1963  }
1964 }
1965 
/* Instantiate the macroblock decoders from h264_mb_template.c three times:
 * 8-bit simple, 16-bit simple, then the complex (SIMPLE=0) variant. */
1966 #define BITS 8
1967 #define SIMPLE 1
1968 #include "h264_mb_template.c"
1969 
1970 #undef BITS
1971 #define BITS 16
1972 #include "h264_mb_template.c"
1973 
1974 #undef SIMPLE
1975 #define SIMPLE 0
1976 #include "h264_mb_template.c"
1977 
/* Dispatch macroblock reconstruction to the right template instantiation:
 * 4:4:4 vs. 4:2:x, simple vs. complex (gray/PCM/lossless/CONFIG_SMALL),
 * and 8- vs. 16-bit sample paths.
 * NOTE(review): the signature line is omitted by this listing. */
1979 {
1980  MpegEncContext *const s = &h->s;
1981  const int mb_xy = h->mb_xy;
1982  const int mb_type = s->current_picture.f.mb_type[mb_xy];
1983  int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || s->qscale == 0;
1984 
1985  if (CHROMA444) {
1986  if (is_complex || h->pixel_shift)
1987  hl_decode_mb_444_complex(h);
1988  else
1989  hl_decode_mb_444_simple_8(h);
1990  } else if (is_complex) {
1991  hl_decode_mb_complex(h);
1992  } else if (h->pixel_shift) {
1993  hl_decode_mb_simple_16(h);
1994  } else
1995  hl_decode_mb_simple_8(h);
1996 }
1997 
/* Parse the explicit weighted-prediction table from the slice header:
 * per list and per reference, optional luma and chroma (weight, offset)
 * pairs; absent entries fall back to the default 1<<log2_denom weight.
 * NOTE(review): the signature line and the log2_weight_denom reads are on
 * lines omitted by this listing. */
1999 {
2000  MpegEncContext *const s = &h->s;
2001  int list, i;
2002  int luma_def, chroma_def;
2003 
2004  h->use_weight = 0;
2005  h->use_weight_chroma = 0;
2007  if (h->sps.chroma_format_idc)
2009  luma_def = 1 << h->luma_log2_weight_denom;
2010  chroma_def = 1 << h->chroma_log2_weight_denom;
2011 
2012  for (list = 0; list < 2; list++) {
2013  h->luma_weight_flag[list] = 0;
2014  h->chroma_weight_flag[list] = 0;
2015  for (i = 0; i < h->ref_count[list]; i++) {
2016  int luma_weight_flag, chroma_weight_flag;
2017 
2018  luma_weight_flag = get_bits1(&s->gb);
2019  if (luma_weight_flag) {
2020  h->luma_weight[i][list][0] = get_se_golomb(&s->gb);
2021  h->luma_weight[i][list][1] = get_se_golomb(&s->gb);
/* only flag "weighted" when the pair differs from the neutral default */
2022  if (h->luma_weight[i][list][0] != luma_def ||
2023  h->luma_weight[i][list][1] != 0) {
2024  h->use_weight = 1;
2025  h->luma_weight_flag[list] = 1;
2026  }
2027  } else {
2028  h->luma_weight[i][list][0] = luma_def;
2029  h->luma_weight[i][list][1] = 0;
2030  }
2031 
2032  if (h->sps.chroma_format_idc) {
2033  chroma_weight_flag = get_bits1(&s->gb);
2034  if (chroma_weight_flag) {
2035  int j;
2036  for (j = 0; j < 2; j++) {
2037  h->chroma_weight[i][list][j][0] = get_se_golomb(&s->gb);
2038  h->chroma_weight[i][list][j][1] = get_se_golomb(&s->gb);
2039  if (h->chroma_weight[i][list][j][0] != chroma_def ||
2040  h->chroma_weight[i][list][j][1] != 0) {
2041  h->use_weight_chroma = 1;
2042  h->chroma_weight_flag[list] = 1;
2043  }
2044  }
2045  } else {
2046  int j;
2047  for (j = 0; j < 2; j++) {
2048  h->chroma_weight[i][list][j][0] = chroma_def;
2049  h->chroma_weight[i][list][j][1] = 0;
2050  }
2051  }
2052  }
2053  }
2055  break;
2056  }
2057  h->use_weight = h->use_weight || h->use_weight_chroma;
2058  return 0;
2059 }
2060 
/* Initialize the implicit B-frame weight table: weights are derived from
 * the POC distances between the current picture and each ref pair
 * (dist_scale_factor), defaulting to 32/32 when out of range or for
 * long-term references. field < 0 means frame (or both fields); otherwise
 * only the given field's entries (offset 16, doubled ref counts) are set. */
2066 static void implicit_weight_table(H264Context *h, int field)
2067 {
2068  MpegEncContext *const s = &h->s;
2069  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
2070 
2071  for (i = 0; i < 2; i++) {
2072  h->luma_weight_flag[i] = 0;
2073  h->chroma_weight_flag[i] = 0;
2074  }
2075 
2076  if (field < 0) {
2077  if (s->picture_structure == PICT_FRAME) {
2078  cur_poc = s->current_picture_ptr->poc;
2079  } else {
2080  cur_poc = s->current_picture_ptr->field_poc[s->picture_structure - 1];
2081  }
/* symmetric single-ref case: implicit weighting degenerates to none */
2082  if (h->ref_count[0] == 1 && h->ref_count[1] == 1 && !FRAME_MBAFF &&
2083  h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2 * cur_poc) {
2084  h->use_weight = 0;
2085  h->use_weight_chroma = 0;
2086  return;
2087  }
2088  ref_start = 0;
2089  ref_count0 = h->ref_count[0];
2090  ref_count1 = h->ref_count[1];
2091  } else {
2092  cur_poc = s->current_picture_ptr->field_poc[field];
2093  ref_start = 16;
2094  ref_count0 = 16 + 2 * h->ref_count[0];
2095  ref_count1 = 16 + 2 * h->ref_count[1];
2096  }
2097 
/* use_weight == 2 selects the implicit-weight path in prediction */
2098  h->use_weight = 2;
2099  h->use_weight_chroma = 2;
2100  h->luma_log2_weight_denom = 5;
2101  h->chroma_log2_weight_denom = 5;
2102 
2103  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
2104  int poc0 = h->ref_list[0][ref0].poc;
2105  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
2106  int w = 32;
2107  if (!h->ref_list[0][ref0].long_ref && !h->ref_list[1][ref1].long_ref) {
2108  int poc1 = h->ref_list[1][ref1].poc;
2109  int td = av_clip(poc1 - poc0, -128, 127);
2110  if (td) {
2111  int tb = av_clip(cur_poc - poc0, -128, 127);
2112  int tx = (16384 + (FFABS(td) >> 1)) / td;
2113  int dist_scale_factor = (tb * tx + 32) >> 8;
2114  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
2115  w = 64 - dist_scale_factor;
2116  }
2117  }
2118  if (field < 0) {
2119  h->implicit_weight[ref0][ref1][0] =
2120  h->implicit_weight[ref0][ref1][1] = w;
2121  } else {
2122  h->implicit_weight[ref0][ref1][field] = w;
2123  }
2124  }
2125  }
2126 }
2127 
/* Reset the POC / frame_num prediction state on an IDR slice so decoding
 * restarts from a clean reference state.
 * NOTE(review): the doxygen extraction dropped rendered line 2133 here; the
 * upstream function presumably also drops all reference frames at that
 * point — confirm against the full source. */
2131 static void idr(H264Context *h)
2132 {
2134  h->prev_frame_num = 0;
2135  h->prev_frame_num_offset = 0;
2136  h->prev_poc_msb =
2137  h->prev_poc_lsb = 0;
2138 }
2139 
2140 /* forget old pics after a seek */
2141 static void flush_change(H264Context *h)
2142 {
2143  int i;
2144  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
2145  h->last_pocs[i] = INT_MIN;
2146  h->outputed_poc = h->next_outputed_poc = INT_MIN;
2147  h->prev_interlaced_frame = 1;
2148  idr(h);
2149  if (h->s.current_picture_ptr)
2150  h->s.current_picture_ptr->f.reference = 0;
2151  h->s.first_field = 0;
2152  memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
2153  memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
2154  memset(h->default_ref_list[0], 0, sizeof(h->default_ref_list[0]));
2155  memset(h->default_ref_list[1], 0, sizeof(h->default_ref_list[1]));
2156  ff_h264_reset_sei(h);
2157 }
2158 
2159 /* forget old pics after a seek */
2160 static void flush_dpb(AVCodecContext *avctx)
2161 {
2162  H264Context *h = avctx->priv_data;
2163  int i;
2164 
2165  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
2166  if (h->delayed_pic[i])
2167  h->delayed_pic[i]->f.reference = 0;
2168  h->delayed_pic[i] = NULL;
2169  }
2170 
2171  flush_change(h);
2172  ff_mpeg_flush(avctx);
2173 }
2174 
/* Derive the picture order count (POC) of the current picture from the
 * slice-header syntax, per the three poc_type modes of H.264 clause 8.2.1.
 * Stores the per-field POCs and the frame POC into the current picture.
 * NOTE(review): the extraction dropped rendered lines 2182/2244/2246; the
 * final field_poc stores are presumably guarded by picture_structure checks
 * upstream — confirm against the full source. */
2175 static int init_poc(H264Context *h)
2176 {
2177  MpegEncContext *const s = &h->s;
2178  const int max_frame_num = 1 << h->sps.log2_max_frame_num;
2179  int field_poc[2];
2180  Picture *cur = s->current_picture_ptr;
2181 
2183  if (h->frame_num < h->prev_frame_num)
2184  h->frame_num_offset += max_frame_num;
2185 
2186  if (h->sps.poc_type == 0) {
     /* poc_type 0: POC is signalled as lsb bits; infer the msb part by
      * detecting wrap-around relative to the previous picture. */
2187  const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
2188 
2189  if (h->poc_lsb < h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
2190  h->poc_msb = h->prev_poc_msb + max_poc_lsb;
2191  else if (h->poc_lsb > h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
2192  h->poc_msb = h->prev_poc_msb - max_poc_lsb;
2193  else
2194  h->poc_msb = h->prev_poc_msb;
2195  field_poc[0] =
2196  field_poc[1] = h->poc_msb + h->poc_lsb;
2197  if (s->picture_structure == PICT_FRAME)
2198  field_poc[1] += h->delta_poc_bottom;
2199  } else if (h->sps.poc_type == 1) {
     /* poc_type 1: POC follows a periodic pattern declared in the SPS,
      * plus per-slice deltas. */
2200  int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
2201  int i;
2202 
2203  if (h->sps.poc_cycle_length != 0)
2204  abs_frame_num = h->frame_num_offset + h->frame_num;
2205  else
2206  abs_frame_num = 0;
2207 
2208  if (h->nal_ref_idc == 0 && abs_frame_num > 0)
2209  abs_frame_num--;
2210 
2211  expected_delta_per_poc_cycle = 0;
2212  for (i = 0; i < h->sps.poc_cycle_length; i++)
2213  // FIXME integrate during sps parse
2214  expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
2215 
2216  if (abs_frame_num > 0) {
2217  int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
2218  int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
2219 
2220  expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
2221  for (i = 0; i <= frame_num_in_poc_cycle; i++)
2222  expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
2223  } else
2224  expectedpoc = 0;
2225 
2226  if (h->nal_ref_idc == 0)
2227  expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
2228 
2229  field_poc[0] = expectedpoc + h->delta_poc[0];
2230  field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
2231 
2232  if (s->picture_structure == PICT_FRAME)
2233  field_poc[1] += h->delta_poc[1];
2234  } else {
     /* poc_type 2: POC is derived directly from frame_num; non-reference
      * pictures sit one below the surrounding reference pictures. */
2235  int poc = 2 * (h->frame_num_offset + h->frame_num);
2236 
2237  if (!h->nal_ref_idc)
2238  poc--;
2239 
2240  field_poc[0] = poc;
2241  field_poc[1] = poc;
2242  }
2243 
2245  s->current_picture_ptr->field_poc[0] = field_poc[0];
2247  s->current_picture_ptr->field_poc[1] = field_poc[1];
2248  cur->poc = FFMIN(cur->field_poc[0], cur->field_poc[1]);
2249 
2250  return 0;
2251 }
2252 
/* Body of init_scan_tables() — the signature line (rendered 2256) was
 * dropped by the extraction. Builds the decoder's zigzag/field scan tables
 * by permuting the canonical scan orders into the layout the rest of the
 * decoder expects. NOTE(review): several assignments inside the
 * transform_bypass branch (rendered 2274-2279, 2282-2286) are missing from
 * this extract — confirm against the full source. */
2257 {
2258  int i;
2259  for (i = 0; i < 16; i++) {
     /* 4x4 scans: swap the row/column nibbles of each scan position. */
2260 #define T(x) (x >> 2) | ((x << 2) & 0xF)
2261  h->zigzag_scan[i] = T(zigzag_scan[i]);
2262  h->field_scan[i] = T(field_scan[i]);
2263 #undef T
2264  }
2265  for (i = 0; i < 64; i++) {
     /* 8x8 scans: swap the 3-bit row/column halves of each position. */
2266 #define T(x) (x >> 3) | ((x & 7) << 3)
2267  h->zigzag_scan8x8[i] = T(ff_zigzag_direct[i]);
2269  h->field_scan8x8[i] = T(field_scan8x8[i]);
2271 #undef T
2272  }
2273  if (h->sps.transform_bypass) { // FIXME same ugly
2280  } else {
     /* Without transform bypass the qp==0 tables alias the normal ones. */
2281  h->zigzag_scan_q0 = h->zigzag_scan;
2284  h->field_scan_q0 = h->field_scan;
2287  }
2288 }
2289 
/* Finish decoding of the current field (or frame): save the POC/frame_num
 * prediction state, let a hardware accelerator finalize the frame, run
 * error concealment for full frames, and close the MPV frame.
 * NOTE(review): multiple statements were dropped by the extraction
 * (rendered 2298-2303, 2307, 2311-2313, 2322-2324) — presumably thread
 * progress reporting and the prev_frame_num_offset update; confirm against
 * the full source. */
2290 static int field_end(H264Context *h, int in_setup)
2291 {
2292  MpegEncContext *const s = &h->s;
2293  AVCodecContext *const avctx = s->avctx;
2294  int err = 0;
2295  s->mb_y = 0;
2296 
2297  if (!in_setup && !s->droppable)
2300 
2304 
     /* Carry the POC/frame_num state forward for the next picture, but only
      * where it is safe with respect to frame threading. */
2305  if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) {
2306  if (!s->droppable) {
2308  h->prev_poc_msb = h->poc_msb;
2309  h->prev_poc_lsb = h->poc_lsb;
2310  }
2312  h->prev_frame_num = h->frame_num;
2314  }
2315 
2316  if (avctx->hwaccel) {
2317  if (avctx->hwaccel->end_frame(avctx) < 0)
2318  av_log(avctx, AV_LOG_ERROR,
2319  "hardware accelerator failed to decode picture\n");
2320  }
2321 
2325 
2326  /*
2327  * FIXME: Error handling code does not seem to support interlaced
2328  * when slices span multiple rows
2329  * The ff_er_add_slice calls don't work right for bottom
2330  * fields; they cause massive erroneous error concealing
2331  * Error marking covers both fields (top and bottom).
2332  * This causes a mismatched s->error_count
2333  * and a bad error table. Further, the error count goes to
2334  * INT_MAX when called for bottom field, because mb_y is
2335  * past end by one (callers fault) and resync_mb_y != 0
2336  * causes problems for the first MB line, too.
2337  */
2338  if (!FIELD_PICTURE)
2339  ff_er_frame_end(s);
2340 
2341  ff_MPV_frame_end(s);
2342 
2343  h->current_slice = 0;
2344 
2345  return err;
2346 }
2347 
/* Replicate decoder state from the main context into a per-slice worker
 * context for sliced multithreading: copies the current picture pointers,
 * stride info, POC/frame_num state, reference lists and dequant tables,
 * and allocates the worker's scratch buffers.
 * NOTE(review): rendered lines 2356 and 2371 were dropped by the
 * extraction (presumably current_picture_ptr and prev_frame_num_offset
 * copies) — confirm against the full source.
 * @return 0 on success, a negative error code on allocation failure. */
2351 static int clone_slice(H264Context *dst, H264Context *src)
2352 {
2353  int ret;
2354 
2355  memcpy(dst->block_offset, src->block_offset, sizeof(dst->block_offset));
2357  dst->s.current_picture = src->s.current_picture;
2358  dst->s.linesize = src->s.linesize;
2359  dst->s.uvlinesize = src->s.uvlinesize;
2360  dst->s.first_field = src->s.first_field;
2361 
2362  if (!dst->s.edge_emu_buffer &&
2363  (ret = ff_mpv_frame_size_alloc(&dst->s, dst->s.linesize))) {
2364  av_log(dst->s.avctx, AV_LOG_ERROR,
2365  "Failed to allocate scratch buffers\n");
2366  return ret;
2367  }
2368 
2369  dst->prev_poc_msb = src->prev_poc_msb;
2370  dst->prev_poc_lsb = src->prev_poc_lsb;
2372  dst->prev_frame_num = src->prev_frame_num;
2373  dst->short_ref_count = src->short_ref_count;
2374 
2375  memcpy(dst->short_ref, src->short_ref, sizeof(dst->short_ref));
2376  memcpy(dst->long_ref, src->long_ref, sizeof(dst->long_ref));
2377  memcpy(dst->default_ref_list, src->default_ref_list, sizeof(dst->default_ref_list));
2378  memcpy(dst->ref_list, src->ref_list, sizeof(dst->ref_list));
2379 
2380  memcpy(dst->dequant4_coeff, src->dequant4_coeff, sizeof(src->dequant4_coeff));
2381  memcpy(dst->dequant8_coeff, src->dequant8_coeff, sizeof(src->dequant8_coeff));
2382 
2383  return 0;
2384 }
2385 
/* Body of ff_h264_get_profile() — the signature and the switch case labels
 * (rendered 2393, 2398, 2402-2404) were dropped by the extraction.
 * Maps sps->profile_idc plus the constraint_set flags onto an FF_PROFILE_*
 * value: constraint_set1 marks a constrained profile, constraint_set3
 * marks an intra-only profile for the applicable profile_idc values. */
2394 {
2395  int profile = sps->profile_idc;
2396 
2397  switch (sps->profile_idc) {
2399  // constraint_set1_flag set to 1
2400  profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
2401  break;
2405  // constraint_set3_flag set to 1
2406  profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
2407  break;
2408  }
2409 
2410  return profile;
2411 }
2412 
/* Body of h264_set_parameter_from_sps() — the signature (rendered 2413)
 * and several statements were dropped by the extraction. Applies SPS
 * parameters to the codec context: low-delay / has_b_frames handling,
 * and re-initialization of the DSP and prediction functions when the bit
 * depth or chroma format changes. Returns 0 or AVERROR_INVALIDDATA.
 * NOTE(review): the dropped lines around 2418, 2431-2435 and 2440-2450
 * presumably complete the low-delay condition, the VDPAU check and the
 * per-bit-depth DSP initialization — confirm against the full source. */
2414 {
2415  MpegEncContext *s = &h->s;
2416 
2417  if (s->flags & CODEC_FLAG_LOW_DELAY ||
2419  !h->sps.num_reorder_frames)) {
2420  if (s->avctx->has_b_frames > 1 || h->delayed_pic[0])
2421  av_log(h->s.avctx, AV_LOG_WARNING, "Delayed frames seen. "
2422  "Reenabling low delay requires a codec flush.\n");
2423  else
2424  s->low_delay = 1;
2425  }
2426 
2427  if (s->avctx->has_b_frames < 2)
2428  s->avctx->has_b_frames = !s->low_delay;
2429 
2430  if (s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
2432  if (s->avctx->codec &&
2434  (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) {
2436  "VDPAU decoding does not support video colorspace.\n");
2437  return AVERROR_INVALIDDATA;
2438  }
     /* Only 8/9/10-bit luma is supported; reinit bit-depth-dependent code. */
2439  if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
2442  h->pixel_shift = h->sps.bit_depth_luma > 8;
2443 
2445  h->sps.chroma_format_idc);
2447  h->sps.chroma_format_idc);
2448  s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
2449  ff_dsputil_init(&s->dsp, s->avctx);
2451  } else {
2452  av_log(s->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n",
2453  h->sps.bit_depth_luma);
2454  return AVERROR_INVALIDDATA;
2455  }
2456  }
2457  return 0;
2458 }
2459 
/* Body of get_pixel_format() — the signature (rendered 2460) was dropped
 * by the extraction. Selects the output pixel format from the SPS bit
 * depth (8/9/10), the chroma sampling (420/422/444) and the colorspace
 * (RGB via GBRP for 444), or returns AVERROR_INVALIDDATA for unsupported
 * bit depths.
 * NOTE(review): the 8-bit 444/422 return statements (rendered 2491-2495)
 * and part of the get_format fallback are missing from this extract —
 * confirm against the full source. */
2461 {
2462  MpegEncContext *const s = &h->s;
2463  switch (h->sps.bit_depth_luma) {
2464  case 9:
2465  if (CHROMA444) {
2466  if (s->avctx->colorspace == AVCOL_SPC_RGB) {
2467  return AV_PIX_FMT_GBRP9;
2468  } else
2469  return AV_PIX_FMT_YUV444P9;
2470  } else if (CHROMA422)
2471  return AV_PIX_FMT_YUV422P9;
2472  else
2473  return AV_PIX_FMT_YUV420P9;
2474  break;
2475  case 10:
2476  if (CHROMA444) {
2477  if (s->avctx->colorspace == AVCOL_SPC_RGB) {
2478  return AV_PIX_FMT_GBRP10;
2479  } else
2480  return AV_PIX_FMT_YUV444P10;
2481  } else if (CHROMA422)
2482  return AV_PIX_FMT_YUV422P10;
2483  else
2484  return AV_PIX_FMT_YUV420P10;
2485  break;
2486  case 8:
2487  if (CHROMA444) {
2488  if (s->avctx->colorspace == AVCOL_SPC_RGB) {
2489  return AV_PIX_FMT_GBRP;
2490  } else
2493  } else if (CHROMA422) {
2496  } else {
     /* 8-bit 4:2:0: let the user/codec negotiate via get_format(). */
2497  return s->avctx->get_format(s->avctx, s->avctx->codec->pix_fmts ?
2498  s->avctx->codec->pix_fmts :
2502  }
2503  break;
2504  default:
2506  "Unsupported bit depth: %d\n", h->sps.bit_depth_luma);
2507  return AVERROR_INVALIDDATA;
2508  }
2509 }
2510 
/* (Re-)initialize decoder state that depends on the slice header / SPS:
 * sample aspect ratio, timebase, MPV context (fresh init or frame-size
 * change on reinit), scan tables, per-slice tables, and the per-thread
 * slice contexts for sliced decoding.
 * NOTE(review): a few lines were dropped by the extraction (rendered
 * 2516-2519, 2524, 2528-2529, 2552) — presumably the SAR guard, the
 * timebase assignment and the slice_context_count branch condition;
 * confirm against the full source.
 * @param reinit nonzero when an existing context is being resized
 * @return 0 on success, a negative error code on failure */
2511 static int h264_slice_header_init(H264Context *h, int reinit)
2512 {
2513  MpegEncContext *const s = &h->s;
2514  int i, ret;
2515 
2517  s->avctx->sample_aspect_ratio = h->sps.sar;
2519 
2520  if (h->sps.timing_info_present_flag) {
2521  int64_t den = h->sps.time_scale;
     /* Old x264 builds wrote a time_scale that is half the real value. */
2522  if (h->x264_build < 44U)
2523  den *= 2;
2525  h->sps.num_units_in_tick, den, 1 << 30);
2526  }
2527 
2529 
2530  if (reinit) {
2531  free_tables(h, 0);
2532  if ((ret = ff_MPV_common_frame_size_change(s)) < 0) {
2533  av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_frame_size_change() failed.\n");
2534  return ret;
2535  }
2536  } else {
2537  if ((ret = ff_MPV_common_init(s) < 0)) {
2538  av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_init() failed.\n");
2539  return ret;
2540  }
2541  }
2542  s->first_field = 0;
2543  h->prev_interlaced_frame = 1;
2544 
2545  init_scan_tables(h);
2546  if (ff_h264_alloc_tables(h) < 0) {
2547  av_log(h->s.avctx, AV_LOG_ERROR,
2548  "Could not allocate memory for h264\n");
2549  return AVERROR(ENOMEM);
2550  }
2551 
2553  if (context_init(h) < 0) {
2554  av_log(h->s.avctx, AV_LOG_ERROR, "context_init() failed.\n");
2555  return -1;
2556  }
2557  } else {
     /* Sliced threading: clone the main context into one worker context
      * per additional slice thread. */
2558  for (i = 1; i < s->slice_context_count; i++) {
2559  H264Context *c;
2560  c = h->thread_context[i] = av_malloc(sizeof(H264Context));
2561  memcpy(c, h->s.thread_context[i], sizeof(MpegEncContext));
2562  memset(&c->s + 1, 0, sizeof(H264Context) - sizeof(MpegEncContext));
2563  c->h264dsp = h->h264dsp;
2564  c->sps = h->sps;
2565  c->pps = h->pps;
2566  c->pixel_shift = h->pixel_shift;
2567  init_scan_tables(c);
2568  clone_tables(c, h, i);
2569  }
2570 
2571  for (i = 0; i < s->slice_context_count; i++)
2572  if (context_init(h->thread_context[i]) < 0) {
2573  av_log(h->s.avctx, AV_LOG_ERROR, "context_init() failed.\n");
2574  return -1;
2575  }
2576  }
2577 
2578  return 0;
2579 }
2580 
/* Body of decode_slice_header() — the signature and its doxygen comment
 * (rendered 2581-2591) were dropped by the extraction; upstream this is
 * decode_slice_header(H264Context *h, H264Context *h0), parsing a slice
 * header and (re-)initializing everything needed to decode the slice:
 * slice type, PPS/SPS activation, resolution changes, field/frame pairing,
 * frame_num gap concealment, reference lists, prediction weights, QP and
 * deblocking parameters.
 * NOTE(review): very many lines were dropped by the extraction throughout
 * this function (every rendered-number gap below) — do not assume the
 * visible statements are contiguous; confirm details against the full
 * source. */
2592 {
2593  MpegEncContext *const s = &h->s;
2594  MpegEncContext *const s0 = &h0->s;
2595  unsigned int first_mb_in_slice;
2596  unsigned int pps_id;
2597  int num_ref_idx_active_override_flag, max_refs, ret;
2598  unsigned int slice_type, tmp, i, j;
2599  int default_ref_list_done = 0;
2600  int last_pic_structure, last_pic_droppable;
2601  int needs_reinit = 0;
2602 
2603  /* FIXME: 2tap qpel isn't implemented for high bit depth. */
2604  if ((s->avctx->flags2 & CODEC_FLAG2_FAST) &&
2605  !h->nal_ref_idc && !h->pixel_shift) {
2608  } else {
2611  }
2612 
2613  first_mb_in_slice = get_ue_golomb(&s->gb);
2614 
2615  if (first_mb_in_slice == 0) { // FIXME better field boundary detection
2616  if (h0->current_slice && FIELD_PICTURE) {
2617  field_end(h, 1);
2618  }
2619 
2620  h0->current_slice = 0;
2621  if (!s0->first_field) {
2622  if (s->current_picture_ptr && !s->droppable &&
2623  s->current_picture_ptr->owner2 == s) {
2626  }
2628  }
2629  }
2630 
     /* Parse and normalize the slice type (values >4 mean "fixed for the
      * whole picture"). */
2631  slice_type = get_ue_golomb_31(&s->gb);
2632  if (slice_type > 9) {
2633  av_log(h->s.avctx, AV_LOG_ERROR,
2634  "slice type too large (%d) at %d %d\n",
2635  h->slice_type, s->mb_x, s->mb_y);
2636  return -1;
2637  }
2638  if (slice_type > 4) {
2639  slice_type -= 5;
2640  h->slice_type_fixed = 1;
2641  } else
2642  h->slice_type_fixed = 0;
2643 
2644  slice_type = golomb_to_pict_type[slice_type];
2645  if (slice_type == AV_PICTURE_TYPE_I ||
2646  (h0->current_slice != 0 && slice_type == h0->last_slice_type)) {
2647  default_ref_list_done = 1;
2648  }
2649  h->slice_type = slice_type;
2650  h->slice_type_nos = slice_type & 3;
2651 
2652  if (h->nal_unit_type == NAL_IDR_SLICE &&
2654  av_log(h->s.avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
2655  return AVERROR_INVALIDDATA;
2656  }
2657 
2658  // to make a few old functions happy, it's wrong though
2659  s->pict_type = h->slice_type;
2660 
     /* Activate the referenced PPS and, through it, the SPS. */
2661  pps_id = get_ue_golomb(&s->gb);
2662  if (pps_id >= MAX_PPS_COUNT) {
2663  av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n");
2664  return -1;
2665  }
2666  if (!h0->pps_buffers[pps_id]) {
2667  av_log(h->s.avctx, AV_LOG_ERROR,
2668  "non-existing PPS %u referenced\n",
2669  pps_id);
2670  return -1;
2671  }
2672  h->pps = *h0->pps_buffers[pps_id];
2673 
2674  if (!h0->sps_buffers[h->pps.sps_id]) {
2675  av_log(h->s.avctx, AV_LOG_ERROR,
2676  "non-existing SPS %u referenced\n",
2677  h->pps.sps_id);
2678  return -1;
2679  }
2680 
2681  if (h->pps.sps_id != h->current_sps_id ||
2682  h->context_reinitialized ||
2683  h0->sps_buffers[h->pps.sps_id]->new) {
2684  SPS *new_sps = h0->sps_buffers[h->pps.sps_id];
2685 
2686  h0->sps_buffers[h->pps.sps_id]->new = 0;
2687 
     /* Chroma format or bit depth changes force a full reinit. */
2688  if (h->sps.chroma_format_idc != new_sps->chroma_format_idc ||
2689  h->sps.bit_depth_luma != new_sps->bit_depth_luma)
2690  needs_reinit = 1;
2691 
2692  h->current_sps_id = h->pps.sps_id;
2693  h->sps = *h0->sps_buffers[h->pps.sps_id];
2694 
2695  if ((ret = h264_set_parameter_from_sps(h)) < 0)
2696  return ret;
2697  }
2698 
2699  s->avctx->profile = ff_h264_get_profile(&h->sps);
2700  s->avctx->level = h->sps.level_idc;
2701  s->avctx->refs = h->sps.ref_frame_count;
2702 
2703  if (s->mb_width != h->sps.mb_width ||
2704  s->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag))
2705  needs_reinit = 1;
2706 
2707  s->mb_width = h->sps.mb_width;
2708  s->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
2709 
2710  h->b_stride = s->mb_width * 4;
2711 
2712  s->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
2713 
     /* Apply the SPS cropping to derive the coded picture dimensions. */
2714  s->width = 16 * s->mb_width - (2 >> CHROMA444) * FFMIN(h->sps.crop_right, (8 << CHROMA444) - 1);
2715  if (h->sps.frame_mbs_only_flag)
2716  s->height = 16 * s->mb_height - (1 << s->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> s->chroma_y_shift) - 1);
2717  else
2718  s->height = 16 * s->mb_height - (2 << s->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> s->chroma_y_shift) - 1);
2719 
2720  if (FFALIGN(s->avctx->width, 16) == s->width &&
2721  FFALIGN(s->avctx->height, 16) == s->height) {
2722  s->width = s->avctx->width;
2723  s->height = s->avctx->height;
2724  }
2725 
2728  : AVCOL_RANGE_MPEG;
2730  if (s->avctx->colorspace != h->sps.colorspace)
2731  needs_reinit = 1;
2733  s->avctx->color_trc = h->sps.color_trc;
2734  s->avctx->colorspace = h->sps.colorspace;
2735  }
2736  }
2737 
     /* Resolution / format change: tear the context down and rebuild it
      * (only legal on the main context, not during parallel decoding). */
2738  if (s->context_initialized &&
2739  (s->width != s->avctx->width ||
2740  s->height != s->avctx->height ||
2741  needs_reinit ||
2743 
2744  if (h != h0) {
2745  av_log(s->avctx, AV_LOG_ERROR, "changing width/height on "
2746  "slice %d\n", h0->current_slice + 1);
2747  return AVERROR_INVALIDDATA;
2748  }
2749 
2750  flush_change(h);
2751 
2752  if ((ret = get_pixel_format(h)) < 0)
2753  return ret;
2754  s->avctx->pix_fmt = ret;
2755 
2756  av_log(h->s.avctx, AV_LOG_INFO, "Reinit context to %dx%d, "
2757  "pix_fmt: %d\n", s->width, s->height, s->avctx->pix_fmt);
2758 
2759  if ((ret = h264_slice_header_init(h, 1)) < 0) {
2760  av_log(h->s.avctx, AV_LOG_ERROR,
2761  "h264_slice_header_init() failed\n");
2762  return ret;
2763  }
2764  h->context_reinitialized = 1;
2765  }
2766  if (!s->context_initialized) {
2767  if (h != h0) {
2768  av_log(h->s.avctx, AV_LOG_ERROR,
2769  "Cannot (re-)initialize context during parallel decoding.\n");
2770  return -1;
2771  }
2772 
2773  if ((ret = get_pixel_format(h)) < 0)
2774  return ret;
2775  s->avctx->pix_fmt = ret;
2776 
2777  if ((ret = h264_slice_header_init(h, 0)) < 0) {
2778  av_log(h->s.avctx, AV_LOG_ERROR,
2779  "h264_slice_header_init() failed\n");
2780  return ret;
2781  }
2782  }
2783 
2784  if (h == h0 && h->dequant_coeff_pps != pps_id) {
2785  h->dequant_coeff_pps = pps_id;
2787  }
2788 
2789  h->frame_num = get_bits(&s->gb, h->sps.log2_max_frame_num);
2790 
     /* Determine the picture structure (frame / top field / bottom field)
      * and whether this picture may be dropped (non-reference). */
2791  h->mb_mbaff = 0;
2792  h->mb_aff_frame = 0;
2793  last_pic_structure = s0->picture_structure;
2794  last_pic_droppable = s0->droppable;
2795  s->droppable = h->nal_ref_idc == 0;
2796  if (h->sps.frame_mbs_only_flag) {
2798  } else {
2799  if (get_bits1(&s->gb)) { // field_pic_flag
2800  s->picture_structure = PICT_TOP_FIELD + get_bits1(&s->gb); // bottom_field_flag
2801  } else {
2803  h->mb_aff_frame = h->sps.mb_aff;
2804  }
2805  }
2807 
2808  if (h0->current_slice != 0) {
2809  if (last_pic_structure != s->picture_structure ||
2810  last_pic_droppable != s->droppable) {
2811  av_log(h->s.avctx, AV_LOG_ERROR,
2812  "Changing field mode (%d -> %d) between slices is not allowed\n",
2813  last_pic_structure, s->picture_structure);
2814  s->picture_structure = last_pic_structure;
2815  s->droppable = last_pic_droppable;
2816  return AVERROR_INVALIDDATA;
2817  } else if (!s0->current_picture_ptr) {
2819  "unset current_picture_ptr on %d. slice\n",
2820  h0->current_slice + 1);
2821  return AVERROR_INVALIDDATA;
2822  }
2823  } else {
2824  /* Shorten frame num gaps so we don't have to allocate reference
2825  * frames just to throw them away */
2826  if (h->frame_num != h->prev_frame_num) {
2827  int unwrap_prev_frame_num = h->prev_frame_num;
2828  int max_frame_num = 1 << h->sps.log2_max_frame_num;
2829 
2830  if (unwrap_prev_frame_num > h->frame_num)
2831  unwrap_prev_frame_num -= max_frame_num;
2832 
2833  if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) {
2834  unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1;
2835  if (unwrap_prev_frame_num < 0)
2836  unwrap_prev_frame_num += max_frame_num;
2837 
2838  h->prev_frame_num = unwrap_prev_frame_num;
2839  }
2840  }
2841 
2842  /* See if we have a decoded first field looking for a pair...
2843  * Here, we're using that to see if we should mark previously
2844  * decode frames as "finished".
2845  * We have to do that before the "dummy" in-between frame allocation,
2846  * since that can modify s->current_picture_ptr. */
2847  if (s0->first_field) {
2848  assert(s0->current_picture_ptr);
2849  assert(s0->current_picture_ptr->f.data[0]);
2851 
2852  /* Mark old field/frame as completed */
2853  if (!last_pic_droppable && s0->current_picture_ptr->owner2 == s0) {
2855  last_pic_structure == PICT_BOTTOM_FIELD);
2856  }
2857 
2858  /* figure out if we have a complementary field pair */
2859  if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
2860  /* Previous field is unmatched. Don't display it, but let it
2861  * remain for reference if marked as such. */
2862  if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
2864  last_pic_structure == PICT_TOP_FIELD);
2865  }
2866  } else {
2867  if (s0->current_picture_ptr->frame_num != h->frame_num) {
2868  /* This and previous field were reference, but had
2869  * different frame_nums. Consider this field first in
2870  * pair. Throw away previous field except for reference
2871  * purposes. */
2872  if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
2874  last_pic_structure == PICT_TOP_FIELD);
2875  }
2876  } else {
2877  /* Second field in complementary pair */
2878  if (!((last_pic_structure == PICT_TOP_FIELD &&
2880  (last_pic_structure == PICT_BOTTOM_FIELD &&
2883  "Invalid field mode combination %d/%d\n",
2884  last_pic_structure, s->picture_structure);
2885  s->picture_structure = last_pic_structure;
2886  s->droppable = last_pic_droppable;
2887  return AVERROR_INVALIDDATA;
2888  } else if (last_pic_droppable != s->droppable) {
2890  "Cannot combine reference and non-reference fields in the same frame\n");
2892  s->picture_structure = last_pic_structure;
2893  s->droppable = last_pic_droppable;
2894  return AVERROR_PATCHWELCOME;
2895  }
2896 
2897  /* Take ownership of this buffer. Note that if another thread owned
2898  * the first field of this buffer, we're not operating on that pointer,
2899  * so the original thread is still responsible for reporting progress
2900  * on that first field (or if that was us, we just did that above).
2901  * By taking ownership, we assign responsibility to ourselves to
2902  * report progress on the second field. */
2903  s0->current_picture_ptr->owner2 = s0;
2904  }
2905  }
2906  }
2907 
     /* frame_num gap: synthesize "dummy" frames so the short-term reference
      * machinery stays consistent, concealing each with the previous ref. */
2908  while (h->frame_num != h->prev_frame_num &&
2909  h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
2910  Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
2911  av_log(h->s.avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
2912  h->frame_num, h->prev_frame_num);
2913  if (ff_h264_frame_start(h) < 0) {
2914  s0->first_field = 0;
2915  return -1;
2916  }
2917  h->prev_frame_num++;
2918  h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
2922  if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 &&
2924  return ret;
2925  if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 &&
2927  return AVERROR_INVALIDDATA;
2928  /* Error concealment: if a ref is missing, copy the previous ref in its place.
2929  * FIXME: avoiding a memcpy would be nice, but ref handling makes many assumptions
2930  * about there being no actual duplicates.
2931  * FIXME: this doesn't copy padding for out-of-frame motion vectors. Given we're
2932  * concealing a lost frame, this probably isn't noticeable by comparison, but it should
2933  * be fixed. */
2934  if (h->short_ref_count) {
2935  if (prev) {
2936  av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize,
2937  (const uint8_t **)prev->f.data, prev->f.linesize,
2938  s->avctx->pix_fmt, s->mb_width * 16, s->mb_height * 16);
2939  h->short_ref[0]->poc = prev->poc + 2;
2940  }
2941  h->short_ref[0]->frame_num = h->prev_frame_num;
2942  }
2943  }
2944 
2945  /* See if we have a decoded first field looking for a pair...
2946  * We're using that to see whether to continue decoding in that
2947  * frame, or to allocate a new one. */
2948  if (s0->first_field) {
2949  assert(s0->current_picture_ptr);
2950  assert(s0->current_picture_ptr->f.data[0]);
2952 
2953  /* figure out if we have a complementary field pair */
2954  if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
2955  /* Previous field is unmatched. Don't display it, but let it
2956  * remain for reference if marked as such. */
2957  s0->current_picture_ptr = NULL;
2958  s0->first_field = FIELD_PICTURE;
2959  } else {
2960  if (s0->current_picture_ptr->frame_num != h->frame_num) {
2961  /* This and the previous field had different frame_nums.
2962  * Consider this field first in pair. Throw away previous
2963  * one except for reference purposes. */
2964  s0->first_field = 1;
2965  s0->current_picture_ptr = NULL;
2966  } else {
2967  /* Second field in complementary pair */
2968  s0->first_field = 0;
2969  }
2970  }
2971  } else {
2972  /* Frame or first field in a potentially complementary pair */
2973  s0->first_field = FIELD_PICTURE;
2974  }
2975 
2976  if (!FIELD_PICTURE || s0->first_field) {
2977  if (ff_h264_frame_start(h) < 0) {
2978  s0->first_field = 0;
2979  return -1;
2980  }
2981  } else {
2983  }
2984  }
2985  if (h != h0 && (ret = clone_slice(h, h0)) < 0)
2986  return ret;
2987 
2988  s->current_picture_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup
2989 
2990  assert(s->mb_num == s->mb_width * s->mb_height);
2991  if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE >= s->mb_num ||
2992  first_mb_in_slice >= s->mb_num) {
2993  av_log(h->s.avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
2994  return -1;
2995  }
2996  s->resync_mb_x = s->mb_x = first_mb_in_slice % s->mb_width;
2997  s->resync_mb_y = s->mb_y = (first_mb_in_slice / s->mb_width) << FIELD_OR_MBAFF_PICTURE;
2999  s->resync_mb_y = s->mb_y = s->mb_y + 1;
3000  assert(s->mb_y < s->mb_height);
3001 
3002  if (s->picture_structure == PICT_FRAME) {
3003  h->curr_pic_num = h->frame_num;
3004  h->max_pic_num = 1 << h->sps.log2_max_frame_num;
3005  } else {
3006  h->curr_pic_num = 2 * h->frame_num + 1;
3007  h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1);
3008  }
3009 
3010  if (h->nal_unit_type == NAL_IDR_SLICE)
3011  get_ue_golomb(&s->gb); /* idr_pic_id */
3012 
     /* Parse the POC-related syntax elements for the active poc_type. */
3013  if (h->sps.poc_type == 0) {
3014  h->poc_lsb = get_bits(&s->gb, h->sps.log2_max_poc_lsb);
3015 
3016  if (h->pps.pic_order_present == 1 && s->picture_structure == PICT_FRAME)
3017  h->delta_poc_bottom = get_se_golomb(&s->gb);
3018  }
3019 
3020  if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) {
3021  h->delta_poc[0] = get_se_golomb(&s->gb);
3022 
3023  if (h->pps.pic_order_present == 1 && s->picture_structure == PICT_FRAME)
3024  h->delta_poc[1] = get_se_golomb(&s->gb);
3025  }
3026 
3027  init_poc(h);
3028 
3031 
3032  // set defaults, might be overridden a few lines later
3033  h->ref_count[0] = h->pps.ref_count[0];
3034  h->ref_count[1] = h->pps.ref_count[1];
3035 
3036  if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
3039  num_ref_idx_active_override_flag = get_bits1(&s->gb);
3040 
3041  if (num_ref_idx_active_override_flag) {
3042  h->ref_count[0] = get_ue_golomb(&s->gb) + 1;
3043  if (h->ref_count[0] < 1)
3044  return AVERROR_INVALIDDATA;
3045  if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
3046  h->ref_count[1] = get_ue_golomb(&s->gb) + 1;
3047  if (h->ref_count[1] < 1)
3048  return AVERROR_INVALIDDATA;
3049  }
3050  }
3051 
3053  h->list_count = 2;
3054  else
3055  h->list_count = 1;
3056  } else {
3057  h->list_count = 0;
3058  h->ref_count[0] = h->ref_count[1] = 0;
3059  }
3060 
3061 
3062  max_refs = s->picture_structure == PICT_FRAME ? 16 : 32;
3063 
3064  if (h->ref_count[0] > max_refs || h->ref_count[1] > max_refs) {
3065  av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
3066  h->ref_count[0] = h->ref_count[1] = 0;
3067  return AVERROR_INVALIDDATA;
3068  }
3069 
3070  if (!default_ref_list_done)
3072 
3073  if (h->slice_type_nos != AV_PICTURE_TYPE_I &&
3075  h->ref_count[1] = h->ref_count[0] = 0;
3076  return -1;
3077  }
3078 
3079  if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
3080  s->last_picture_ptr = &h->ref_list[0][0];
3081  s->last_picture_ptr->owner2 = s;
3083  }
3084  if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
3085  s->next_picture_ptr = &h->ref_list[1][0];
3086  s->next_picture_ptr->owner2 = s;
3088  }
3089 
     /* Weighted prediction: explicit tables for P (weighted_pred) and
      * B (bipred_idc == 1), implicit derivation for bipred_idc == 2. */
3090  if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) ||
3091  (h->pps.weighted_bipred_idc == 1 &&
3093  pred_weight_table(h);
3094  else if (h->pps.weighted_bipred_idc == 2 &&
3096  implicit_weight_table(h, -1);
3097  } else {
3098  h->use_weight = 0;
3099  for (i = 0; i < 2; i++) {
3100  h->luma_weight_flag[i] = 0;
3101  h->chroma_weight_flag[i] = 0;
3102  }
3103  }
3104 
3105  // If frame-mt is enabled, only update mmco tables for the first slice
3106  // in a field. Subsequent slices can temporarily clobber h->mmco_index
3107  // or h->mmco, which will cause ref list mix-ups and decoding errors
3108  // further down the line. This may break decoding if the first slice is
3109  // corrupt, thus we only do this if frame-mt is enabled.
3110  if (h->nal_ref_idc &&
3113  h0->current_slice == 0) < 0 &&
3115  return AVERROR_INVALIDDATA;
3116 
3117  if (FRAME_MBAFF) {
3119 
3121  implicit_weight_table(h, 0);
3122  implicit_weight_table(h, 1);
3123  }
3124  }
3125 
3129 
3130  if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) {
3131  tmp = get_ue_golomb_31(&s->gb);
3132  if (tmp > 2) {
3133  av_log(s->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n");
3134  return -1;
3135  }
3136  h->cabac_init_idc = tmp;
3137  }
3138 
     /* Slice QP and the chroma QPs derived from it. */
3139  h->last_qscale_diff = 0;
3140  tmp = h->pps.init_qp + get_se_golomb(&s->gb);
3141  if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) {
3142  av_log(s->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
3143  return -1;
3144  }
3145  s->qscale = tmp;
3146  h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale);
3147  h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale);
3148  // FIXME qscale / qp ... stuff
3149  if (h->slice_type == AV_PICTURE_TYPE_SP)
3150  get_bits1(&s->gb); /* sp_for_switch_flag */
3151  if (h->slice_type == AV_PICTURE_TYPE_SP ||
3153  get_se_golomb(&s->gb); /* slice_qs_delta */
3154 
     /* Deblocking filter control (idc 0=on, 1=off, 2=on but not across
      * slice boundaries; stored here with 0/1 swapped). */
3155  h->deblocking_filter = 1;
3156  h->slice_alpha_c0_offset = 0;
3157  h->slice_beta_offset = 0;
3159  tmp = get_ue_golomb_31(&s->gb);
3160  if (tmp > 2) {
3162  "deblocking_filter_idc %u out of range\n", tmp);
3163  return -1;
3164  }
3165  h->deblocking_filter = tmp;
3166  if (h->deblocking_filter < 2)
3167  h->deblocking_filter ^= 1; // 1<->0
3168 
3169  if (h->deblocking_filter) {
3170  h->slice_alpha_c0_offset = get_se_golomb(&s->gb) * 2;
3171  h->slice_beta_offset = get_se_golomb(&s->gb) * 2;
3172  if (h->slice_alpha_c0_offset > 12 ||
3173  h->slice_alpha_c0_offset < -12 ||
3174  h->slice_beta_offset > 12 ||
3175  h->slice_beta_offset < -12) {
3177  "deblocking filter parameters %d %d out of range\n",
3179  return -1;
3180  }
3181  }
3182  }
3183 
3184  if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
3190  h->nal_ref_idc == 0))
3191  h->deblocking_filter = 0;
3192 
3193  if (h->deblocking_filter == 1 && h0->max_contexts > 1) {
3194  if (s->avctx->flags2 & CODEC_FLAG2_FAST) {
3195  /* Cheat slightly for speed:
3196  * Do not bother to deblock across slices. */
3197  h->deblocking_filter = 2;
3198  } else {
3199  h0->max_contexts = 1;
3200  if (!h0->single_decode_warning) {
3201  av_log(s->avctx, AV_LOG_INFO,
3202  "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
3203  h0->single_decode_warning = 1;
3204  }
3205  if (h != h0) {
3206  av_log(h->s.avctx, AV_LOG_ERROR,
3207  "Deblocking switched inside frame.\n");
3208  return 1;
3209  }
3210  }
3211  }
3212  h->qp_thresh = 15 -
3214  FFMAX3(0,
3216  h->pps.chroma_qp_index_offset[1]) +
3217  6 * (h->sps.bit_depth_luma - 8);
3218 
3219  h0->last_slice_type = slice_type;
3220  h->slice_num = ++h0->current_slice;
3221  if (h->slice_num >= MAX_SLICES) {
3223  "Too many slices, increase MAX_SLICES and recompile\n");
3224  }
3225 
     /* Build the per-slice ref-index -> frame-id map used by the loop
      * filter to tell whether two references are the same frame. */
3226  for (j = 0; j < 2; j++) {
3227  int id_list[16];
3228  int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j];
3229  for (i = 0; i < 16; i++) {
3230  id_list[i] = 60;
3231  if (h->ref_list[j][i].f.data[0]) {
3232  int k;
3233  uint8_t *base = h->ref_list[j][i].f.base[0];
3234  for (k = 0; k < h->short_ref_count; k++)
3235  if (h->short_ref[k]->f.base[0] == base) {
3236  id_list[i] = k;
3237  break;
3238  }
3239  for (k = 0; k < h->long_ref_count; k++)
3240  if (h->long_ref[k] && h->long_ref[k]->f.base[0] == base) {
3241  id_list[i] = h->short_ref_count + k;
3242  break;
3243  }
3244  }
3245  }
3246 
3247  ref2frm[0] =
3248  ref2frm[1] = -1;
3249  for (i = 0; i < 16; i++)
3250  ref2frm[i + 2] = 4 * id_list[i] +
3251  (h->ref_list[j][i].f.reference & 3);
3252  ref2frm[18 + 0] =
3253  ref2frm[18 + 1] = -1;
3254  for (i = 16; i < 48; i++)
3255  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
3256  (h->ref_list[j][i].f.reference & 3);
3257  }
3258 
3259  // FIXME: fix draw_edges + PAFF + frame threads
3261  (!h->sps.frame_mbs_only_flag &&
3263  ? 0 : 16;
3265 
3266  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
3267  av_log(h->s.avctx, AV_LOG_DEBUG,
3268  "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
3269  h->slice_num,
3270  (s->picture_structure == PICT_FRAME ? "F" : s->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
3271  first_mb_in_slice,
3273  h->slice_type_fixed ? " fix" : "",
3274  h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
3275  pps_id, h->frame_num,
3278  h->ref_count[0], h->ref_count[1],
3279  s->qscale,
3280  h->deblocking_filter,
3282  h->use_weight,
3283  h->use_weight == 1 && h->use_weight_chroma ? "c" : "",
3284  h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
3285  }
3286 
3287  return 0;
3288 }
3289 
3291 {
3292  switch (h->slice_type) {
3293  case AV_PICTURE_TYPE_P:
3294  return 0;
3295  case AV_PICTURE_TYPE_B:
3296  return 1;
3297  case AV_PICTURE_TYPE_I:
3298  return 2;
3299  case AV_PICTURE_TYPE_SP:
3300  return 3;
3301  case AV_PICTURE_TYPE_SI:
3302  return 4;
3303  default:
3304  return -1;
3305  }
3306 }
3307 
3309  MpegEncContext *const s,
3310  int mb_type, int top_xy,
3311  int left_xy[LEFT_MBS],
3312  int top_type,
3313  int left_type[LEFT_MBS],
3314  int mb_xy, int list)
3315 {
3316  int b_stride = h->b_stride;
3317  int16_t(*mv_dst)[2] = &h->mv_cache[list][scan8[0]];
3318  int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
3319  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
3320  if (USES_LIST(top_type, list)) {
3321  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
3322  const int b8_xy = 4 * top_xy + 2;
3323  int (*ref2frm)[64] = h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
3324  AV_COPY128(mv_dst - 1 * 8, s->current_picture.f.motion_val[list][b_xy + 0]);
3325  ref_cache[0 - 1 * 8] =
3326  ref_cache[1 - 1 * 8] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 0]];
3327  ref_cache[2 - 1 * 8] =
3328  ref_cache[3 - 1 * 8] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 1]];
3329  } else {
3330  AV_ZERO128(mv_dst - 1 * 8);
3331  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3332  }
3333 
3334  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
3335  if (USES_LIST(left_type[LTOP], list)) {
3336  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
3337  const int b8_xy = 4 * left_xy[LTOP] + 1;
3338  int (*ref2frm)[64] = h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
3339  AV_COPY32(mv_dst - 1 + 0, s->current_picture.f.motion_val[list][b_xy + b_stride * 0]);
3340  AV_COPY32(mv_dst - 1 + 8, s->current_picture.f.motion_val[list][b_xy + b_stride * 1]);
3341  AV_COPY32(mv_dst - 1 + 16, s->current_picture.f.motion_val[list][b_xy + b_stride * 2]);
3342  AV_COPY32(mv_dst - 1 + 24, s->current_picture.f.motion_val[list][b_xy + b_stride * 3]);
3343  ref_cache[-1 + 0] =
3344  ref_cache[-1 + 8] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 2 * 0]];
3345  ref_cache[-1 + 16] =
3346  ref_cache[-1 + 24] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 2 * 1]];
3347  } else {
3348  AV_ZERO32(mv_dst - 1 + 0);
3349  AV_ZERO32(mv_dst - 1 + 8);
3350  AV_ZERO32(mv_dst - 1 + 16);
3351  AV_ZERO32(mv_dst - 1 + 24);
3352  ref_cache[-1 + 0] =
3353  ref_cache[-1 + 8] =
3354  ref_cache[-1 + 16] =
3355  ref_cache[-1 + 24] = LIST_NOT_USED;
3356  }
3357  }
3358  }
3359 
3360  if (!USES_LIST(mb_type, list)) {
3361  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
3362  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3363  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3364  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3365  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3366  return;
3367  }
3368 
3369  {
3370  int8_t *ref = &s->current_picture.f.ref_index[list][4 * mb_xy];
3371  int (*ref2frm)[64] = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
3372  uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
3373  uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
3374  AV_WN32A(&ref_cache[0 * 8], ref01);
3375  AV_WN32A(&ref_cache[1 * 8], ref01);
3376  AV_WN32A(&ref_cache[2 * 8], ref23);
3377  AV_WN32A(&ref_cache[3 * 8], ref23);
3378  }
3379 
3380  {
3381  int16_t(*mv_src)[2] = &s->current_picture.f.motion_val[list][4 * s->mb_x + 4 * s->mb_y * b_stride];
3382  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
3383  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
3384  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
3385  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
3386  }
3387 }
3388 
3393 static int fill_filter_caches(H264Context *h, int mb_type)
3394 {
3395  MpegEncContext *const s = &h->s;
3396  const int mb_xy = h->mb_xy;
3397  int top_xy, left_xy[LEFT_MBS];
3398  int top_type, left_type[LEFT_MBS];
3399  uint8_t *nnz;
3400  uint8_t *nnz_cache;
3401 
3402  top_xy = mb_xy - (s->mb_stride << MB_FIELD);
3403 
3404  /* Wow, what a mess, why didn't they simplify the interlacing & intra
3405  * stuff, I can't imagine that these complex rules are worth it. */
3406 
3407  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
3408  if (FRAME_MBAFF) {
3409  const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
3410  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
3411  if (s->mb_y & 1) {
3412  if (left_mb_field_flag != curr_mb_field_flag)
3413  left_xy[LTOP] -= s->mb_stride;
3414  } else {
3415  if (curr_mb_field_flag)
3416  top_xy += s->mb_stride &
3417  (((s->current_picture.f.mb_type[top_xy] >> 7) & 1) - 1);
3418  if (left_mb_field_flag != curr_mb_field_flag)
3419  left_xy[LBOT] += s->mb_stride;
3420  }
3421  }
3422 
3423  h->top_mb_xy = top_xy;
3424  h->left_mb_xy[LTOP] = left_xy[LTOP];
3425  h->left_mb_xy[LBOT] = left_xy[LBOT];
3426  {
3427  /* For sufficiently low qp, filtering wouldn't do anything.
3428  * This is a conservative estimate: could also check beta_offset
3429  * and more accurate chroma_qp. */
3430  int qp_thresh = h->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
3431  int qp = s->current_picture.f.qscale_table[mb_xy];
3432  if (qp <= qp_thresh &&
3433  (left_xy[LTOP] < 0 ||
3434  ((qp + s->current_picture.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
3435  (top_xy < 0 ||
3436  ((qp + s->current_picture.f.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
3437  if (!FRAME_MBAFF)
3438  return 1;
3439  if ((left_xy[LTOP] < 0 ||
3440  ((qp + s->current_picture.f.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
3441  (top_xy < s->mb_stride ||
3442  ((qp + s->current_picture.f.qscale_table[top_xy - s->mb_stride] + 1) >> 1) <= qp_thresh))
3443  return 1;
3444  }
3445  }
3446 
3447  top_type = s->current_picture.f.mb_type[top_xy];
3448  left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
3449  left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];
3450  if (h->deblocking_filter == 2) {
3451  if (h->slice_table[top_xy] != h->slice_num)
3452  top_type = 0;
3453  if (h->slice_table[left_xy[LBOT]] != h->slice_num)
3454  left_type[LTOP] = left_type[LBOT] = 0;
3455  } else {
3456  if (h->slice_table[top_xy] == 0xFFFF)
3457  top_type = 0;
3458  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
3459  left_type[LTOP] = left_type[LBOT] = 0;
3460  }
3461  h->top_type = top_type;
3462  h->left_type[LTOP] = left_type[LTOP];
3463  h->left_type[LBOT] = left_type[LBOT];
3464 
3465  if (IS_INTRA(mb_type))
3466  return 0;
3467 
3468  fill_filter_caches_inter(h, s, mb_type, top_xy, left_xy,
3469  top_type, left_type, mb_xy, 0);
3470  if (h->list_count == 2)
3471  fill_filter_caches_inter(h, s, mb_type, top_xy, left_xy,
3472  top_type, left_type, mb_xy, 1);
3473 
3474  nnz = h->non_zero_count[mb_xy];
3475  nnz_cache = h->non_zero_count_cache;
3476  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
3477  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
3478  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
3479  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
3480  h->cbp = h->cbp_table[mb_xy];
3481 
3482  if (top_type) {
3483  nnz = h->non_zero_count[top_xy];
3484  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
3485  }
3486 
3487  if (left_type[LTOP]) {
3488  nnz = h->non_zero_count[left_xy[LTOP]];
3489  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
3490  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
3491  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
3492  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
3493  }
3494 
3495  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
3496  * from what the loop filter needs */
3497  if (!CABAC && h->pps.transform_8x8_mode) {
3498  if (IS_8x8DCT(top_type)) {
3499  nnz_cache[4 + 8 * 0] =
3500  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
3501  nnz_cache[6 + 8 * 0] =
3502  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
3503  }
3504  if (IS_8x8DCT(left_type[LTOP])) {
3505  nnz_cache[3 + 8 * 1] =
3506  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
3507  }
3508  if (IS_8x8DCT(left_type[LBOT])) {
3509  nnz_cache[3 + 8 * 3] =
3510  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
3511  }
3512 
3513  if (IS_8x8DCT(mb_type)) {
3514  nnz_cache[scan8[0]] =
3515  nnz_cache[scan8[1]] =
3516  nnz_cache[scan8[2]] =
3517  nnz_cache[scan8[3]] = (h->cbp & 0x1000) >> 12;
3518 
3519  nnz_cache[scan8[0 + 4]] =
3520  nnz_cache[scan8[1 + 4]] =
3521  nnz_cache[scan8[2 + 4]] =
3522  nnz_cache[scan8[3 + 4]] = (h->cbp & 0x2000) >> 12;
3523 
3524  nnz_cache[scan8[0 + 8]] =
3525  nnz_cache[scan8[1 + 8]] =
3526  nnz_cache[scan8[2 + 8]] =
3527  nnz_cache[scan8[3 + 8]] = (h->cbp & 0x4000) >> 12;
3528 
3529  nnz_cache[scan8[0 + 12]] =
3530  nnz_cache[scan8[1 + 12]] =
3531  nnz_cache[scan8[2 + 12]] =
3532  nnz_cache[scan8[3 + 12]] = (h->cbp & 0x8000) >> 12;
3533  }
3534  }
3535 
3536  return 0;
3537 }
3538 
3539 static void loop_filter(H264Context *h, int start_x, int end_x)
3540 {
3541  MpegEncContext *const s = &h->s;
3542  uint8_t *dest_y, *dest_cb, *dest_cr;
3543  int linesize, uvlinesize, mb_x, mb_y;
3544  const int end_mb_y = s->mb_y + FRAME_MBAFF;
3545  const int old_slice_type = h->slice_type;
3546  const int pixel_shift = h->pixel_shift;
3547  const int block_h = 16 >> s->chroma_y_shift;
3549  if (h->deblocking_filter) {
     /* Filter the macroblocks of the current row in columns start_x..end_x;
      * in MBAFF mode each column covers a pair of rows (two mb_y values). */
3550  for (mb_x = start_x; mb_x < end_x; mb_x++)
3551  for (mb_y = end_mb_y - FRAME_MBAFF; mb_y <= end_mb_y; mb_y++) {
3552  int mb_xy, mb_type;
3553  mb_xy = h->mb_xy = mb_x + mb_y * s->mb_stride;
       /* The filter runs after decoding, so reload the per-MB state
        * (slice number, MB type, list count) from the stored tables. */
3554  h->slice_num = h->slice_table[mb_xy];
3555  mb_type = s->current_picture.f.mb_type[mb_xy];
3556  h->list_count = h->list_counts[mb_xy];
3557 
3558  if (FRAME_MBAFF)
3559  h->mb_mbaff =
3560  h->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
3561 
3562  s->mb_x = mb_x;
3563  s->mb_y = mb_y;
       /* Compute the luma/chroma destinations of this MB in the current
        * picture; pixel_shift accounts for >8-bit sample storage. */
3564  dest_y = s->current_picture.f.data[0] +
3565  ((mb_x << pixel_shift) + mb_y * s->linesize) * 16;
3566  dest_cb = s->current_picture.f.data[1] +
3567  (mb_x << pixel_shift) * (8 << CHROMA444) +
3568  mb_y * s->uvlinesize * block_h;
3569  dest_cr = s->current_picture.f.data[2] +
3570  (mb_x << pixel_shift) * (8 << CHROMA444) +
3571  mb_y * s->uvlinesize * block_h;
3572  // FIXME simplify above
3573 
3574  if (MB_FIELD) {
       /* Field MB: double the strides and, for the bottom field,
        * rewind to the field's first line. */
3575  linesize = h->mb_linesize = s->linesize * 2;
3576  uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2;
3577  if (mb_y & 1) { // FIXME move out of this function?
3578  dest_y -= s->linesize * 15;
3579  dest_cb -= s->uvlinesize * (block_h - 1);
3580  dest_cr -= s->uvlinesize * (block_h - 1);
3581  }
3582  } else {
3583  linesize = h->mb_linesize = s->linesize;
3584  uvlinesize = h->mb_uvlinesize = s->uvlinesize;
3585  }
3586  backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
3587  uvlinesize, 0);
       /* A nonzero return means filtering is a no-op for this MB. */
3588  if (fill_filter_caches(h, mb_type))
3589  continue;
3590  h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mb_xy]);
3591  h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mb_xy]);
3592 
3593  if (FRAME_MBAFF) {
3594  ff_h264_filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr,
3595  linesize, uvlinesize);
3596  } else {
3597  ff_h264_filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb,
3598  dest_cr, linesize, uvlinesize);
3599  }
3600  }
3601  }
     /* Restore the decoder state that the loop above clobbered. */
3602  h->slice_type = old_slice_type;
3603  s->mb_x = end_x;
3604  s->mb_y = end_mb_y - FRAME_MBAFF;
3605  h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale);
3606  h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale);
3607 }
3608 
3610 {
3611  MpegEncContext *const s = &h->s;
3612  const int mb_xy = s->mb_x + s->mb_y * s->mb_stride;
3613  int mb_type = (h->slice_table[mb_xy - 1] == h->slice_num) ?
3614  s->current_picture.f.mb_type[mb_xy - 1] :
3615  (h->slice_table[mb_xy - s->mb_stride] == h->slice_num) ?
3616  s->current_picture.f.mb_type[mb_xy - s->mb_stride] : 0;
3617  h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
3618 }
3619 
3624 {
3625  MpegEncContext *const s = &h->s;
3626  int top = 16 * (s->mb_y >> FIELD_PICTURE);
3627  int pic_height = 16 * s->mb_height >> FIELD_PICTURE;
3628  int height = 16 << FRAME_MBAFF;
3629  int deblock_border = (16 + 4) << FRAME_MBAFF;
3630 
3631  if (h->deblocking_filter) {
3632  if ((top + height) >= pic_height)
3633  height += deblock_border;
3634  top -= deblock_border;
3635  }
3636 
3637  if (top >= pic_height || (top + height) < h->emu_edge_height)
3638  return;
3639 
3640  height = FFMIN(height, pic_height - top);
3641  if (top < h->emu_edge_height) {
3642  height = top + height;
3643  top = 0;
3644  }
3645 
3646  ff_draw_horiz_band(s, top, height);
3647 
3648  if (s->droppable)
3649  return;
3650 
3651  ff_thread_report_progress(&s->current_picture_ptr->f, top + height - 1,
3653 }
3654 
3655 static int decode_slice(struct AVCodecContext *avctx, void *arg)
3656 {
3657  H264Context *h = *(void **)arg;
3658  MpegEncContext *const s = &h->s;
3659  const int part_mask = s->partitioned_frame ? (ER_AC_END | ER_AC_ERROR)
3660  : 0x7F;
3661  int lf_x_start = s->mb_x;
3662 
3663  s->mb_skip_run = -1;
3664 
3666  s->codec_id != AV_CODEC_ID_H264 ||
3667  (CONFIG_GRAY && (s->flags & CODEC_FLAG_GRAY));
3668 
3669  if (h->pps.cabac) {
3670  /* realign */
3671  align_get_bits(&s->gb);
3672 
3673  /* init cabac */
3676  s->gb.buffer + get_bits_count(&s->gb) / 8,
3677  (get_bits_left(&s->gb) + 7) / 8);
3678 
3680 
3681  for (;;) {
3682  // START_TIMER
3683  int ret = ff_h264_decode_mb_cabac(h);
3684  int eos;
3685  // STOP_TIMER("decode_mb_cabac")
3686 
3687  if (ret >= 0)
3689 
3690  // FIXME optimal? or let mb_decode decode 16x32 ?
3691  if (ret >= 0 && FRAME_MBAFF) {
3692  s->mb_y++;
3693 
3694  ret = ff_h264_decode_mb_cabac(h);
3695 
3696  if (ret >= 0)
3698  s->mb_y--;
3699  }
3700  eos = get_cabac_terminate(&h->cabac);
3701 
3702  if ((s->workaround_bugs & FF_BUG_TRUNCATED) &&
3703  h->cabac.bytestream > h->cabac.bytestream_end + 2) {
3704  ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1,
3705  s->mb_y, ER_MB_END & part_mask);
3706  if (s->mb_x >= lf_x_start)
3707  loop_filter(h, lf_x_start, s->mb_x + 1);
3708  return 0;
3709  }
3710  if (ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 2) {
3711  av_log(h->s.avctx, AV_LOG_ERROR,
3712  "error while decoding MB %d %d, bytestream (%td)\n",
3713  s->mb_x, s->mb_y,
3716  s->mb_y, ER_MB_ERROR & part_mask);
3717  return -1;
3718  }
3719 
3720  if (++s->mb_x >= s->mb_width) {
3721  loop_filter(h, lf_x_start, s->mb_x);
3722  s->mb_x = lf_x_start = 0;
3723  decode_finish_row(h);
3724  ++s->mb_y;
3725  if (FIELD_OR_MBAFF_PICTURE) {
3726  ++s->mb_y;
3727  if (FRAME_MBAFF && s->mb_y < s->mb_height)
3729  }
3730  }
3731 
3732  if (eos || s->mb_y >= s->mb_height) {
3733  tprintf(s->avctx, "slice end %d %d\n",
3734  get_bits_count(&s->gb), s->gb.size_in_bits);
3735  ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1,
3736  s->mb_y, ER_MB_END & part_mask);
3737  if (s->mb_x > lf_x_start)
3738  loop_filter(h, lf_x_start, s->mb_x);
3739  return 0;
3740  }
3741  }
3742  } else {
3743  for (;;) {
3744  int ret = ff_h264_decode_mb_cavlc(h);
3745 
3746  if (ret >= 0)
3748 
3749  // FIXME optimal? or let mb_decode decode 16x32 ?
3750  if (ret >= 0 && FRAME_MBAFF) {
3751  s->mb_y++;
3752  ret = ff_h264_decode_mb_cavlc(h);
3753 
3754  if (ret >= 0)
3756  s->mb_y--;
3757  }
3758 
3759  if (ret < 0) {
3760  av_log(h->s.avctx, AV_LOG_ERROR,
3761  "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
3763  s->mb_y, ER_MB_ERROR & part_mask);
3764  return -1;
3765  }
3766 
3767  if (++s->mb_x >= s->mb_width) {
3768  loop_filter(h, lf_x_start, s->mb_x);
3769  s->mb_x = lf_x_start = 0;
3770  decode_finish_row(h);
3771  ++s->mb_y;
3772  if (FIELD_OR_MBAFF_PICTURE) {
3773  ++s->mb_y;
3774  if (FRAME_MBAFF && s->mb_y < s->mb_height)
3776  }
3777  if (s->mb_y >= s->mb_height) {
3778  tprintf(s->avctx, "slice end %d %d\n",
3779  get_bits_count(&s->gb), s->gb.size_in_bits);
3780 
3781  if (get_bits_left(&s->gb) == 0) {
3783  s->mb_x - 1, s->mb_y,
3784  ER_MB_END & part_mask);
3785 
3786  return 0;
3787  } else {
3789  s->mb_x - 1, s->mb_y,
3790  ER_MB_END & part_mask);
3791 
3792  return -1;
3793  }
3794  }
3795  }
3796 
3797  if (get_bits_left(&s->gb) <= 0 && s->mb_skip_run <= 0) {
3798  tprintf(s->avctx, "slice end %d %d\n",
3799  get_bits_count(&s->gb), s->gb.size_in_bits);
3800  if (get_bits_left(&s->gb) == 0) {
3802  s->mb_x - 1, s->mb_y,
3803  ER_MB_END & part_mask);
3804  if (s->mb_x > lf_x_start)
3805  loop_filter(h, lf_x_start, s->mb_x);
3806 
3807  return 0;
3808  } else {
3810  s->mb_y, ER_MB_ERROR & part_mask);
3811 
3812  return -1;
3813  }
3814  }
3815  }
3816  }
3817 }
3818 
3825 static int execute_decode_slices(H264Context *h, int context_count)
3826 {
3827  MpegEncContext *const s = &h->s;
3828  AVCodecContext *const avctx = s->avctx;
3829  H264Context *hx;
3830  int i;
3831 
3832  if (s->mb_y >= s->mb_height) {
3834  "Input contains more MB rows than the frame height.\n");
3835  return AVERROR_INVALIDDATA;
3836  }
3837 
3838  if (s->avctx->hwaccel ||
3840  return 0;
3841  if (context_count == 1) {
3842  return decode_slice(avctx, &h);
3843  } else {
3844  for (i = 1; i < context_count; i++) {
3845  hx = h->thread_context[i];
3846  hx->s.err_recognition = avctx->err_recognition;
3847  hx->s.error_count = 0;
3848  }
3849 
3850  avctx->execute(avctx, decode_slice, h->thread_context,
3851  NULL, context_count, sizeof(void *));
3852 
3853  /* pull back stuff from slices to master context */
3854  hx = h->thread_context[context_count - 1];
3855  s->mb_x = hx->s.mb_x;
3856  s->mb_y = hx->s.mb_y;
3857  s->droppable = hx->s.droppable;
3859  for (i = 1; i < context_count; i++)
3860  h->s.error_count += h->thread_context[i]->s.error_count;
3861  }
3862 
3863  return 0;
3864 }
3865 
3866 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
3867  int parse_extradata)
3868 {
3869  MpegEncContext *const s = &h->s;
3870  AVCodecContext *const avctx = s->avctx;
3871  H264Context *hx;
3872  int buf_index;
3873  int context_count;
3874  int next_avc;
3875  int pass = !(avctx->active_thread_type & FF_THREAD_FRAME);
3876  int nals_needed = 0;
3877  int nal_index;
3878 
3880  if (!(s->flags2 & CODEC_FLAG2_CHUNKS)) {
3881  h->current_slice = 0;
3882  if (!s->first_field)
3884  ff_h264_reset_sei(h);
3885  }
3886 
3887  for (; pass <= 1; pass++) {
3888  buf_index = 0;
3889  context_count = 0;
3890  next_avc = h->is_avc ? 0 : buf_size;
3891  nal_index = 0;
3892  for (;;) {
3893  int consumed;
3894  int dst_length;
3895  int bit_length;
3896  const uint8_t *ptr;
3897  int i, nalsize = 0;
3898  int err;
3899 
3900  if (buf_index >= next_avc) {
3901  if (buf_index >= buf_size - h->nal_length_size)
3902  break;
3903  nalsize = 0;
3904  for (i = 0; i < h->nal_length_size; i++)
3905  nalsize = (nalsize << 8) | buf[buf_index++];
3906  if (nalsize <= 0 || nalsize > buf_size - buf_index) {
3907  av_log(h->s.avctx, AV_LOG_ERROR,
3908  "AVC: nal size %d\n", nalsize);
3909  break;
3910  }
3911  next_avc = buf_index + nalsize;
3912  } else {
3913  // start code prefix search
3914  for (; buf_index + 3 < next_avc; buf_index++)
3915  // This should always succeed in the first iteration.
3916  if (buf[buf_index] == 0 &&
3917  buf[buf_index + 1] == 0 &&
3918  buf[buf_index + 2] == 1)
3919  break;
3920 
3921  if (buf_index + 3 >= buf_size) {
3922  buf_index = buf_size;
3923  break;
3924  }
3925 
3926  buf_index += 3;
3927  if (buf_index >= next_avc)
3928  continue;
3929  }
3930 
3931  hx = h->thread_context[context_count];
3932 
3933  ptr = ff_h264_decode_nal(hx, buf + buf_index, &dst_length,
3934  &consumed, next_avc - buf_index);
3935  if (ptr == NULL || dst_length < 0) {
3936  buf_index = -1;
3937  goto end;
3938  }
3939  i = buf_index + consumed;
3940  if ((s->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
3941  buf[i] == 0x00 && buf[i + 1] == 0x00 &&
3942  buf[i + 2] == 0x01 && buf[i + 3] == 0xE0)
3944 
3945  if (!(s->workaround_bugs & FF_BUG_TRUNCATED))
3946  while (dst_length > 0 && ptr[dst_length - 1] == 0)
3947  dst_length--;
3948  bit_length = !dst_length ? 0
3949  : (8 * dst_length -
3950  decode_rbsp_trailing(h, ptr + dst_length - 1));
3951 
3952  if (s->avctx->debug & FF_DEBUG_STARTCODE)
3953  av_log(h->s.avctx, AV_LOG_DEBUG,
3954  "NAL %d at %d/%d length %d\n",
3955  hx->nal_unit_type, buf_index, buf_size, dst_length);
3956 
3957  if (h->is_avc && (nalsize != consumed) && nalsize)
3958  av_log(h->s.avctx, AV_LOG_DEBUG,
3959  "AVC: Consumed only %d bytes instead of %d\n",
3960  consumed, nalsize);
3961 
3962  buf_index += consumed;
3963  nal_index++;
3964 
3965  if (pass == 0) {
3966  /* packets can sometimes contain multiple PPS/SPS,
3967  * e.g. two PAFF field pictures in one packet, or a demuxer
3968  * which splits NALs strangely if so, when frame threading we
3969  * can't start the next thread until we've read all of them */
3970  switch (hx->nal_unit_type) {
3971  case NAL_SPS:
3972  case NAL_PPS:
3973  nals_needed = nal_index;
3974  break;
3975  case NAL_DPA:
3976  case NAL_IDR_SLICE:
3977  case NAL_SLICE:
3978  init_get_bits(&hx->s.gb, ptr, bit_length);
3979  if (!get_ue_golomb(&hx->s.gb))
3980  nals_needed = nal_index;
3981  }
3982  continue;
3983  }
3984 
3985  // FIXME do not discard SEI id
3986  if (avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0)
3987  continue;
3988 
3989 again:
3990  /* Ignore every NAL unit type except PPS and SPS during extradata
3991  * parsing. Decoding slices is not possible in codec init
3992  * with frame-mt */
3993  if (parse_extradata && HAVE_THREADS &&
3995  (hx->nal_unit_type != NAL_PPS &&
3996  hx->nal_unit_type != NAL_SPS)) {
3997  av_log(avctx, AV_LOG_INFO, "Ignoring NAL unit %d during "
3998  "extradata parsing\n", hx->nal_unit_type);
4000  }
4001  err = 0;
4002  switch (hx->nal_unit_type) {
4003  case NAL_IDR_SLICE:
4004  if (h->nal_unit_type != NAL_IDR_SLICE) {
4005  av_log(h->s.avctx, AV_LOG_ERROR,
4006  "Invalid mix of idr and non-idr slices\n");
4007  buf_index = -1;
4008  goto end;
4009  }
4010  idr(h); // FIXME ensure we don't lose some frames if there is reordering
4011  case NAL_SLICE:
4012  init_get_bits(&hx->s.gb, ptr, bit_length);
4013  hx->intra_gb_ptr =
4014  hx->inter_gb_ptr = &hx->s.gb;
4015  hx->s.data_partitioning = 0;
4016 
4017  if ((err = decode_slice_header(hx, h)))
4018  break;
4019 
4021  (hx->nal_unit_type == NAL_IDR_SLICE) ||
4022  (h->sei_recovery_frame_cnt >= 0);
4023 
4024  if (h->current_slice == 1) {
4025  if (!(s->flags2 & CODEC_FLAG2_CHUNKS))
4026  decode_postinit(h, nal_index >= nals_needed);
4027 
4028  if (s->avctx->hwaccel &&
4029  s->avctx->hwaccel->start_frame(s->avctx, NULL, 0) < 0)
4030  return -1;
4034  }
4035 
4036  if (hx->redundant_pic_count == 0 &&
4037  (avctx->skip_frame < AVDISCARD_NONREF ||
4038  hx->nal_ref_idc) &&
4039  (avctx->skip_frame < AVDISCARD_BIDIR ||
4041  (avctx->skip_frame < AVDISCARD_NONKEY ||
4043  avctx->skip_frame < AVDISCARD_ALL) {
4044  if (avctx->hwaccel) {
4045  if (avctx->hwaccel->decode_slice(avctx,
4046  &buf[buf_index - consumed],
4047  consumed) < 0)
4048  return -1;
4049  } else if (CONFIG_H264_VDPAU_DECODER &&
4051  static const uint8_t start_code[] = {
4052  0x00, 0x00, 0x01 };
4053  ff_vdpau_add_data_chunk(s, start_code,
4054  sizeof(start_code));
4055  ff_vdpau_add_data_chunk(s, &buf[buf_index - consumed],
4056  consumed);
4057  } else
4058  context_count++;
4059  }
4060  break;
4061  case NAL_DPA:
4062  if (s->flags2 & CODEC_FLAG2_CHUNKS) {
4063  av_log(h->s.avctx, AV_LOG_ERROR,
4064  "Decoding in chunks is not supported for "
4065  "partitioned slices.\n");
4066  return AVERROR(ENOSYS);
4067  }
4068 
4069  init_get_bits(&hx->s.gb, ptr, bit_length);
4070  hx->intra_gb_ptr =
4071  hx->inter_gb_ptr = NULL;
4072 
4073  if ((err = decode_slice_header(hx, h)) < 0) {
4074  /* make sure data_partitioning is cleared if it was set
4075  * before, so we don't try decoding a slice without a valid
4076  * slice header later */
4077  s->data_partitioning = 0;
4078  break;
4079  }
4080 
4081  hx->s.data_partitioning = 1;
4082  break;
4083  case NAL_DPB:
4084  init_get_bits(&hx->intra_gb, ptr, bit_length);
4085  hx->intra_gb_ptr = &hx->intra_gb;
4086  break;
4087  case NAL_DPC:
4088  init_get_bits(&hx->inter_gb, ptr, bit_length);
4089  hx->inter_gb_ptr = &hx->inter_gb;
4090 
4091  if (hx->redundant_pic_count == 0 &&
4092  hx->intra_gb_ptr &&
4093  hx->s.data_partitioning &&
4094  s->current_picture_ptr &&
4095  s->context_initialized &&
4096  (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) &&
4097  (avctx->skip_frame < AVDISCARD_BIDIR ||
4099  (avctx->skip_frame < AVDISCARD_NONKEY ||
4101  avctx->skip_frame < AVDISCARD_ALL)
4102  context_count++;
4103  break;
4104  case NAL_SEI:
4105  init_get_bits(&s->gb, ptr, bit_length);
4106  ff_h264_decode_sei(h);
4107  break;
4108  case NAL_SPS:
4109  init_get_bits(&s->gb, ptr, bit_length);
4110  if (ff_h264_decode_seq_parameter_set(h) < 0 &&
4111  h->is_avc && (nalsize != consumed) && nalsize) {
4112  av_log(h->s.avctx, AV_LOG_DEBUG,
4113  "SPS decoding failure, trying again with the complete NAL\n");
4114  init_get_bits(&s->gb, buf + buf_index + 1 - consumed,
4115  8 * (nalsize - 1));
4117  }
4118 
4119  if (h264_set_parameter_from_sps(h) < 0) {
4120  buf_index = -1;
4121  goto end;
4122  }
4123  break;
4124  case NAL_PPS:
4125  init_get_bits(&s->gb, ptr, bit_length);
4126  ff_h264_decode_picture_parameter_set(h, bit_length);
4127  break;
4128  case NAL_AUD:
4129  case NAL_END_SEQUENCE:
4130  case NAL_END_STREAM:
4131  case NAL_FILLER_DATA:
4132  case NAL_SPS_EXT:
4133  case NAL_AUXILIARY_SLICE:
4134  break;
4135  case NAL_FF_IGNORE:
4136  break;
4137  default:
4138  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
4139  hx->nal_unit_type, bit_length);
4140  }
4141 
4142  if (context_count == h->max_contexts) {
4143  execute_decode_slices(h, context_count);
4144  context_count = 0;
4145  }
4146 
4147  if (err < 0) {
4148  av_log(h->s.avctx, AV_LOG_ERROR, "decode_slice_header error\n");
4149  h->ref_count[0] = h->ref_count[1] = h->list_count = 0;
4150  } else if (err == 1) {
4151  /* Slice could not be decoded in parallel mode, copy down
4152  * NAL unit stuff to context 0 and restart. Note that
4153  * rbsp_buffer is not transferred, but since we no longer
4154  * run in parallel mode this should not be an issue. */
4155  h->nal_unit_type = hx->nal_unit_type;
4156  h->nal_ref_idc = hx->nal_ref_idc;
4157  hx = h;
4158  goto again;
4159  }
4160  }
4161  }
4162  if (context_count)
4163  execute_decode_slices(h, context_count);
4164 
4165 end:
4166  /* clean up */
4167  if (s->current_picture_ptr && s->current_picture_ptr->owner2 == s &&
4168  !s->droppable) {
4171  }
4172 
4173  return buf_index;
4174 }
4175 
4179 static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size)
4180 {
4181  if (pos == 0)
4182  pos = 1; // avoid infinite loops (i doubt that is needed but ...)
4183  if (pos + 10 > buf_size)
4184  pos = buf_size; // oops ;)
4185 
4186  return pos;
4187 }
4188 
4189 static int decode_frame(AVCodecContext *avctx, void *data,
4190  int *got_frame, AVPacket *avpkt)
4191 {
4192  const uint8_t *buf = avpkt->data;
4193  int buf_size = avpkt->size;
4194  H264Context *h = avctx->priv_data;
4195  MpegEncContext *s = &h->s;
4196  AVFrame *pict = data;
4197  int buf_index = 0;
4198 
4199  s->flags = avctx->flags;
4200  s->flags2 = avctx->flags2;
4201  /* reset data partitioning here, to ensure GetBitContexts from previous
4202  * packets do not get used. */
4203  s->data_partitioning = 0;
4204 
4205  /* end of stream, output what is still in the buffers */
4206 out:
4207  if (buf_size == 0) {
4208  Picture *out;
4209  int i, out_idx;
4210 
4212 
4213  // FIXME factorize this with the output code below
4214  out = h->delayed_pic[0];
4215  out_idx = 0;
4216  for (i = 1;
4217  h->delayed_pic[i] &&
4218  !h->delayed_pic[i]->f.key_frame &&
4219  !h->delayed_pic[i]->mmco_reset;
4220  i++)
4221  if (h->delayed_pic[i]->poc < out->poc) {
4222  out = h->delayed_pic[i];
4223  out_idx = i;
4224  }
4225 
4226  for (i = out_idx; h->delayed_pic[i]; i++)
4227  h->delayed_pic[i] = h->delayed_pic[i + 1];
4228 
4229  if (out) {
4230  *got_frame = 1;
4231  *pict = out->f;
4232  }
4233 
4234  return buf_index;
4235  }
4236 
4237  buf_index = decode_nal_units(h, buf, buf_size, 0);
4238  if (buf_index < 0)
4239  return -1;
4240 
4242  buf_size = 0;
4243  goto out;
4244  }
4245 
4246  if (!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr) {
4247  if (avctx->skip_frame >= AVDISCARD_NONREF)
4248  return 0;
4249  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
4250  return -1;
4251  }
4252 
4253  if (!(s->flags2 & CODEC_FLAG2_CHUNKS) ||
4254  (s->mb_y >= s->mb_height && s->mb_height)) {
4255  if (s->flags2 & CODEC_FLAG2_CHUNKS)
4256  decode_postinit(h, 1);
4257 
4258  field_end(h, 0);
4259  h->context_reinitialized = 0;
4260 
4261  if (!h->next_output_pic) {
4262  /* Wait for second field. */
4263  *got_frame = 0;
4264  } else {
4265  *got_frame = 1;
4266  *pict = h->next_output_pic->f;
4267  }
4268  }
4269 
4270  assert(pict->data[0] || !*got_frame);
4271  ff_print_debug_info(s, pict);
4272 
4273  return get_consumed_bytes(s, buf_index, buf_size);
4274 }
4275 
4277 {
4278  int i;
4279 
4280  free_tables(h, 1); // FIXME cleanup init stuff perhaps
4281 
4282  for (i = 0; i < MAX_SPS_COUNT; i++)
4283  av_freep(h->sps_buffers + i);
4284 
4285  for (i = 0; i < MAX_PPS_COUNT; i++)
4286  av_freep(h->pps_buffers + i);
4287 }
4288 
4290 {
4291  H264Context *h = avctx->priv_data;
4292  MpegEncContext *s = &h->s;
4293 
4295 
4296  ff_MPV_common_end(s);
4297 
4298  // memset(h, 0, sizeof(H264Context));
4299 
4300  return 0;
4301 }
4302 
/* Human-readable names for the H.264 profile_idc values this decoder can
 * report via AVCodecContext.profile; the list is terminated by
 * FF_PROFILE_UNKNOWN as required by the AVProfile API. */
4303 static const AVProfile profiles[] = {
4304  { FF_PROFILE_H264_BASELINE, "Baseline" },
4305  { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline" },
4306  { FF_PROFILE_H264_MAIN, "Main" },
4307  { FF_PROFILE_H264_EXTENDED, "Extended" },
4308  { FF_PROFILE_H264_HIGH, "High" },
4309  { FF_PROFILE_H264_HIGH_10, "High 10" },
4310  { FF_PROFILE_H264_HIGH_10_INTRA, "High 10 Intra" },
4311  { FF_PROFILE_H264_HIGH_422, "High 4:2:2" },
4312  { FF_PROFILE_H264_HIGH_422_INTRA, "High 4:2:2 Intra" },
4313  { FF_PROFILE_H264_HIGH_444, "High 4:4:4" },
4314  { FF_PROFILE_H264_HIGH_444_PREDICTIVE, "High 4:4:4 Predictive" },
4315  { FF_PROFILE_H264_HIGH_444_INTRA, "High 4:4:4 Intra" },
4316  { FF_PROFILE_H264_CAVLC_444, "CAVLC 4:4:4" },
4317  { FF_PROFILE_UNKNOWN },
4318 };
4319 
4321  .name = "h264",
4322  .type = AVMEDIA_TYPE_VIDEO,
4323  .id = AV_CODEC_ID_H264,
4324  .priv_data_size = sizeof(H264Context),
4327  .decode = decode_frame,
4328  .capabilities = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 |
4331  .flush = flush_dpb,
4332  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
4333  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
4334  .update_thread_context = ONLY_IF_THREADS_ENABLED(decode_update_thread_context),
4335  .profiles = NULL_IF_CONFIG_SMALL(profiles),
4336 };
4337 
4338 #if CONFIG_H264_VDPAU_DECODER
4339 AVCodec ff_h264_vdpau_decoder = {
4340  .name = "h264_vdpau",
4341  .type = AVMEDIA_TYPE_VIDEO,
4342  .id = AV_CODEC_ID_H264,
4343  .priv_data_size = sizeof(H264Context),
4346  .decode = decode_frame,
4348  .flush = flush_dpb,
4349  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
4350  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
4351  AV_PIX_FMT_NONE},
4352  .profiles = NULL_IF_CONFIG_SMALL(profiles),
4353 };
4354 #endif
int chroma_format_idc
Definition: h264.h:150
Picture default_ref_list[2][32]
base reference list for all slices of a coded picture
Definition: h264.h:486
#define PICT_BOTTOM_FIELD
Definition: mpegvideo.h:640
#define CONFIG_H264_VDPAU_DECODER
Definition: config.h:420
void ff_h264_direct_dist_scale_factor(H264Context *const h)
Definition: h264_direct.c:52
enum AVPixelFormat ff_hwaccel_pixfmt_list_420[]
Definition: mpegvideo.c:133
GetBitContext inter_gb
Definition: h264.h:385
#define XCHG(a, b, xchg)
int video_signal_type_present_flag
Definition: h264.h:173
#define VERT_PRED8x8
Definition: h264pred.h:70
void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
Definition: vdpau.c:41
int last_slice_type
Definition: h264.h:530
int ff_h264_decode_mb_cabac(H264Context *h)
Decode a CABAC coded macroblock.
Definition: h264_cabac.c:1861
static void clone_tables(H264Context *dst, H264Context *src, int i)
Mimic alloc_tables(), but for every context thread.
Definition: h264.c:921
#define ER_AC_END
Definition: mpegvideo.h:498
const struct AVCodec * codec
Definition: avcodec.h:1348
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:61
int ff_h264_decode_seq_parameter_set(H264Context *h)
Decode SPS.
Definition: h264_ps.c:305
#define PICT_TOP_FIELD
Definition: mpegvideo.h:639
discard all frames except keyframes
Definition: avcodec.h:535
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3106
av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
Definition: dsputil.c:2656
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:61
unsigned int top_samples_available
Definition: h264.h:286
static enum PixelFormat get_pixel_format(H264Context *h)
Definition: h264.c:2460
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, int parse_extradata)
Definition: h264.c:3866
unsigned int topleft_samples_available
Definition: h264.h:285
#define DC_128_PRED8x8
Definition: h264pred.h:76
int single_decode_warning
1 if the single thread fallback warning has already been displayed, 0 otherwise.
Definition: h264.h:528
5: top field, bottom field, top field repeated, in that order
Definition: h264.h:138
static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
Definition: h264.c:1777
#define VERT_LEFT_PRED
Definition: h264pred.h:45
#define HAVE_THREADS
Definition: config.h:236
const uint8_t ff_zigzag_direct[64]
Definition: dsputil.c:59
int size
GetBitContext * intra_gb_ptr
Definition: h264.h:386
void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
Definition: mpegvideo.c:1283
This structure describes decoded (raw) audio or video data.
Definition: avcodec.h:989
int mb_aff_frame
Definition: h264.h:348
void ff_vdpau_h264_picture_complete(MpegEncContext *s)
Definition: vdpau.c:149
static void copy_parameter_set(void **to, void **from, int count, int size)
Definition: h264.c:1113
int delta_poc[2]
Definition: h264.h:464
#define IS_SUB_4X4(a)
Definition: mpegvideo.h:122
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:171
int last_qscale_diff
Definition: h264.h:406
#define LEFT_MBS
Definition: h264.h:66
mpeg2/4, h264 default
Definition: avcodec.h:585
int cbp
Definition: h264.h:401
3: top field, bottom field, in that order
Definition: h264.h:136
const uint8_t * ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length)
Decode a network abstraction layer unit.
Definition: h264.c:170
av_cold int ff_MPV_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:882
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:70
misc image utilities
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:237
int weighted_bipred_idc
Definition: h264.h:215
int chroma_qp_index_offset[2]
Definition: h264.h:218
const uint8_t * bytestream_end
Definition: cabac.h:48
int left_type[LEFT_MBS]
Definition: h264.h:277
#define CHROMA422
Definition: h264.h:88
uint16_t * cbp_table
Definition: h264.h:400
int qscale_type
Definition: avcodec.h:1150
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:1043
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264.h:572
MMCO mmco[MAX_MMCO_COUNT]
memory management control operations buffer.
Definition: h264.h:496
static void align_get_bits(GetBitContext *s)
Definition: get_bits.h:412
7: frame doubling
Definition: h264.h:140
static av_always_inline void mc_part_weighted(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, h264_weight_func luma_weight_op, h264_weight_func chroma_weight_op, h264_biweight_func luma_weight_avg, h264_biweight_func chroma_weight_avg, int list0, int list1, int pixel_shift, int chroma_idc)
Definition: h264.c:623
#define MAX_PPS_COUNT
Definition: h264.h:43
Sequence parameter set.
Definition: h264.h:147
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2079
static const uint8_t field_scan8x8[64]
Definition: h264data.h:115
static void init_dequant_tables(H264Context *h)
Definition: h264.c:848
int bitstream_restriction_flag
Definition: h264.h:184
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:154
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:213
#define FMO
Definition: h264.h:53
int num
numerator
Definition: rational.h:44
void(* h264_idct_add16intra)(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:100
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: avcodec.h:1225
void avcodec_set_dimensions(AVCodecContext *s, int width, int height)
Definition: utils.c:149
int size
Definition: avcodec.h:916
enum AVCodecID codec_id
Definition: mpegvideo.h:227
int outputed_poc
Definition: h264.h:490
HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the b...
Definition: pixfmt.h:123
#define CONFIG_SVQ3_DECODER
Definition: config.h:492
const uint8_t * buffer
Definition: get_bits.h:53
Picture parameter set.
Definition: h264.h:207
void * thread_opaque
used by multithreading to store frame-specific info
Definition: avcodec.h:1294
static av_always_inline int dctcoef_get(DCTELEM *mb, int high_bit_depth, int index)
Definition: h264.c:1759
int field_picture
whether or not the picture was encoded in separate fields
Definition: mpegvideo.h:139
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1724
#define pass
Definition: fft.c:334
const uint8_t * field_scan8x8_q0
Definition: h264.h:422
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1533
int frame_mbs_only_flag
Definition: h264.h:163
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:55
int is_avc
Used to parse AVC variant of h264.
Definition: h264.h:447
int mmco_index
Definition: h264.h:497
static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264.c:1150
uint8_t zigzag_scan8x8_cavlc[64]
Definition: h264.h:414
mpegvideo header.
int ff_h264_get_profile(SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264.c:2393
uint32_t dequant8_buffer[6][QP_MAX_NUM+1][64]
Definition: h264.h:337
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264.h:302
H264Context.
Definition: h264.h:254
discard all
Definition: avcodec.h:536
static int context_init(H264Context *h)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264.c:944
int mmco_reset
h264 MMCO_RESET set this 1. Reordering code must not mix pictures before and after MMCO_RESET...
Definition: mpegvideo.h:132
#define IS_INTRA4x4(a)
Definition: mpegvideo.h:105
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
Definition: h264.h:466
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2711
uint32_t num_units_in_tick
Definition: h264.h:180
struct H264Context H264Context
H264Context.
4: bottom field, top field, in that order
Definition: h264.h:137
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:252
static int get_lowest_part_list_y(H264Context *h, Picture *pic, int n, int height, int y_offset, int list)
Definition: h264.c:295
int profile
profile
Definition: avcodec.h:2815
#define HOR_PRED8x8
Definition: h264pred.h:69
int stride
Definition: mace.c:144
AVCodec.
Definition: avcodec.h:2960
void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc)
Definition: svq3.c:175
int qscale
QP.
Definition: mpegvideo.h:342
#define AV_WN32A(p, v)
Definition: intreadwrite.h:458
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264.h:344
#define AV_COPY32(d, s)
Definition: intreadwrite.h:506
static const uint8_t rem6[QP_MAX_NUM+1]
Definition: h264.c:50
#define IS_INTRA_PCM(a)
Definition: mpegvideo.h:111
int profile_idc
Definition: h264.h:148
unsigned current_sps_id
id of the current SPS
Definition: h264.h:328
static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int height, int delta, int list, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int src_x_offset, int src_y_offset, qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op, int pixel_shift, int chroma_idc)
Definition: h264.c:461
static av_always_inline uint32_t pack16to32(int a, int b)
Definition: h264.h:753
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:229
static const uint8_t zigzag_scan[16]
Definition: h264data.h:55
void ff_h264_init_cabac_states(H264Context *h)
Definition: h264_cabac.c:1262
#define USES_LIST(a, list)
does this mb use listX, note does not work if subMBs
Definition: mpegvideo.h:126
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1465
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:151
#define CONFIG_GRAY
Definition: config.h:277
Switching Intra.
Definition: avutil.h:249
uint8_t * chroma_pred_mode_table
Definition: h264.h:405
#define IS_DIR(a, part, list)
Definition: mpegvideo.h:125
static const uint8_t div6[QP_MAX_NUM+1]
Definition: h264.c:56
enum AVDiscard skip_frame
Definition: avcodec.h:2907
int ff_h264_decode_ref_pic_list_reordering(H264Context *h)
Definition: h264_refs.c:195
#define MAX_THREADS
Definition: mpegvideo.h:61
static const uint8_t golomb_to_pict_type[5]
Definition: h264data.h:38
#define AV_RN32A(p)
Definition: intreadwrite.h:446
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2622
int long_ref
1->long term reference 0->short term reference
Definition: mpegvideo.h:135
static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
Definition: h264.c:1895
static int decode_init_thread_copy(AVCodecContext *avctx)
Definition: h264.c:1128
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: utils.c:72
#define IS_8x8DCT(a)
Definition: h264.h:96
uint8_t scaling_matrix4[6][16]
Definition: h264.h:223
const uint8_t * bytestream
Definition: cabac.h:47
int ref2frm[MAX_SLICES][2][64]
reference to frame number lists, used in the loop filter, the first 2 are for -2,-1 ...
Definition: h264.h:381
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:50
int deblocking_filter_parameters_present
deblocking_filter_parameters_present_flag
Definition: h264.h:219
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
Definition: mimic.c:228
#define IS_INTER(a)
Definition: mpegvideo.h:109
DCTELEM mb_luma_dc[3][16 *2]
Definition: h264.h:390
uint32_t(*[6] dequant4_coeff)[16]
Definition: h264.h:338
int ff_MPV_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo.c:1069
uint8_t
int prev_frame_num_offset
for POC type 2
Definition: h264.h:469
int use_weight
Definition: h264.h:355
int mb_uvlinesize
Definition: h264.h:323
int full_range
Definition: h264.h:174
void(* h264_luma_dc_dequant_idct)(DCTELEM *output, DCTELEM *input, int qmul)
Definition: h264dsp.h:103
#define IS_8X16(a)
Definition: mpegvideo.h:117
int offset_for_non_ref_pic
Definition: h264.h:156
int context_reinitialized
Definition: h264.h:451
float delta
#define PICT_FRAME
Definition: mpegvideo.h:641
void ff_h264_reset_sei(H264Context *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:40
Definition: h264.h:110
int luma_weight[48][2][2]
Definition: h264.h:360
enum OutputFormat out_format
output format
Definition: mpegvideo.h:219
void(* qpel_mc_func)(uint8_t *dst, uint8_t *src, int stride)
Definition: dsputil.h:144
enum AVColorPrimaries color_primaries
Definition: h264.h:176
av_cold void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:326
void(* h264_idct_add)(uint8_t *dst, DCTELEM *block, int stride)
Definition: h264dsp.h:82
DCTELEM mb[16 *48 *2]
as a dct coefficient is int32_t in high depth, we need to reserve twice the space.
Definition: h264.h:389
AVCodec ff_h264_decoder
Definition: h264.c:4320
Picture ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264.h:378
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int cabac
entropy_coding_mode_flag
Definition: h264.h:209
int mb_xy
Definition: h264.h:427
Definition: h264.h:108
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:198
#define LUMA_DC_BLOCK_INDEX
Definition: h264.h:733
#define DIAG_DOWN_LEFT_PRED
Definition: h264pred.h:41
static const uint8_t dequant8_coeff_init[6][6]
Definition: h264data.h:263
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:375
unsigned int crop_right
frame_cropping_rect_right_offset
Definition: h264.h:168
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1454
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:313
void ff_h264_fill_mbaff_ref_list(H264Context *h)
Definition: h264_refs.c:309
#define TOP_DC_PRED
Definition: h264pred.h:50
const char data[16]
Definition: mxf.c:66
int transform_bypass
qpprime_y_zero_transform_bypass_flag
Definition: h264.h:151
uint8_t * data
Definition: avcodec.h:915
static int init_poc(H264Context *h)
Definition: h264.c:2175
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:192
static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
Identify the exact end of the bitstream.
Definition: h264.c:280
int left_mb_xy[LEFT_MBS]
Definition: h264.h:272
int top_mb_xy
Definition: h264.h:270
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:43
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_...
Definition: pixfmt.h:78
int ff_h264_get_slice_type(const H264Context *h)
Reconstruct bitstream slice_type.
Definition: h264.c:3290
int redundant_pic_cnt_present
redundant_pic_cnt_present_flag
Definition: h264.h:221
static const uint8_t dequant8_coeff_init_scan[16]
Definition: h264data.h:259
int flags2
AVCodecContext.flags2.
Definition: mpegvideo.h:231
int interlaced_frame
The content of the picture is interlaced.
Definition: avcodec.h:1232
#define MAX_DELAYED_PIC_COUNT
Definition: h264.h:47
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:247
void ff_MPV_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1581
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define MB_MBAFF
Definition: h264.h:62
Picture * next_output_pic
Definition: h264.h:489
static av_cold void common_init(H264Context *h)
Definition: h264.c:967
#define AV_COPY64(d, s)
Definition: intreadwrite.h:510
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2086
int luma_log2_weight_denom
Definition: h264.h:357
#define IS_INTERLACED(a)
Definition: mpegvideo.h:112
static int h264_set_parameter_from_sps(H264Context *h)
Definition: h264.c:2413
int chroma_weight[48][2][2][2]
Definition: h264.h:361
static int init(AVCodecParserContext *s)
Definition: h264_parser.c:335
int last_pocs[MAX_DELAYED_PIC_COUNT]
Definition: h264.h:488
static void init_dequant4_coeff_table(H264Context *h)
Definition: h264.c:822
#define r
Definition: input.c:51
void(* pred8x8l_add[2])(uint8_t *pix, const DCTELEM *block, ptrdiff_t stride)
Definition: h264pred.h:102
const uint8_t * zigzag_scan8x8_cavlc_q0
Definition: h264.h:420
H.264 / AVC / MPEG4 part10 codec.
int frame_num
Definition: h264.h:465
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:547
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
Definition: pthread.c:702
enum AVCodecID id
Definition: avcodec.h:2974
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:289
int mb_aff
mb_adaptive_frame_field_flag
Definition: h264.h:164
enum AVColorTransferCharacteristic color_trc
Definition: h264.h:177
H264PredContext hpc
Definition: h264.h:284
void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:399
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:128
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1634
static int get_ue_golomb(GetBitContext *gb)
read unsigned exp golomb code.
Definition: golomb.h:53
static int decode_slice_header(H264Context *h, H264Context *h0)
Decode a slice header.
Definition: h264.c:2591
const uint8_t * zigzag_scan_q0
Definition: h264.h:418
int poc_type
pic_order_cnt_type
Definition: h264.h:153
Multithreading support functions.
void av_log_ask_for_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message asking for a sample.
void(* h264_biweight_func)(uint8_t *dst, uint8_t *src, int stride, int height, int log2_denom, int weightd, int weights, int offset)
Definition: h264dsp.h:36
static const uint16_t mask[17]
Definition: lzw.c:38
void ff_h264_hl_decode_mb(H264Context *h)
Definition: h264.c:1978
int reference
is this picture used as reference The values for this are the same as the MpegEncContext.picture_structure variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
Definition: avcodec.h:1132
int chroma_y_shift
Definition: mpegvideo.h:657
int nal_unit_type
Definition: h264.h:440
int use_weight_chroma
Definition: h264.h:356
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:567
int num_reorder_frames
Definition: h264.h:185
#define AV_RB16
Definition: intreadwrite.h:53
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:63
discard all bidirectional frames
Definition: avcodec.h:534
void ff_h264_direct_ref_list_init(H264Context *const h)
Definition: h264_direct.c:106
#define DC_128_PRED
Definition: h264pred.h:51
#define LEFT_DC_PRED
Definition: h264pred.h:49
static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale)
Get the chroma qp.
Definition: h264.h:774
GetBitContext * inter_gb_ptr
Definition: h264.h:387
void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp)
Definition: svq3.c:140
static void copy_picture_range(Picture **to, Picture **from, int count, MpegEncContext *new_base, MpegEncContext *old_base)
Definition: h264.c:1098
#define ER_MB_ERROR
Definition: mpegvideo.h:502
#define ALZHEIMER_DC_L0T_PRED8x8
Definition: h264pred.h:79
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:358
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:88
Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
Definition: h264.h:487
#define IS_SUB_8X4(a)
Definition: mpegvideo.h:120
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2752
int mb_field_decoding_flag
Definition: h264.h:349
static void flush_dpb(AVCodecContext *avctx)
Definition: h264.c:2160
static int h264_slice_header_init(H264Context *, int)
Definition: h264.c:2511
int capabilities
Codec capabilities.
Definition: avcodec.h:2979
int emu_edge_width
Definition: h264.h:325
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegvideo.h:337
uint8_t * base[AV_NUM_DATA_POINTERS]
pointer to the first allocated byte of the picture.
Definition: avcodec.h:1073
#define s0
Definition: regdef.h:37
PPS pps
current pps
Definition: h264.h:334
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: mpegvideo.h:720
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:407
int prev_interlaced_frame
Complement sei_pic_struct SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
Definition: h264.h:544
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1434
int direct_spatial_mv_pred
Definition: h264.h:364
0: frame
Definition: h264.h:133
simple assert() macros that are a bit more flexible than ISO C assert().
void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:36
int weighted_pred
weighted_pred_flag
Definition: h264.h:214
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:146
static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264.c:4179
const char * name
Name of the codec implementation.
Definition: avcodec.h:2967
#define T(x)
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
Definition: dsputil.h:145
static void predict_field_decoding_flag(H264Context *h)
Definition: h264.c:3609
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:557
#define IS_INTRA(a)
Definition: mpegvideo.h:108
int ff_h264_decode_mb_cavlc(H264Context *h)
Decode a macroblock.
Definition: h264_cavlc.c:695
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:570
static int square(int x)
Definition: roqvideoenc.c:111
GetBitContext gb
Definition: mpegvideo.h:626
uint8_t * list_counts
Array of list_count per MB specifying the slice type.
Definition: h264.h:377
int delta_pic_order_always_zero_flag
Definition: h264.h:155
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:244
int new
flag to keep track if the decoder context needs re-init due to changed SPS
Definition: h264.h:201
int dct_bits
Size of DCT coefficients.
Definition: dsputil.h:198
int offset_for_top_to_bottom_field
Definition: h264.h:157
#define IN_RANGE(a, b, size)
Definition: h264.c:1096
int off
Definition: dsputil_bfin.c:28
uint8_t zigzag_scan8x8[64]
Definition: h264.h:413
int picture_count
number of allocated pictures (MAX_PICTURE_COUNT * avctx->thread_count)
Definition: mpegvideo.h:318
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:69
static const uint8_t scan8[16 *3+3]
Definition: h264.h:737
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:2498
void(* add_pixels8)(uint8_t *pixels, DCTELEM *block, int line_size)
Definition: dsputil.h:206
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:128
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:505
the normal 2^n-1 "JPEG" YUV ranges
Definition: avcodec.h:574
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:36
uint8_t * direct_table
Definition: h264.h:409
static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg, int chroma444, int simple, int pixel_shift)
Definition: h264.c:1670
uint8_t scaling_matrix8[6][64]
Definition: h264.h:224
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264.h:448
useful rectangle filling function
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: avcodec.h:573
void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
int refs
number of reference frames
Definition: avcodec.h:2022
CABACContext cabac
Cabac.
Definition: h264.h:396
unsigned int left_samples_available
Definition: h264.h:288
#define IS_8X8(a)
Definition: mpegvideo.h:118
int err_recognition
Definition: mpegvideo.h:510
#define FRAME_MBAFF
Definition: h264.h:64
int ref_frame_count
num_ref_frames
Definition: h264.h:159
Picture * long_ref[32]
Definition: h264.h:485
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:2981
static DCTELEM block[64]
Definition: dct-test.c:169
enum AVPictureType pict_type
Picture type of the frame, see ?_TYPE below.
Definition: avcodec.h:1065
void ff_h264_filter_mb(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
int frame_num_offset
for POC type 2
Definition: h264.h:468
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2602
int x264_build
Definition: h264.h:425
uint32_t * mb2br_xy
Definition: h264.h:319
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
Definition: h264.h:342
uint8_t field_scan8x8_cavlc[64]
Definition: h264.h:417
int poc_cycle_length
num_ref_frames_in_pic_order_cnt_cycle
Definition: h264.h:158
int colour_description_present_flag
Definition: h264.h:175
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
Definition: pixfmt.h:77
int poc
h264 frame POC
Definition: mpegvideo.h:130
AVRational sar
Definition: h264.h:172
int redundant_pic_count
Definition: h264.h:482
static const uint8_t field_scan8x8_cavlc[64]
Definition: h264data.h:134
int width
picture width / height.
Definition: avcodec.h:1508
static av_always_inline void fill_filter_caches_inter(H264Context *h, MpegEncContext *const s, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264.c:3308
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:317
int long_ref_count
number of actual long term references
Definition: h264.h:500
void ff_copy_picture(Picture *dst, Picture *src)
Definition: mpegvideo.c:223
Picture.
Definition: mpegvideo.h:94
qpel_mc_func avg_2tap_qpel_pixels_tab[4][16]
Definition: dsputil.h:328
void(* pred4x4_add[2])(uint8_t *pix, const DCTELEM *block, ptrdiff_t stride)
Definition: h264pred.h:100
int cabac_init_idc
Definition: h264.h:503
static void implicit_weight_table(H264Context *h, int field)
Initialize implicit_weight table.
Definition: h264.c:2066
int size_in_bits
Definition: get_bits.h:55
SPS sps
current sps
Definition: h264.h:329
int32_t
PPS * pps_buffers[MAX_PPS_COUNT]
Definition: h264.h:454
static av_always_inline void prefetch_motion(H264Context *h, int list, int pixel_shift, int chroma_idc)
Definition: h264.c:731
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:2058
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color)
Definition: avplay.c:401
#define MB_FIELD
Definition: h264.h:63
#define MAX_SPS_COUNT
Definition: h264.h:42
void ff_er_frame_end(MpegEncContext *s)
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
Decode PPS.
Definition: h264_ps.c:495
void ff_vdpau_h264_picture_start(MpegEncContext *s)
Definition: vdpau.c:130
int emu_edge_height
Definition: h264.h:326
Context Adaptive Binary Arithmetic Coder inline functions.
int level
level
Definition: avcodec.h:2885
int init_qp
pic_init_qp_minus26 + 26
Definition: h264.h:216
int frame_num
h264 frame_num (raw frame_num from slice header)
Definition: mpegvideo.h:131
int mmco_reset
Definition: h264.h:498
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:282
uint8_t * bipred_scratchpad
Definition: h264.h:580
MotionEstContext me
Definition: mpegvideo.h:405
#define ER_AC_ERROR
Definition: mpegvideo.h:495
int poc_lsb
Definition: h264.h:461
int max_pic_num
max_frame_num or 2 * max_frame_num for field pics.
Definition: h264.h:480
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1474
static const uint8_t field_scan[16]
Definition: h264data.h:62
int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
Definition: h264_refs.c:499
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
Definition: mpegvideo.c:245
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264.c:3655
unsigned int topright_samples_available
Definition: h264.h:287
#define AV_WN16A(p, v)
Definition: intreadwrite.h:454
const uint8_t * zigzag_scan8x8_q0
Definition: h264.h:419
int curr_pic_num
frame_num for frames or 2 * frame_num + 1 for field pics.
Definition: h264.h:475
int slice_type
Definition: h264.h:343
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264.c:2256
Definition: h264.h:105
static int av_unused get_cabac_terminate(CABACContext *c)
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
Definition: mpegvideo.h:87
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function for encode/decode called after coding/decoding the header and before a frame is code...
Definition: mpegvideo.c:1370
int top_type
Definition: h264.h:275
static void loop_filter(H264Context *h, int start_x, int end_x)
Definition: h264.c:3539
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo.c:570
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
uint32_t dequant4_buffer[6][QP_MAX_NUM+1][16]
Definition: h264.h:336
void(* h264_idct8_dc_add)(uint8_t *dst, DCTELEM *block, int stride)
Definition: h264dsp.h:88
void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:45
#define PART_NOT_AVAILABLE
Definition: h264.h:305
unsigned int list_count
Definition: h264.h:376
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2733
#define IS_16X8(a)
Definition: mpegvideo.h:116
void ff_vdpau_add_data_chunk(MpegEncContext *s, const uint8_t *buf, int buf_size)
Definition: vdpau.c:110
GetBitContext intra_gb
Definition: h264.h:384
int first_field
is 1 for the first field of a field picture 0 otherwise
Definition: mpegvideo.h:663
int dequant_coeff_pps
reinit tables when pps changes
Definition: h264.h:456
int pic_order_present
pic_order_present_flag
Definition: h264.h:210
static void idct_add(uint8_t *dest, int line_size, DCTELEM *block)
Definition: dsputil_sh4.c:73
SPS * sps_buffers[MAX_SPS_COUNT]
Definition: h264.h:453
void(* h264_idct8_add)(uint8_t *dst, DCTELEM *block, int stride)
Definition: h264dsp.h:84
struct H264Context * thread_context[MAX_THREADS]
Definition: h264.h:509
int chroma_log2_weight_denom
Definition: h264.h:358
uint32_t * mb_type
macroblock type table mb_type_base + mb_width + 2
Definition: avcodec.h:1180
static void flush_change(H264Context *h)
Definition: h264.c:2141
short offset_for_ref_frame[256]
Definition: h264.h:183
int timing_info_present_flag
Definition: h264.h:179
NULL
Definition: eval.c:52
static void decode_finish_row(H264Context *h)
Draw edges and report progress for the last MB row.
Definition: h264.c:3623
int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:535
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:288
external API header
H264 / AVC / MPEG4 part10 codec data table
int ff_h264_frame_start(H264Context *h)
Definition: h264.c:1287
MpegEncContext s
Definition: h264.h:255
Definition: h264.h:109
void ff_thread_await_progress(AVFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
Definition: pthread.c:684
int slice_alpha_c0_offset
Definition: h264.h:433
1: top field
Definition: h264.h:134
enum AVCodecID codec_id
Definition: avcodec.h:1350
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:96
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:440
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:470
int linesize[AV_NUM_DATA_POINTERS]
Size, in bytes, of the data for each picture/channel plane.
Definition: avcodec.h:1008
static void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int height, int y_offset, int list0, int list1, int *nrefs)
Definition: h264.c:308
int next_outputed_poc
Definition: h264.h:491
#define LTOP
Definition: h264.h:67
int ff_h264_decode_sei(H264Context *h)
Decode SEI.
Definition: h264_sei.c:164
int poc_msb
Definition: h264.h:462
int debug
debug
Definition: avcodec.h:2568
int implicit_weight[48][48][2]
Definition: h264.h:362
int max_contexts
Max number of threads / contexts.
Definition: h264.h:522
main external API structure.
Definition: avcodec.h:1339
static void(WINAPI *cond_broadcast)(pthread_cond_t *cond)
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:326
int ff_h264_check_intra4x4_pred_mode(H264Context *h)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:80
static void decode_postinit(H264Context *h, int setup_finished)
Run setup operations that must be run after slice header decoding.
Definition: h264.c:1359
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:865
2: bottom field
Definition: h264.h:135
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:215
#define QP_MAX_NUM
Definition: h264.h:98
static enum AVPixelFormat hwaccel_pixfmt_list_h264_jpeg_420[]
Definition: h264.c:62
int16_t(*[2] motion_val)[2]
motion vector table
Definition: avcodec.h:1172
static void init_dequant8_coeff_table(H264Context *h)
Definition: h264.c:795
qpel_mc_func put_2tap_qpel_pixels_tab[4][16]
Definition: dsputil.h:327
Picture * picture
main picture buffer
Definition: mpegvideo.h:255
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:566
int extradata_size
Definition: avcodec.h:1455
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstre...
Definition: pixfmt.h:103
int constraint_set_flags
constraint_set[0-3]_flag
Definition: h264.h:200
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:268
SEI_PicStructType sei_pic_struct
pic_struct in picture timing SEI message
Definition: h264.h:536
Picture * short_ref[32]
Definition: h264.h:484
void(* h264_idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride)
Definition: h264dsp.h:86
Switching Predicted.
Definition: avutil.h:250
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1580
int slice_beta_offset
Definition: h264.h:434
#define ER_MB_END
Definition: mpegvideo.h:503
const uint8_t * field_scan8x8_cavlc_q0
Definition: h264.h:423
void ff_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:2399
int index
Definition: gxfenc.c:72
uint32_t(*[6] dequant8_coeff)[64]
Definition: h264.h:339
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264.c:4289
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264.h:260
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2072
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:2065
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:372
int context_initialized
Definition: mpegvideo.h:242
int pixel_shift
0 for 8-bit H264, 1 for high-bit-depth H264
Definition: h264.h:257
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb, int first_slice)
Definition: h264_refs.c:694
void ff_er_frame_start(MpegEncContext *s)
static void await_references(H264Context *h)
Wait until all reference frames are available for MC operations.
Definition: h264.c:352
AVHWAccel * ff_find_hwaccel(enum AVCodecID codec_id, enum AVPixelFormat pix_fmt)
Return the hardware accelerated codec for codec codec_id and pixel format pix_fmt.
Definition: utils.c:2048
void(* h264_weight_func)(uint8_t *block, int stride, int height, int log2_denom, int weight, int offset)
Definition: h264dsp.h:34
int8_t * ref_index[2]
motion reference frame index the order in which these are stored can depend on the codec...
Definition: avcodec.h:1195
DSPContext dsp
pointers for accelerated dsp functions
Definition: mpegvideo.h:361
#define s1
Definition: regdef.h:38
unsigned int sps_id
Definition: h264.h:208
#define CABAC
Definition: h264.h:85
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
Definition: h264.h:154
6: bottom field, top field, bottom field repeated, in that order
Definition: h264.h:139
short DCTELEM
Definition: dsputil.h:39
void(* h264_idct8_add4)(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:94
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
Definition: pixfmt.h:132
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:349
int block_offset[2 *(16 *3)]
block_offset[ 0..23] for frame macroblocks block_offset[24..47] for field macroblocks ...
Definition: h264.h:316
uint32_t time_scale
Definition: h264.h:181
int field_poc[2]
h264 top/bottom POC
Definition: mpegvideo.h:129
int transform_8x8_mode
transform_8x8_mode_flag
Definition: h264.h:222
static int pred_weight_table(H264Context *h)
Definition: h264.c:1998
int pic_struct_present_flag
Definition: h264.h:191
Definition: h264.h:103
uint8_t zigzag_scan[16]
Definition: h264.h:412
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:4276
qpel_mc_func put_h264_qpel_pixels_tab[4][16]
Definition: dsputil.h:324
#define FIELD_OR_MBAFF_PICTURE
Definition: h264.h:82
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: avcodec.h:997
int8_t * qscale_table
QP table.
Definition: avcodec.h:1139
#define IS_INTRA16x16(a)
Definition: mpegvideo.h:106
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:199
void ff_h264_filter_mb_fast(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
#define LBOT
Definition: h264.h:68
Definition: vf_drawbox.c:36
void(* clear_blocks)(DCTELEM *blocks)
Definition: dsputil.h:219
#define AV_ZERO128(d)
Definition: intreadwrite.h:542
struct MpegEncContext * owner2
pointer to the MpegEncContext that allocated this picture
Definition: mpegvideo.h:148
int height
Definition: gxfenc.c:72
MpegEncContext.
Definition: mpegvideo.h:211
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:316
struct AVCodecContext * avctx
Definition: mpegvideo.h:213
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264.c:2131
static int field_end(H264Context *h, int in_setup)
Definition: h264.c:2290
hardware decoding through VDA
Definition: pixfmt.h:153
discard all non reference
Definition: avcodec.h:533
int is_complex
Definition: h264.h:429
int mb_height
pic_height_in_map_units_minus1 + 1
Definition: h264.h:162
qpel_mc_func avg_h264_qpel_pixels_tab[4][16]
Definition: dsputil.h:325
uint8_t * rbsp_buffer[2]
Definition: h264.h:441
static const uint8_t dequant4_coeff_init[6][3]
Definition: h264data.h:250
#define tprintf(p,...)
Definition: get_bits.h:613
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:248
#define AV_COPY128(d, s)
Definition: intreadwrite.h:514
#define FIELD_PICTURE
Definition: h264.h:65
#define MAX_SLICES
Definition: dxva2_mpeg2.c:25
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
Definition: alsdec.c:1772
uint16_t * slice_table_base
Definition: h264.h:458
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264.h:152
H.264 / AVC / MPEG4 part10 motion vector predicion.
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:295
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:315
Bi-dir predicted.
Definition: avutil.h:247
AVProfile.
Definition: avcodec.h:2948
static int execute_decode_slices(H264Context *h, int context_count)
Call decode_slice() for each context.
Definition: h264.c:3825
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_...
Definition: pixfmt.h:79
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:2517
int cur_chroma_format_idc
Definition: h264.h:579
enum AVDiscard skip_loop_filter
Definition: avcodec.h:2893
int den
denominator
Definition: rational.h:45
#define CONFIG_SMALL
Definition: config.h:316
int chroma_qp[2]
Definition: h264.h:258
int sei_ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
Definition: h264.h:551
uint16_t sub_mb_type[4]
Definition: h264.h:352
int bit_depth_luma
bit_depth_luma_minus8 + 8
Definition: h264.h:197
DSP utils.
int intra16x16_pred_mode
Definition: h264.h:267
void * priv_data
Definition: avcodec.h:1382
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
Definition: h264.h:467
#define IS_SUB_4X8(a)
Definition: mpegvideo.h:121
int picture_structure
Definition: mpegvideo.h:637
Definition: h264.h:104
void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:124
static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264.c:1573
Definition: h264.h:107
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:2773
VideoDSPContext vdsp
Definition: mpegvideo.h:362
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: avcodec.h:1239
#define IS_SUB_8X8(a)
Definition: mpegvideo.h:119
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: h264.c:4189
const uint16_t ff_h264_mb_sizes[4]
Definition: h264.c:48
#define IS_DIRECT(a)
Definition: mpegvideo.h:113
void ff_MPV_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1141
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
Print debugging info for the given picture.
Definition: mpegvideo.c:1743
void(* h264_idct_add16)(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:91
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264.h:295
int ff_h264_decode_extradata(H264Context *h)
Definition: h264.c:989
uint8_t(*[2] top_borders)[(16 *3)*2]
Definition: h264.h:289
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:506
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1390
void ff_init_cabac_states(CABACContext *c)
Definition: cabac.c:139
static int fill_filter_caches(H264Context *h, int mb_type)
Definition: h264.c:3393
void(* add_pixels4)(uint8_t *pixels, DCTELEM *block, int line_size)
Definition: dsputil.h:207
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:301
int key_frame
1 -> keyframe, 0-> not
Definition: avcodec.h:1058
#define AV_ZERO32(d)
Definition: intreadwrite.h:534
int linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:253
int current_slice
current slice number, used to initalize slice_num of each thread/context
Definition: h264.h:514
int mb_width
pic_width_in_mbs_minus1 + 1
Definition: h264.h:161
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1441
#define IS_16X16(a)
Definition: mpegvideo.h:115
#define AV_RN16A(p)
Definition: intreadwrite.h:442
uint32_t * mb2b_xy
Definition: h264.h:318
int slice_type_fixed
Definition: h264.h:345
struct AVFrame f
Definition: mpegvideo.h:95
int delta_poc_bottom
Definition: h264.h:463
const uint8_t * field_scan_q0
Definition: h264.h:421
static void free_tables(H264Context *h, int free_rbsp)
Definition: h264.c:758
int ff_h264_fill_default_ref_list(H264Context *h)
Fill the default_ref_list.
Definition: h264_refs.c:110
H264DSPContext h264dsp
Definition: h264.h:256
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:230
uint8_t field_scan8x8[64]
Definition: h264.h:416
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:236
#define copy_fields(to, from, start_field, end_field)
Definition: h264.c:1142
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264.h:573
static av_always_inline void mc_part_std(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg, int list0, int list1, int pixel_shift, int chroma_idc)
Definition: h264.c:575
int8_t * intra4x4_pred_mode
Definition: h264.h:283
static av_always_inline void dctcoef_set(DCTELEM *mb, int high_bit_depth, int index, int value)
Definition: h264.c:1768
void ff_thread_report_progress(AVFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread.c:666
int mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264.h:322
static int clone_slice(H264Context *dst, H264Context *src)
Replicate H264 "master" context to thread contexts.
Definition: h264.c:2351
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3119
8: frame tripling
Definition: h264.h:141
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264.h:432
#define AV_RN64A(p)
Definition: intreadwrite.h:450
#define LIST_NOT_USED
Definition: h264.h:304
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:3130
uint8_t(* non_zero_count)[48]
Definition: h264.h:297
unsigned int crop_bottom
frame_cropping_rect_bottom_offset
Definition: h264.h:170
exp golomb vlc stuff
int slice_num
Definition: h264.h:341
int uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:254
AVPixelFormat
Pixel format.
Definition: pixfmt.h:63
This structure stores compressed data.
Definition: avcodec.h:898
int sei_recovery_frame_cnt
recovery_frame_cnt from SEI message
Definition: h264.h:570
static const uint8_t zigzag_scan8x8_cavlc[64]
Definition: h264data.h:96
int level_idc
Definition: h264.h:149
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
Definition: avcodec.h:2547
void(* pred8x8l[9+3])(uint8_t *src, int topleft, int topright, ptrdiff_t stride)
Definition: h264pred.h:95
#define STARTCODE_TEST
int nal_ref_idc
Definition: h264.h:439
void(* pred16x16_add[3])(uint8_t *pix, const int *block_offset, const DCTELEM *block, ptrdiff_t stride)
Definition: h264pred.h:107
uint8_t field_scan[16]
Definition: h264.h:415
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:69
int b_stride
Definition: h264.h:320
Predicted.
Definition: avutil.h:246
unsigned int rbsp_buffer_size[2]
Definition: h264.h:442
#define CHROMA444
Definition: h264.h:89
Context Adaptive Binary Arithmetic Coder.
void ff_MPV_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
Definition: mpegvideo.c:718
int8_t ref_cache[2][5 *8]
Definition: h264.h:303
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264.h:350
int short_ref_count
number of actual short term references
Definition: h264.h:501
static const AVProfile profiles[]
Definition: h264.c:4303
if(!(ptr_align%ac->ptr_align)&&samples_align >=aligned_len)
enum AVColorSpace colorspace
Definition: h264.h:178