Libav
h264.c
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
28 #include "libavutil/avassert.h"
29 #include "libavutil/display.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/stereo3d.h"
32 #include "libavutil/timer.h"
33 #include "internal.h"
34 #include "cabac.h"
35 #include "cabac_functions.h"
36 #include "error_resilience.h"
37 #include "avcodec.h"
38 #include "h264.h"
39 #include "h264data.h"
40 #include "h264chroma.h"
41 #include "h264_mvpred.h"
42 #include "golomb.h"
43 #include "mathops.h"
44 #include "me_cmp.h"
45 #include "mpegutils.h"
46 #include "rectangle.h"
47 #include "svq3.h"
48 #include "thread.h"
49 
50 #include <assert.h>
51 
52 const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
53 
54 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
55  int (*mv)[2][4][2],
56  int mb_x, int mb_y, int mb_intra, int mb_skipped)
57 {
58  H264Context *h = opaque;
59 
60  h->mb_x = mb_x;
61  h->mb_y = mb_y;
62  h->mb_xy = mb_x + mb_y * h->mb_stride;
63  memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
64  assert(ref >= 0);
65  /* FIXME: It is possible albeit uncommon that slice references
66  * differ between slices. We take the easy approach and ignore
67  * it for now. If this turns out to have any relevance in
68  * practice then correct remapping should be added. */
69  if (ref >= h->ref_count[0])
70  ref = 0;
71  fill_rectangle(&h->cur_pic.ref_index[0][4 * h->mb_xy],
72  2, 2, 2, ref, 1);
73  fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
74  fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
75  pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
76  assert(!FRAME_MBAFF(h));
77  ff_h264_hl_decode_mb(h);
78 }
79 
80 void ff_h264_draw_horiz_band(H264Context *h, int y, int height)
81 {
82  AVCodecContext *avctx = h->avctx;
83  AVFrame *cur = &h->cur_pic.f;
84  AVFrame *last = h->ref_list[0][0].f.data[0] ? &h->ref_list[0][0].f : NULL;
85  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
86  int vshift = desc->log2_chroma_h;
87  const int field_pic = h->picture_structure != PICT_FRAME;
88  if (field_pic) {
89  height <<= 1;
90  y <<= 1;
91  }
92 
93  height = FFMIN(height, avctx->height - y);
94 
95  if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
96  return;
97 
98  if (avctx->draw_horiz_band) {
99  AVFrame *src;
100  int offset[AV_NUM_DATA_POINTERS];
101  int i;
102 
103  if (cur->pict_type == AV_PICTURE_TYPE_B || h->low_delay ||
104  (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
105  src = cur;
106  else if (last)
107  src = last;
108  else
109  return;
110 
111  offset[0] = y * src->linesize[0];
112  offset[1] =
113  offset[2] = (y >> vshift) * src->linesize[1];
114  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
115  offset[i] = 0;
116 
117  emms_c();
118 
119  avctx->draw_horiz_band(avctx, src, offset,
120  y, h->picture_structure, height);
121  }
122 }
123 
128 int ff_h264_check_intra4x4_pred_mode(H264Context *h)
129 {
130  static const int8_t top[12] = {
131  -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
132  };
133  static const int8_t left[12] = {
134  0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
135  };
136  int i;
137 
138  if (!(h->top_samples_available & 0x8000)) {
139  for (i = 0; i < 4; i++) {
140  int status = top[h->intra4x4_pred_mode_cache[scan8[0] + i]];
141  if (status < 0) {
142  av_log(h->avctx, AV_LOG_ERROR,
143  "top block unavailable for requested intra4x4 mode %d at %d %d\n",
144  status, h->mb_x, h->mb_y);
145  return AVERROR_INVALIDDATA;
146  } else if (status) {
147  h->intra4x4_pred_mode_cache[scan8[0] + i] = status;
148  }
149  }
150  }
151 
152  if ((h->left_samples_available & 0x8888) != 0x8888) {
153  static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
154  for (i = 0; i < 4; i++)
155  if (!(h->left_samples_available & mask[i])) {
156  int status = left[h->intra4x4_pred_mode_cache[scan8[0] + 8 * i]];
157  if (status < 0) {
158  av_log(h->avctx, AV_LOG_ERROR,
159  "left block unavailable for requested intra4x4 mode %d at %d %d\n",
160  status, h->mb_x, h->mb_y);
161  return AVERROR_INVALIDDATA;
162  } else if (status) {
163  h->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
164  }
165  }
166  }
167 
168  return 0;
169 } // FIXME cleanup like ff_h264_check_intra_pred_mode
170 
175 int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
176 {
177  static const int8_t top[4] = { LEFT_DC_PRED8x8, 1, -1, -1 };
178  static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
179 
180  if (mode > 3U) {
181  av_log(h->avctx, AV_LOG_ERROR,
182  "out of range intra chroma pred mode at %d %d\n",
183  h->mb_x, h->mb_y);
184  return AVERROR_INVALIDDATA;
185  }
186 
187  if (!(h->top_samples_available & 0x8000)) {
188  mode = top[mode];
189  if (mode < 0) {
190  av_log(h->avctx, AV_LOG_ERROR,
191  "top block unavailable for requested intra mode at %d %d\n",
192  h->mb_x, h->mb_y);
193  return AVERROR_INVALIDDATA;
194  }
195  }
196 
197  if ((h->left_samples_available & 0x8080) != 0x8080) {
198  mode = left[mode];
199  if (is_chroma && (h->left_samples_available & 0x8080)) {
200  // mad cow disease mode, aka MBAFF + constrained_intra_pred
201  mode = ALZHEIMER_DC_L0T_PRED8x8 +
202  (!(h->left_samples_available & 0x8000)) +
203  2 * (mode == DC_128_PRED8x8);
204  }
205  if (mode < 0) {
206  av_log(h->avctx, AV_LOG_ERROR,
207  "left block unavailable for requested intra mode at %d %d\n",
208  h->mb_x, h->mb_y);
209  return AVERROR_INVALIDDATA;
210  }
211  }
212 
213  return mode;
214 }
215 
216 const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
217  int *dst_length, int *consumed, int length)
218 {
219  int i, si, di;
220  uint8_t *dst;
221  int bufidx;
222 
223  // src[0]&0x80; // forbidden bit
224  h->nal_ref_idc = src[0] >> 5;
225  h->nal_unit_type = src[0] & 0x1F;
226 
227  src++;
228  length--;
229 
230 #define STARTCODE_TEST \
231  if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
232  if (src[i + 2] != 3) { \
233  /* startcode, so we must be past the end */ \
234  length = i; \
235  } \
236  break; \
237  }
238 
239 #if HAVE_FAST_UNALIGNED
240 #define FIND_FIRST_ZERO \
241  if (i > 0 && !src[i]) \
242  i--; \
243  while (src[i]) \
244  i++
245 
246 #if HAVE_FAST_64BIT
247  for (i = 0; i + 1 < length; i += 9) {
248  if (!((~AV_RN64A(src + i) &
249  (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
250  0x8000800080008080ULL))
251  continue;
252  FIND_FIRST_ZERO;
253  STARTCODE_TEST;
254  i -= 7;
255  }
256 #else
257  for (i = 0; i + 1 < length; i += 5) {
258  if (!((~AV_RN32A(src + i) &
259  (AV_RN32A(src + i) - 0x01000101U)) &
260  0x80008080U))
261  continue;
262  FIND_FIRST_ZERO;
263  STARTCODE_TEST;
264  i -= 3;
265  }
266 #endif
267 #else
268  for (i = 0; i + 1 < length; i += 2) {
269  if (src[i])
270  continue;
271  if (i > 0 && src[i - 1] == 0)
272  i--;
273  STARTCODE_TEST;
274  }
275 #endif
276 
277  if (i >= length - 1) { // no escaped 0
278  *dst_length = length;
279  *consumed = length + 1; // +1 for the header
280  return src;
281  }
282 
283  // use second escape buffer for inter data
284  bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0;
285  av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx],
286  length + FF_INPUT_BUFFER_PADDING_SIZE);
287  dst = h->rbsp_buffer[bufidx];
288 
289  if (!dst)
290  return NULL;
291 
292  memcpy(dst, src, i);
293  si = di = i;
294  while (si + 2 < length) {
295  // remove escapes (very rare 1:2^22)
296  if (src[si + 2] > 3) {
297  dst[di++] = src[si++];
298  dst[di++] = src[si++];
299  } else if (src[si] == 0 && src[si + 1] == 0) {
300  if (src[si + 2] == 3) { // escape
301  dst[di++] = 0;
302  dst[di++] = 0;
303  si += 3;
304  continue;
305  } else // next start code
306  goto nsc;
307  }
308 
309  dst[di++] = src[si++];
310  }
311  while (si < length)
312  dst[di++] = src[si++];
313 
314 nsc:
315  memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
316 
317  *dst_length = di;
318  *consumed = si + 1; // +1 for the header
319  /* FIXME store exact number of bits in the getbitcontext
320  * (it is needed for decoding) */
321  return dst;
322 }
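The loop above undoes H.264 "emulation prevention": an encoder inserts a 0x03 byte after every 0x00 0x00 pair inside a NAL unit so payload bytes can never form a start code, and the decoder must drop those bytes again. A minimal standalone sketch of the same unescaping rule (an illustration, not the decoder's own helper):

#include <stddef.h>
#include <stdint.h>

/* Sketch: copy an escaped NAL payload (EBSP) into dst, dropping the 0x03
 * emulation-prevention byte that follows each 0x00 0x00 pair.
 * dst must hold at least len bytes; the unescaped length is returned. */
static size_t rbsp_unescape(uint8_t *dst, const uint8_t *src, size_t len)
{
    size_t si = 0, di = 0;

    while (si < len) {
        if (si + 2 < len &&
            src[si] == 0x00 && src[si + 1] == 0x00 && src[si + 2] == 0x03) {
            dst[di++] = 0x00;      /* keep the two zero bytes          */
            dst[di++] = 0x00;
            si += 3;               /* skip the emulation-prevention 03 */
        } else {
            dst[di++] = src[si++]; /* ordinary byte, copied unchanged  */
        }
    }
    return di;
}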
323 
328 static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
329 {
330  int v = *src;
331  int r;
332 
333  tprintf(h->avctx, "rbsp trailing %X\n", v);
334 
335  for (r = 1; r < 9; r++) {
336  if (v & 1)
337  return r;
338  v >>= 1;
339  }
340  return 0;
341 }
342 
343 void ff_h264_free_tables(H264Context *h, int free_rbsp)
344 {
345  int i;
346  H264Context *hx;
347 
350  av_freep(&h->cbp_table);
351  av_freep(&h->mvd_table[0]);
352  av_freep(&h->mvd_table[1]);
353  av_freep(&h->direct_table);
356  h->slice_table = NULL;
357  av_freep(&h->list_counts);
358 
359  av_freep(&h->mb2b_xy);
360  av_freep(&h->mb2br_xy);
361 
366 
367  if (free_rbsp && h->DPB) {
368  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
369  ff_h264_unref_picture(h, &h->DPB[i]);
370  av_freep(&h->DPB);
371  } else if (h->DPB) {
372  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
373  h->DPB[i].needs_realloc = 1;
374  }
375 
376  h->cur_pic_ptr = NULL;
377 
378  for (i = 0; i < H264_MAX_THREADS; i++) {
379  hx = h->thread_context[i];
380  if (!hx)
381  continue;
382  av_freep(&hx->top_borders[1]);
383  av_freep(&hx->top_borders[0]);
386  av_freep(&hx->dc_val_base);
387  av_freep(&hx->er.mb_index2xy);
388  av_freep(&hx->er.error_status_table);
389  av_freep(&hx->er.er_temp_buffer);
390  av_freep(&hx->er.mbintra_table);
391  av_freep(&hx->er.mbskip_table);
392 
393  if (free_rbsp) {
394  av_freep(&hx->rbsp_buffer[1]);
395  av_freep(&hx->rbsp_buffer[0]);
396  hx->rbsp_buffer_size[0] = 0;
397  hx->rbsp_buffer_size[1] = 0;
398  }
399  if (i)
400  av_freep(&h->thread_context[i]);
401  }
402 }
403 
404 int ff_h264_alloc_tables(H264Context *h)
405 {
406  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
407  const int row_mb_num = h->mb_stride * 2 * h->avctx->thread_count;
408  int x, y, i;
409 
411  row_mb_num * 8 * sizeof(uint8_t), fail)
413  big_mb_num * 48 * sizeof(uint8_t), fail)
415  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
417  big_mb_num * sizeof(uint16_t), fail)
419  big_mb_num * sizeof(uint8_t), fail)
421  16 * row_mb_num * sizeof(uint8_t), fail);
423  16 * row_mb_num * sizeof(uint8_t), fail);
425  4 * big_mb_num * sizeof(uint8_t), fail);
427  big_mb_num * sizeof(uint8_t), fail)
428 
429  memset(h->slice_table_base, -1,
430  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
431  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
432 
433  FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy,
434  big_mb_num * sizeof(uint32_t), fail);
435  FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy,
436  big_mb_num * sizeof(uint32_t), fail);
437  for (y = 0; y < h->mb_height; y++)
438  for (x = 0; x < h->mb_width; x++) {
439  const int mb_xy = x + y * h->mb_stride;
440  const int b_xy = 4 * x + 4 * y * h->b_stride;
441 
442  h->mb2b_xy[mb_xy] = b_xy;
443  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
444  }
445 
446  if (!h->dequant4_coeff[0])
447  h264_init_dequant_tables(h);
448 
449  if (!h->DPB) {
450  h->DPB = av_mallocz_array(H264_MAX_PICTURE_COUNT, sizeof(*h->DPB));
451  if (!h->DPB)
452  return AVERROR(ENOMEM);
453  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
454  av_frame_unref(&h->DPB[i].f);
455  av_frame_unref(&h->cur_pic.f);
456  }
457 
458  return 0;
459 
460 fail:
461  ff_h264_free_tables(h, 1);
462  return AVERROR(ENOMEM);
463 }
464 
469 int ff_h264_context_init(H264Context *h)
470 {
471  ERContext *er = &h->er;
472  int mb_array_size = h->mb_height * h->mb_stride;
473  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
474  int c_size = h->mb_stride * (h->mb_height + 1);
475  int yc_size = y_size + 2 * c_size;
476  int x, y, i;
477 
479  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
481  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
482 
483  h->ref_cache[0][scan8[5] + 1] =
484  h->ref_cache[0][scan8[7] + 1] =
485  h->ref_cache[0][scan8[13] + 1] =
486  h->ref_cache[1][scan8[5] + 1] =
487  h->ref_cache[1][scan8[7] + 1] =
488  h->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
489 
490  if (CONFIG_ERROR_RESILIENCE) {
491  /* init ER */
492  er->avctx = h->avctx;
493  er->mecc = &h->mecc;
494  er->decode_mb = h264_er_decode_mb;
495  er->opaque = h;
496  er->quarter_sample = 1;
497 
498  er->mb_num = h->mb_num;
499  er->mb_width = h->mb_width;
500  er->mb_height = h->mb_height;
501  er->mb_stride = h->mb_stride;
502  er->b8_stride = h->mb_width * 2 + 1;
503 
504  // error resilience code looks cleaner with this
505  FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy,
506  (h->mb_num + 1) * sizeof(int), fail);
507 
508  for (y = 0; y < h->mb_height; y++)
509  for (x = 0; x < h->mb_width; x++)
510  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
511 
512  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
513  h->mb_stride + h->mb_width;
514 
515  FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table,
516  mb_array_size * sizeof(uint8_t), fail);
517 
518  FF_ALLOC_OR_GOTO(h->avctx, er->mbintra_table, mb_array_size, fail);
519  memset(er->mbintra_table, 1, mb_array_size);
520 
521  FF_ALLOCZ_OR_GOTO(h->avctx, er->mbskip_table, mb_array_size + 2, fail);
522 
523  FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer,
524  h->mb_height * h->mb_stride, fail);
525 
526  FF_ALLOCZ_OR_GOTO(h->avctx, h->dc_val_base,
527  yc_size * sizeof(int16_t), fail);
528  er->dc_val[0] = h->dc_val_base + h->mb_width * 2 + 2;
529  er->dc_val[1] = h->dc_val_base + y_size + h->mb_stride + 1;
530  er->dc_val[2] = er->dc_val[1] + c_size;
531  for (i = 0; i < yc_size; i++)
532  h->dc_val_base[i] = 1024;
533  }
534 
535  return 0;
536 
537 fail:
538  return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
539 }
540 
541 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
542  int parse_extradata);
543 
544 int ff_h264_decode_extradata(H264Context *h)
545 {
546  AVCodecContext *avctx = h->avctx;
547  int ret;
548 
549  if (avctx->extradata[0] == 1) {
550  int i, cnt, nalsize;
551  unsigned char *p = avctx->extradata;
552 
553  h->is_avc = 1;
554 
555  if (avctx->extradata_size < 7) {
556  av_log(avctx, AV_LOG_ERROR,
557  "avcC %d too short\n", avctx->extradata_size);
558  return AVERROR_INVALIDDATA;
559  }
560  /* sps and pps in the avcC always have length coded with 2 bytes,
561  * so put a fake nal_length_size = 2 while parsing them */
562  h->nal_length_size = 2;
563  // Decode sps from avcC
564  cnt = *(p + 5) & 0x1f; // Number of sps
565  p += 6;
566  for (i = 0; i < cnt; i++) {
567  nalsize = AV_RB16(p) + 2;
568  if (p - avctx->extradata + nalsize > avctx->extradata_size)
569  return AVERROR_INVALIDDATA;
570  ret = decode_nal_units(h, p, nalsize, 1);
571  if (ret < 0) {
572  av_log(avctx, AV_LOG_ERROR,
573  "Decoding sps %d from avcC failed\n", i);
574  return ret;
575  }
576  p += nalsize;
577  }
578  // Decode pps from avcC
579  cnt = *(p++); // Number of pps
580  for (i = 0; i < cnt; i++) {
581  nalsize = AV_RB16(p) + 2;
582  if (p - avctx->extradata + nalsize > avctx->extradata_size)
583  return AVERROR_INVALIDDATA;
584  ret = decode_nal_units(h, p, nalsize, 1);
585  if (ret < 0) {
586  av_log(avctx, AV_LOG_ERROR,
587  "Decoding pps %d from avcC failed\n", i);
588  return ret;
589  }
590  p += nalsize;
591  }
592  // Store right nal length size that will be used to parse all other nals
593  h->nal_length_size = (avctx->extradata[4] & 0x03) + 1;
594  } else {
595  h->is_avc = 0;
596  ret = decode_nal_units(h, avctx->extradata, avctx->extradata_size, 1);
597  if (ret < 0)
598  return ret;
599  }
600  return 0;
601 }
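The avcC branch above follows the AVCDecoderConfigurationRecord layout from ISO/IEC 14496-15: byte 0 is configurationVersion (1), the low two bits of byte 4 hold lengthSizeMinusOne, the low five bits of byte 5 hold the SPS count, and every parameter set is prefixed with a 16-bit big-endian length. A minimal sketch using the same arithmetic (hypothetical helper, names are illustrative):

#include <stdint.h>

/* Sketch: read nal_length_size and the SPS count from an avcC blob.
 * Returns 0 on success, -1 if the blob is too short or not avcC. */
static int avcc_header_info(const uint8_t *p, int size,
                            int *nal_length_size, int *num_sps)
{
    if (size < 7 || p[0] != 1)            /* configurationVersion must be 1 */
        return -1;
    *nal_length_size = (p[4] & 0x03) + 1; /* lengthSizeMinusOne + 1         */
    *num_sps         = p[5] & 0x1f;       /* numOfSequenceParameterSets     */
    return 0;
}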
602 
603 av_cold int ff_h264_decode_init(AVCodecContext *avctx)
604 {
605  H264Context *h = avctx->priv_data;
606  int i;
607  int ret;
608 
609  h->avctx = avctx;
610 
611  h->bit_depth_luma = 8;
612  h->chroma_format_idc = 1;
613 
614  ff_h264dsp_init(&h->h264dsp, 8, 1);
615  ff_h264chroma_init(&h->h264chroma, 8);
616  ff_h264qpel_init(&h->h264qpel, 8);
617  ff_h264_pred_init(&h->hpc, h->avctx->codec_id, 8, 1);
618 
619  h->dequant_coeff_pps = -1;
620 
621  /* needed so that IDCT permutation is known early */
622  if (CONFIG_ERROR_RESILIENCE)
623  ff_me_cmp_init(&h->mecc, h->avctx);
624  ff_videodsp_init(&h->vdsp, 8);
625 
626  memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
627  memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
628 
630  h->slice_context_count = 1;
631  h->workaround_bugs = avctx->workaround_bugs;
632  h->flags = avctx->flags;
633 
634  /* set defaults */
635  // s->decode_mb = ff_h263_decode_mb;
636  if (!avctx->has_b_frames)
637  h->low_delay = 1;
638 
639  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
640 
641  ff_h264_decode_init_vlc();
642 
643  ff_init_cabac_states();
644 
645  h->pixel_shift = 0;
646  h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8;
647 
648  h->thread_context[0] = h;
649  h->outputed_poc = h->next_outputed_poc = INT_MIN;
650  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
651  h->last_pocs[i] = INT_MIN;
652  h->prev_poc_msb = 1 << 16;
653  h->x264_build = -1;
655  h->recovery_frame = -1;
656  h->frame_recovered = 0;
657  if (avctx->codec_id == AV_CODEC_ID_H264) {
658  if (avctx->ticks_per_frame == 1)
659  h->avctx->time_base.den *= 2;
660  avctx->ticks_per_frame = 2;
661  }
662 
663  if (avctx->extradata_size > 0 && avctx->extradata) {
664  ret = ff_h264_decode_extradata(h);
665  if (ret < 0)
666  return ret;
667  }
668 
669  if (h->sps.bitstream_restriction_flag &&
670  h->avctx->has_b_frames < h->sps.num_reorder_frames) {
671  h->avctx->has_b_frames = h->sps.num_reorder_frames;
672  h->low_delay = 0;
673  }
674 
675  avctx->internal->allocate_progress = 1;
676 
677  return 0;
678 }
679 
680 static int decode_init_thread_copy(AVCodecContext *avctx)
681 {
682  H264Context *h = avctx->priv_data;
683 
684  if (!avctx->internal->is_copy)
685  return 0;
686  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
687  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
688 
689  h->avctx = avctx;
690  h->rbsp_buffer[0] = NULL;
691  h->rbsp_buffer[1] = NULL;
692  h->rbsp_buffer_size[0] = 0;
693  h->rbsp_buffer_size[1] = 0;
694  h->context_initialized = 0;
695 
696  return 0;
697 }
698 
707 static void decode_postinit(H264Context *h, int setup_finished)
708 {
709  H264Picture *out = h->cur_pic_ptr;
710  H264Picture *cur = h->cur_pic_ptr;
711  int i, pics, out_of_order, out_idx;
712  int invalid = 0, cnt = 0;
713 
714  h->cur_pic_ptr->f.pict_type = h->pict_type;
715 
716  if (h->next_output_pic)
717  return;
718 
719  if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
720  /* FIXME: if we have two PAFF fields in one packet, we can't start
721  * the next thread here. If we have one field per packet, we can.
722  * The check in decode_nal_units() is not good enough to find this
723  * yet, so we assume the worst for now. */
724  // if (setup_finished)
725  // ff_thread_finish_setup(h->avctx);
726  return;
727  }
728 
729  cur->f.interlaced_frame = 0;
730  cur->f.repeat_pict = 0;
731 
732  /* Signal interlacing information externally. */
733  /* Prioritize picture timing SEI information over used
734  * decoding process if it exists. */
735 
736  if (h->sps.pic_struct_present_flag) {
737  switch (h->sei_pic_struct) {
738  case SEI_PIC_STRUCT_FRAME:
739  break;
740  case SEI_PIC_STRUCT_TOP_FIELD:
741  case SEI_PIC_STRUCT_BOTTOM_FIELD:
742  cur->f.interlaced_frame = 1;
743  break;
744  case SEI_PIC_STRUCT_TOP_BOTTOM:
745  case SEI_PIC_STRUCT_BOTTOM_TOP:
746  if (FIELD_OR_MBAFF_PICTURE(h))
747  cur->f.interlaced_frame = 1;
748  else
749  // try to flag soft telecine progressive
750  cur->f.interlaced_frame = h->prev_interlaced_frame;
751  break;
752  case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
753  case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
754  /* Signal the possibility of telecined film externally
755  * (pic_struct 5,6). From these hints, let the applications
756  * decide if they apply deinterlacing. */
757  cur->f.repeat_pict = 1;
758  break;
759  case SEI_PIC_STRUCT_FRAME_DOUBLING:
760  cur->f.repeat_pict = 2;
761  break;
762  case SEI_PIC_STRUCT_FRAME_TRIPLING:
763  cur->f.repeat_pict = 4;
764  break;
765  }
766 
767  if ((h->sei_ct_type & 3) &&
768  h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
769  cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
770  } else {
771  /* Derive interlacing flag from used decoding process. */
772  cur->f.interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
773  }
774  h->prev_interlaced_frame = cur->f.interlaced_frame;
775 
776  if (cur->field_poc[0] != cur->field_poc[1]) {
777  /* Derive top_field_first from field pocs. */
778  cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
779  } else {
780  if (h->sps.pic_struct_present_flag) {
781  /* Use picture timing SEI information. Even if it is
782  * information of a past frame, better than nothing. */
783  if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
784  h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
785  cur->f.top_field_first = 1;
786  else
787  cur->f.top_field_first = 0;
788  } else {
789  /* Most likely progressive */
790  cur->f.top_field_first = 0;
791  }
792  }
793 
794  if (h->sei_frame_packing_present &&
799  AVStereo3D *stereo = av_stereo3d_create_side_data(&cur->f);
800  if (!stereo)
801  return;
802 
803  switch (h->frame_packing_arrangement_type) {
804  case 0:
805  stereo->type = AV_STEREO3D_CHECKERBOARD;
806  break;
807  case 1:
808  stereo->type = AV_STEREO3D_COLUMNS;
809  break;
810  case 2:
811  stereo->type = AV_STEREO3D_LINES;
812  break;
813  case 3:
814  if (h->quincunx_subsampling)
815  stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
816  else
817  stereo->type = AV_STEREO3D_SIDEBYSIDE;
818  break;
819  case 4:
820  stereo->type = AV_STEREO3D_TOPBOTTOM;
821  break;
822  case 5:
823  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
824  break;
825  case 6:
826  stereo->type = AV_STEREO3D_2D;
827  break;
828  }
829 
830  if (h->content_interpretation_type == 2)
831  stereo->flags = AV_STEREO3D_FLAG_INVERT;
832  }
833 
836  double angle = h->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
837  AVFrameSideData *rotation = av_frame_new_side_data(&cur->f,
838  AV_FRAME_DATA_DISPLAYMATRIX,
839  sizeof(int32_t) * 9);
840  if (!rotation)
841  return;
842 
843  av_display_rotation_set((int32_t *)rotation->data, angle);
844  av_display_matrix_flip((int32_t *)rotation->data,
845  h->sei_hflip, h->sei_vflip);
846  }
847 
848  // FIXME do something with unavailable reference frames
849 
850  /* Sort B-frames into display order */
851 
852  if (h->sps.bitstream_restriction_flag &&
853  h->avctx->has_b_frames < h->sps.num_reorder_frames) {
854  h->avctx->has_b_frames = h->sps.num_reorder_frames;
855  h->low_delay = 0;
856  }
857 
858  if (h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT &&
859  !h->sps.bitstream_restriction_flag) {
860  h->avctx->has_b_frames = MAX_DELAYED_PIC_COUNT - 1;
861  h->low_delay = 0;
862  }
863 
864  pics = 0;
865  while (h->delayed_pic[pics])
866  pics++;
867 
868  assert(pics <= MAX_DELAYED_PIC_COUNT);
869 
870  h->delayed_pic[pics++] = cur;
871  if (cur->reference == 0)
872  cur->reference = DELAYED_PIC_REF;
873 
874  /* Frame reordering. This code takes pictures from coding order and sorts
875  * them by their incremental POC value into display order. It supports POC
876  * gaps, MMCO reset codes and random resets.
877  * A "display group" can start either with an IDR frame (f.key_frame = 1),
878  * and/or can be closed down with a MMCO reset code. In sequences where
879  * there is no delay, we can't detect that (since the frame was already
880  * output to the user), so we also set h->mmco_reset to detect the MMCO
881  * reset code.
882  * FIXME: if we detect insufficient delays (as per h->avctx->has_b_frames),
883  * we increase the delay between input and output. All frames affected by
884  * the lag (e.g. those that should have been output before another frame
885  * that we already returned to the user) will be dropped. This is a bug
886  * that we will fix later. */
887  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
888  cnt += out->poc < h->last_pocs[i];
889  invalid += out->poc == INT_MIN;
890  }
891  if (!h->mmco_reset && !cur->f.key_frame &&
892  cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
893  h->mmco_reset = 2;
894  if (pics > 1)
895  h->delayed_pic[pics - 2]->mmco_reset = 2;
896  }
897  if (h->mmco_reset || cur->f.key_frame) {
898  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
899  h->last_pocs[i] = INT_MIN;
900  cnt = 0;
901  invalid = MAX_DELAYED_PIC_COUNT;
902  }
903  out = h->delayed_pic[0];
904  out_idx = 0;
905  for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
906  h->delayed_pic[i] &&
907  !h->delayed_pic[i - 1]->mmco_reset &&
908  !h->delayed_pic[i]->f.key_frame;
909  i++)
910  if (h->delayed_pic[i]->poc < out->poc) {
911  out = h->delayed_pic[i];
912  out_idx = i;
913  }
914  if (h->avctx->has_b_frames == 0 &&
915  (h->delayed_pic[0]->f.key_frame || h->mmco_reset))
916  h->next_outputed_poc = INT_MIN;
917  out_of_order = !out->f.key_frame && !h->mmco_reset &&
918  (out->poc < h->next_outputed_poc);
919 
922  } else if (out_of_order && pics - 1 == h->avctx->has_b_frames &&
923  h->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
924  if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
925  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, cnt);
926  }
927  h->low_delay = 0;
928  } else if (h->low_delay &&
929  ((h->next_outputed_poc != INT_MIN &&
930  out->poc > h->next_outputed_poc + 2) ||
931  cur->f.pict_type == AV_PICTURE_TYPE_B)) {
932  h->low_delay = 0;
933  h->avctx->has_b_frames++;
934  }
935 
936  if (pics > h->avctx->has_b_frames) {
937  out->reference &= ~DELAYED_PIC_REF;
938  // for frame threading, the owner must be the second field's thread or
939  // else the first thread can release the picture and reuse it unsafely
940  for (i = out_idx; h->delayed_pic[i]; i++)
941  h->delayed_pic[i] = h->delayed_pic[i + 1];
942  }
943  memmove(h->last_pocs, &h->last_pocs[1],
944  sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1));
945  h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc;
946  if (!out_of_order && pics > h->avctx->has_b_frames) {
947  h->next_output_pic = out;
948  if (out->mmco_reset) {
949  if (out_idx > 0) {
950  h->next_outputed_poc = out->poc;
951  h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset;
952  } else {
953  h->next_outputed_poc = INT_MIN;
954  }
955  } else {
956  if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f.key_frame) {
957  h->next_outputed_poc = INT_MIN;
958  } else {
959  h->next_outputed_poc = out->poc;
960  }
961  }
962  h->mmco_reset = 0;
963  } else {
964  av_log(h->avctx, AV_LOG_DEBUG, "no picture\n");
965  }
966 
967  if (h->next_output_pic) {
968  if (h->next_output_pic->recovered) {
969  // We have reached a recovery point and all frames after it in
970  // display order are "recovered".
971  h->frame_recovered |= FRAME_RECOVERED_SEI;
972  }
973  h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
974  }
975 
976  if (setup_finished && !h->avctx->hwaccel)
977  ff_thread_finish_setup(h->avctx);
978 }
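The reordering above keeps up to has_b_frames decoded pictures in delayed_pic[] and emits the one with the smallest POC, subject to key-frame and MMCO-reset handling. Reduced to its core, the selection step is a minimum search over the delayed pictures; a rough sketch (special cases omitted):

/* Sketch: scan a NULL-terminated delayed-picture list and return the index
 * of the entry with the smallest POC, as the loop above does before the
 * key-frame / mmco_reset checks. */
static int lowest_poc_index(H264Picture *const delayed[], int max_count)
{
    int i, out_idx = 0;

    for (i = 1; i < max_count && delayed[i]; i++)
        if (delayed[i]->poc < delayed[out_idx]->poc)
            out_idx = i;
    return out_idx;
}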
979 
980 int ff_pred_weight_table(H264Context *h)
981 {
982  int list, i;
983  int luma_def, chroma_def;
984 
985  h->use_weight = 0;
986  h->use_weight_chroma = 0;
987  h->luma_log2_weight_denom = get_ue_golomb(&h->gb);
988  if (h->sps.chroma_format_idc)
989  h->chroma_log2_weight_denom = get_ue_golomb(&h->gb);
990  luma_def = 1 << h->luma_log2_weight_denom;
991  chroma_def = 1 << h->chroma_log2_weight_denom;
992 
993  for (list = 0; list < 2; list++) {
994  h->luma_weight_flag[list] = 0;
995  h->chroma_weight_flag[list] = 0;
996  for (i = 0; i < h->ref_count[list]; i++) {
997  int luma_weight_flag, chroma_weight_flag;
998 
999  luma_weight_flag = get_bits1(&h->gb);
1000  if (luma_weight_flag) {
1001  h->luma_weight[i][list][0] = get_se_golomb(&h->gb);
1002  h->luma_weight[i][list][1] = get_se_golomb(&h->gb);
1003  if (h->luma_weight[i][list][0] != luma_def ||
1004  h->luma_weight[i][list][1] != 0) {
1005  h->use_weight = 1;
1006  h->luma_weight_flag[list] = 1;
1007  }
1008  } else {
1009  h->luma_weight[i][list][0] = luma_def;
1010  h->luma_weight[i][list][1] = 0;
1011  }
1012 
1013  if (h->sps.chroma_format_idc) {
1014  chroma_weight_flag = get_bits1(&h->gb);
1015  if (chroma_weight_flag) {
1016  int j;
1017  for (j = 0; j < 2; j++) {
1018  h->chroma_weight[i][list][j][0] = get_se_golomb(&h->gb);
1019  h->chroma_weight[i][list][j][1] = get_se_golomb(&h->gb);
1020  if (h->chroma_weight[i][list][j][0] != chroma_def ||
1021  h->chroma_weight[i][list][j][1] != 0) {
1022  h->use_weight_chroma = 1;
1023  h->chroma_weight_flag[list] = 1;
1024  }
1025  }
1026  } else {
1027  int j;
1028  for (j = 0; j < 2; j++) {
1029  h->chroma_weight[i][list][j][0] = chroma_def;
1030  h->chroma_weight[i][list][j][1] = 0;
1031  }
1032  }
1033  }
1034  }
1035  if (h->slice_type_nos != AV_PICTURE_TYPE_B)
1036  break;
1037  }
1038  h->use_weight = h->use_weight || h->use_weight_chroma;
1039  return 0;
1040 }
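The parser above only reads the explicit weights and offsets; when motion compensation applies them, each unidirectionally predicted sample is scaled roughly as pred = clip(((sample * weight + 2^(denom-1)) >> denom) + offset), per H.264 section 8.4.2.3. A self-contained sketch of that per-sample formula for 8-bit video (not the decoder's optimized DSP path):

#include <stdint.h>

static inline int clip_uint8_sketch(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : v;
}

/* Sketch: apply one explicit luma weight/offset pair, mirroring the
 * luma_weight[i][list][0] (weight) and [1] (offset) values parsed above. */
static uint8_t weight_sample(uint8_t sample, int weight, int offset,
                             int log2_denom)
{
    int v;

    if (log2_denom > 0)
        v = ((sample * weight + (1 << (log2_denom - 1))) >> log2_denom) + offset;
    else
        v = sample * weight + offset;
    return (uint8_t)clip_uint8_sketch(v);
}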
1041 
1045 static void idr(H264Context *h)
1046 {
1047  ff_h264_remove_all_refs(h);
1048  h->prev_frame_num =
1049  h->prev_frame_num_offset =
1050  h->prev_poc_msb =
1051  h->prev_poc_lsb = 0;
1052 }
1053 
1054 /* forget old pics after a seek */
1055 void ff_h264_flush_change(H264Context *h)
1056 {
1057  int i;
1058  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1059  h->last_pocs[i] = INT_MIN;
1060  h->outputed_poc = h->next_outputed_poc = INT_MIN;
1061  h->prev_interlaced_frame = 1;
1062  idr(h);
1063  if (h->cur_pic_ptr)
1064  h->cur_pic_ptr->reference = 0;
1065  h->first_field = 0;
1066  memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
1067  memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
1068  memset(h->default_ref_list[0], 0, sizeof(h->default_ref_list[0]));
1069  memset(h->default_ref_list[1], 0, sizeof(h->default_ref_list[1]));
1070  ff_h264_reset_sei(h);
1071  h->recovery_frame = -1;
1072  h->frame_recovered = 0;
1073 }
1074 
1075 /* forget old pics after a seek */
1076 static void flush_dpb(AVCodecContext *avctx)
1077 {
1078  H264Context *h = avctx->priv_data;
1079  int i;
1080 
1081  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
1082  if (h->delayed_pic[i])
1083  h->delayed_pic[i]->reference = 0;
1084  h->delayed_pic[i] = NULL;
1085  }
1086 
1087  ff_h264_flush_change(h);
1088 
1089  if (h->DPB)
1090  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
1091  ff_h264_unref_picture(h, &h->DPB[i]);
1092  h->cur_pic_ptr = NULL;
1093  ff_h264_unref_picture(h, &h->cur_pic);
1094 
1095  h->mb_x = h->mb_y = 0;
1096 
1097  h->parse_context.state = -1;
1098  h->parse_context.frame_start_found = 0;
1099  h->parse_context.overread = 0;
1100  h->parse_context.overread_index = 0;
1101  h->parse_context.index = 0;
1102  h->parse_context.last_index = 0;
1103 
1104  ff_h264_free_tables(h, 1);
1105  h->context_initialized = 0;
1106 }
1107 
1108 int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
1109 {
1110  const int max_frame_num = 1 << h->sps.log2_max_frame_num;
1111  int field_poc[2];
1112 
1113  h->frame_num_offset = h->prev_frame_num_offset;
1114  if (h->frame_num < h->prev_frame_num)
1115  h->frame_num_offset += max_frame_num;
1116 
1117  if (h->sps.poc_type == 0) {
1118  const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
1119 
1120  if (h->poc_lsb < h->prev_poc_lsb &&
1121  h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
1122  h->poc_msb = h->prev_poc_msb + max_poc_lsb;
1123  else if (h->poc_lsb > h->prev_poc_lsb &&
1124  h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
1125  h->poc_msb = h->prev_poc_msb - max_poc_lsb;
1126  else
1127  h->poc_msb = h->prev_poc_msb;
1128  field_poc[0] =
1129  field_poc[1] = h->poc_msb + h->poc_lsb;
1130  if (h->picture_structure == PICT_FRAME)
1131  field_poc[1] += h->delta_poc_bottom;
1132  } else if (h->sps.poc_type == 1) {
1133  int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
1134  int i;
1135 
1136  if (h->sps.poc_cycle_length != 0)
1137  abs_frame_num = h->frame_num_offset + h->frame_num;
1138  else
1139  abs_frame_num = 0;
1140 
1141  if (h->nal_ref_idc == 0 && abs_frame_num > 0)
1142  abs_frame_num--;
1143 
1144  expected_delta_per_poc_cycle = 0;
1145  for (i = 0; i < h->sps.poc_cycle_length; i++)
1146  // FIXME integrate during sps parse
1147  expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
1148 
1149  if (abs_frame_num > 0) {
1150  int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
1151  int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
1152 
1153  expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
1154  for (i = 0; i <= frame_num_in_poc_cycle; i++)
1155  expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
1156  } else
1157  expectedpoc = 0;
1158 
1159  if (h->nal_ref_idc == 0)
1160  expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
1161 
1162  field_poc[0] = expectedpoc + h->delta_poc[0];
1163  field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
1164 
1165  if (h->picture_structure == PICT_FRAME)
1166  field_poc[1] += h->delta_poc[1];
1167  } else {
1168  int poc = 2 * (h->frame_num_offset + h->frame_num);
1169 
1170  if (!h->nal_ref_idc)
1171  poc--;
1172 
1173  field_poc[0] = poc;
1174  field_poc[1] = poc;
1175  }
1176 
1177  if (h->picture_structure != PICT_BOTTOM_FIELD)
1178  pic_field_poc[0] = field_poc[0];
1179  if (h->picture_structure != PICT_TOP_FIELD)
1180  pic_field_poc[1] = field_poc[1];
1181  *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);
1182 
1183  return 0;
1184 }
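For poc_type 0 the code above reconstructs the POC MSB from the transmitted LSBs by detecting wraparound against the previous reference picture: a drop of at least max_poc_lsb / 2 means the LSBs wrapped forward, a rise of more than max_poc_lsb / 2 means they wrapped backward. A small worked sketch of exactly that update (with max_poc_lsb = 16, prev_lsb 14 -> lsb 2 yields msb + 16, prev_lsb 2 -> lsb 14 yields msb - 16):

/* Sketch of the poc_type 0 MSB update performed above. */
static int poc0_update_msb(int poc_lsb, int prev_poc_lsb, int prev_poc_msb,
                           int max_poc_lsb)
{
    if (poc_lsb < prev_poc_lsb &&
        prev_poc_lsb - poc_lsb >= max_poc_lsb / 2)
        return prev_poc_msb + max_poc_lsb;   /* LSBs wrapped forward  */
    if (poc_lsb > prev_poc_lsb &&
        prev_poc_lsb - poc_lsb < -max_poc_lsb / 2)
        return prev_poc_msb - max_poc_lsb;   /* LSBs wrapped backward */
    return prev_poc_msb;                     /* no wrap               */
}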
1185 
1193 int ff_h264_get_profile(SPS *sps)
1194 {
1195  int profile = sps->profile_idc;
1196 
1197  switch (sps->profile_idc) {
1198  case FF_PROFILE_H264_BASELINE:
1199  // constraint_set1_flag set to 1
1200  profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
1201  break;
1202  case FF_PROFILE_H264_HIGH_10:
1203  case FF_PROFILE_H264_HIGH_422:
1204  case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
1205  // constraint_set3_flag set to 1
1206  profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
1207  break;
1208  }
1209 
1210  return profile;
1211 }
1212 
1213 int ff_h264_set_parameter_from_sps(H264Context *h)
1214 {
1215  if (h->flags & CODEC_FLAG_LOW_DELAY ||
1216  (h->sps.bitstream_restriction_flag &&
1217  !h->sps.num_reorder_frames)) {
1218  if (h->avctx->has_b_frames > 1 || h->delayed_pic[0])
1219  av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. "
1220  "Reenabling low delay requires a codec flush.\n");
1221  else
1222  h->low_delay = 1;
1223  }
1224 
1225  if (h->avctx->has_b_frames < 2)
1226  h->avctx->has_b_frames = !h->low_delay;
1227 
1228  if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
1230  if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
1233  h->pixel_shift = h->sps.bit_depth_luma > 8;
1234 
1236  h->sps.chroma_format_idc);
1240  h->sps.chroma_format_idc);
1242  ff_me_cmp_init(&h->mecc, h->avctx);
1244  } else {
1245  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
1246  h->sps.bit_depth_luma);
1247  return AVERROR_INVALIDDATA;
1248  }
1249  }
1250  return 0;
1251 }
1252 
1254 {
1255  int ref_count[2], list_count;
1256  int num_ref_idx_active_override_flag, max_refs;
1257 
1258  // set defaults, might be overridden a few lines later
1259  ref_count[0] = h->pps.ref_count[0];
1260  ref_count[1] = h->pps.ref_count[1];
1261 
1262  if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
1263  if (h->slice_type_nos == AV_PICTURE_TYPE_B)
1264  h->direct_spatial_mv_pred = get_bits1(&h->gb);
1265  num_ref_idx_active_override_flag = get_bits1(&h->gb);
1266 
1267  if (num_ref_idx_active_override_flag) {
1268  ref_count[0] = get_ue_golomb(&h->gb) + 1;
1269  if (ref_count[0] < 1)
1270  return AVERROR_INVALIDDATA;
1271  if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
1272  ref_count[1] = get_ue_golomb(&h->gb) + 1;
1273  if (ref_count[1] < 1)
1274  return AVERROR_INVALIDDATA;
1275  }
1276  }
1277 
1278  if (h->slice_type_nos == AV_PICTURE_TYPE_B)
1279  list_count = 2;
1280  else
1281  list_count = 1;
1282  } else {
1283  list_count = 0;
1284  ref_count[0] = ref_count[1] = 0;
1285  }
1286 
1287  max_refs = h->picture_structure == PICT_FRAME ? 16 : 32;
1288 
1289  if (ref_count[0] > max_refs || ref_count[1] > max_refs) {
1290  av_log(h->avctx, AV_LOG_ERROR, "reference overflow\n");
1291  h->ref_count[0] = h->ref_count[1] = 0;
1292  return AVERROR_INVALIDDATA;
1293  }
1294 
1295  if (list_count != h->list_count ||
1296  ref_count[0] != h->ref_count[0] ||
1297  ref_count[1] != h->ref_count[1]) {
1298  h->ref_count[0] = ref_count[0];
1299  h->ref_count[1] = ref_count[1];
1300  h->list_count = list_count;
1301  return 1;
1302  }
1303 
1304  return 0;
1305 }
1306 
1307 static int find_start_code(const uint8_t *buf, int buf_size,
1308  int buf_index, int next_avc)
1309 {
1310  // start code prefix search
1311  for (; buf_index + 3 < next_avc; buf_index++)
1312  // This should always succeed in the first iteration.
1313  if (buf[buf_index] == 0 &&
1314  buf[buf_index + 1] == 0 &&
1315  buf[buf_index + 2] == 1)
1316  break;
1317 
1318  if (buf_index + 3 >= buf_size)
1319  return buf_size;
1320 
1321  return buf_index + 3;
1322 }
1323 
1324 static int get_avc_nalsize(H264Context *h, const uint8_t *buf,
1325  int buf_size, int *buf_index)
1326 {
1327  int i, nalsize = 0;
1328 
1329  if (*buf_index >= buf_size - h->nal_length_size)
1330  return -1;
1331 
1332  for (i = 0; i < h->nal_length_size; i++)
1333  nalsize = (nalsize << 8) | buf[(*buf_index)++];
1334  if (nalsize <= 0 || nalsize > buf_size - *buf_index) {
1335  av_log(h->avctx, AV_LOG_ERROR,
1336  "AVC: nal size %d\n", nalsize);
1337  return -1;
1338  }
1339  return nalsize;
1340 }
1341 
1342 static int get_bit_length(H264Context *h, const uint8_t *buf,
1343  const uint8_t *ptr, int dst_length,
1344  int i, int next_avc)
1345 {
1346  if ((h->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
1347  buf[i] == 0x00 && buf[i + 1] == 0x00 &&
1348  buf[i + 2] == 0x01 && buf[i + 3] == 0xE0)
1349  return 0;
1350 
1351  if (!(h->workaround_bugs & FF_BUG_TRUNCATED))
1352  while (dst_length > 0 && ptr[dst_length - 1] == 0)
1353  dst_length--;
1354 
1355  if (!dst_length)
1356  return 0;
1357 
1358  return 8 * dst_length - decode_rbsp_trailing(h, ptr + dst_length - 1);
1359 }
1360 
1361 static int get_last_needed_nal(H264Context *h, const uint8_t *buf, int buf_size)
1362 {
1363  int next_avc = h->is_avc ? 0 : buf_size;
1364  int nal_index = 0;
1365  int buf_index = 0;
1366  int nals_needed = 0;
1367 
1368  while(1) {
1369  int nalsize = 0;
1370  int dst_length, bit_length, consumed;
1371  const uint8_t *ptr;
1372 
1373  if (buf_index >= next_avc) {
1374  nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
1375  if (nalsize < 0)
1376  break;
1377  next_avc = buf_index + nalsize;
1378  } else {
1379  buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
1380  if (buf_index >= buf_size)
1381  break;
1382  }
1383 
1384  ptr = ff_h264_decode_nal(h, buf + buf_index, &dst_length, &consumed,
1385  next_avc - buf_index);
1386 
1387  if (!ptr || dst_length < 0)
1388  return AVERROR_INVALIDDATA;
1389 
1390  buf_index += consumed;
1391 
1392  bit_length = get_bit_length(h, buf, ptr, dst_length,
1393  buf_index, next_avc);
1394  nal_index++;
1395 
1396  /* packets can sometimes contain multiple PPS/SPS,
1397  * e.g. two PAFF field pictures in one packet, or a demuxer
1398  * which splits NALs strangely; if so, when frame threading we
1399  * can't start the next thread until we've read all of them */
1400  switch (h->nal_unit_type) {
1401  case NAL_SPS:
1402  case NAL_PPS:
1403  nals_needed = nal_index;
1404  break;
1405  case NAL_DPA:
1406  case NAL_IDR_SLICE:
1407  case NAL_SLICE:
1408  init_get_bits(&h->gb, ptr, bit_length);
1409  if (!get_ue_golomb(&h->gb))
1410  nals_needed = nal_index;
1411  }
1412  }
1413 
1414  return nals_needed;
1415 }
1416 
1417 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
1418  int parse_extradata)
1419 {
1420  AVCodecContext *const avctx = h->avctx;
1421  H264Context *hx;
1422  int buf_index;
1423  unsigned context_count;
1424  int next_avc;
1425  int nals_needed = 0;
1426  int nal_index;
1427  int ret = 0;
1428 
1430  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) {
1431  h->current_slice = 0;
1432  if (!h->first_field)
1433  h->cur_pic_ptr = NULL;
1434  ff_h264_reset_sei(h);
1435  }
1436 
1437  if (avctx->active_thread_type & FF_THREAD_FRAME)
1438  nals_needed = get_last_needed_nal(h, buf, buf_size);
1439 
1440  {
1441  buf_index = 0;
1442  context_count = 0;
1443  next_avc = h->is_avc ? 0 : buf_size;
1444  nal_index = 0;
1445  for (;;) {
1446  int consumed;
1447  int dst_length;
1448  int bit_length;
1449  const uint8_t *ptr;
1450  int nalsize = 0;
1451  int err;
1452 
1453  if (buf_index >= next_avc) {
1454  nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
1455  if (nalsize < 0)
1456  break;
1457  next_avc = buf_index + nalsize;
1458  } else {
1459  buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
1460  if (buf_index >= buf_size)
1461  break;
1462  if (buf_index >= next_avc)
1463  continue;
1464  }
1465 
1466  hx = h->thread_context[context_count];
1467 
1468  ptr = ff_h264_decode_nal(hx, buf + buf_index, &dst_length,
1469  &consumed, next_avc - buf_index);
1470  if (!ptr || dst_length < 0) {
1471  ret = -1;
1472  goto end;
1473  }
1474 
1475  bit_length = get_bit_length(h, buf, ptr, dst_length,
1476  buf_index + consumed, next_avc);
1477 
1478  if (h->avctx->debug & FF_DEBUG_STARTCODE)
1479  av_log(h->avctx, AV_LOG_DEBUG,
1480  "NAL %d at %d/%d length %d\n",
1481  hx->nal_unit_type, buf_index, buf_size, dst_length);
1482 
1483  if (h->is_avc && (nalsize != consumed) && nalsize)
1484  av_log(h->avctx, AV_LOG_DEBUG,
1485  "AVC: Consumed only %d bytes instead of %d\n",
1486  consumed, nalsize);
1487 
1488  buf_index += consumed;
1489  nal_index++;
1490 
1491  if (avctx->skip_frame >= AVDISCARD_NONREF &&
1492  h->nal_ref_idc == 0 &&
1493  h->nal_unit_type != NAL_SEI)
1494  continue;
1495 
1496 again:
1497  /* Ignore every NAL unit type except PPS and SPS during extradata
1498  * parsing. Decoding slices is not possible in codec init
1499  * with frame-mt */
1500  if (parse_extradata && HAVE_THREADS &&
1501  (h->avctx->active_thread_type & FF_THREAD_FRAME) &&
1502  (hx->nal_unit_type != NAL_PPS &&
1503  hx->nal_unit_type != NAL_SPS)) {
1504  if (hx->nal_unit_type < NAL_AUD ||
1505  hx->nal_unit_type > NAL_AUXILIARY_SLICE)
1506  av_log(avctx, AV_LOG_INFO,
1507  "Ignoring NAL unit %d during extradata parsing\n",
1508  hx->nal_unit_type);
1509  hx->nal_unit_type = NAL_FF_IGNORE;
1510  }
1511  err = 0;
1512  switch (hx->nal_unit_type) {
1513  case NAL_IDR_SLICE:
1514  if (h->nal_unit_type != NAL_IDR_SLICE) {
1515  av_log(h->avctx, AV_LOG_ERROR,
1516  "Invalid mix of idr and non-idr slices\n");
1517  ret = -1;
1518  goto end;
1519  }
1520  idr(h); // FIXME ensure we don't lose some frames if there is reordering
1521  case NAL_SLICE:
1522  init_get_bits(&hx->gb, ptr, bit_length);
1523  hx->intra_gb_ptr =
1524  hx->inter_gb_ptr = &hx->gb;
1525  hx->data_partitioning = 0;
1526 
1527  if ((err = ff_h264_decode_slice_header(hx, h)))
1528  break;
1529 
1530  if (h->sei_recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
1531  h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) &
1532  ((1 << h->sps.log2_max_frame_num) - 1);
1533  }
1534 
1535  h->cur_pic_ptr->f.key_frame |=
1536  (hx->nal_unit_type == NAL_IDR_SLICE) ||
1537  (h->sei_recovery_frame_cnt >= 0);
1538 
1539  if (hx->nal_unit_type == NAL_IDR_SLICE ||
1540  h->recovery_frame == h->frame_num) {
1541  h->recovery_frame = -1;
1542  h->cur_pic_ptr->recovered = 1;
1543  }
1544  // If we have an IDR, all frames after it in decoded order are
1545  // "recovered".
1546  if (hx->nal_unit_type == NAL_IDR_SLICE)
1549 
1550  if (h->current_slice == 1) {
1551  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS))
1552  decode_postinit(h, nal_index >= nals_needed);
1553 
1554  if (h->avctx->hwaccel &&
1555  (ret = h->avctx->hwaccel->start_frame(h->avctx, NULL, 0)) < 0)
1556  return ret;
1557  }
1558 
1559  if (hx->redundant_pic_count == 0 &&
1560  (avctx->skip_frame < AVDISCARD_NONREF ||
1561  hx->nal_ref_idc) &&
1562  (avctx->skip_frame < AVDISCARD_BIDIR ||
1563  hx->slice_type_nos != AV_PICTURE_TYPE_B) &&
1564  (avctx->skip_frame < AVDISCARD_NONKEY ||
1565  hx->slice_type_nos == AV_PICTURE_TYPE_I) &&
1566  avctx->skip_frame < AVDISCARD_ALL) {
1567  if (avctx->hwaccel) {
1568  ret = avctx->hwaccel->decode_slice(avctx,
1569  &buf[buf_index - consumed],
1570  consumed);
1571  if (ret < 0)
1572  return ret;
1573  } else
1574  context_count++;
1575  }
1576  break;
1577  case NAL_DPA:
1578  if (h->avctx->flags & CODEC_FLAG2_CHUNKS) {
1579  av_log(h->avctx, AV_LOG_ERROR,
1580  "Decoding in chunks is not supported for "
1581  "partitioned slices.\n");
1582  return AVERROR(ENOSYS);
1583  }
1584 
1585  init_get_bits(&hx->gb, ptr, bit_length);
1586  hx->intra_gb_ptr =
1587  hx->inter_gb_ptr = NULL;
1588 
1589  if ((err = ff_h264_decode_slice_header(hx, h)) < 0) {
1590  /* make sure data_partitioning is cleared if it was set
1591  * before, so we don't try decoding a slice without a valid
1592  * slice header later */
1593  h->data_partitioning = 0;
1594  break;
1595  }
1596 
1597  hx->data_partitioning = 1;
1598  break;
1599  case NAL_DPB:
1600  init_get_bits(&hx->intra_gb, ptr, bit_length);
1601  hx->intra_gb_ptr = &hx->intra_gb;
1602  break;
1603  case NAL_DPC:
1604  init_get_bits(&hx->inter_gb, ptr, bit_length);
1605  hx->inter_gb_ptr = &hx->inter_gb;
1606 
1607  if (hx->redundant_pic_count == 0 &&
1608  hx->intra_gb_ptr &&
1609  hx->data_partitioning &&
1610  h->cur_pic_ptr && h->context_initialized &&
1611  (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) &&
1612  (avctx->skip_frame < AVDISCARD_BIDIR ||
1613  hx->slice_type_nos != AV_PICTURE_TYPE_B) &&
1614  (avctx->skip_frame < AVDISCARD_NONKEY ||
1615  hx->slice_type_nos == AV_PICTURE_TYPE_I) &&
1616  avctx->skip_frame < AVDISCARD_ALL)
1617  context_count++;
1618  break;
1619  case NAL_SEI:
1620  init_get_bits(&h->gb, ptr, bit_length);
1621  ret = ff_h264_decode_sei(h);
1622  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1623  goto end;
1624  break;
1625  case NAL_SPS:
1626  init_get_bits(&h->gb, ptr, bit_length);
1627  ret = ff_h264_decode_seq_parameter_set(h);
1628  if (ret < 0 && h->is_avc && (nalsize != consumed) && nalsize) {
1629  av_log(h->avctx, AV_LOG_DEBUG,
1630  "SPS decoding failure, trying again with the complete NAL\n");
1631  init_get_bits(&h->gb, buf + buf_index + 1 - consumed,
1632  8 * (nalsize - 1));
1633  ff_h264_decode_seq_parameter_set(h);
1634  }
1635 
1636  ret = ff_h264_set_parameter_from_sps(h);
1637  if (ret < 0)
1638  goto end;
1639 
1640  break;
1641  case NAL_PPS:
1642  init_get_bits(&h->gb, ptr, bit_length);
1643  ret = ff_h264_decode_picture_parameter_set(h, bit_length);
1644  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1645  goto end;
1646  break;
1647  case NAL_AUD:
1648  case NAL_END_SEQUENCE:
1649  case NAL_END_STREAM:
1650  case NAL_FILLER_DATA:
1651  case NAL_SPS_EXT:
1652  case NAL_AUXILIARY_SLICE:
1653  break;
1654  case NAL_FF_IGNORE:
1655  break;
1656  default:
1657  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
1658  hx->nal_unit_type, bit_length);
1659  }
1660 
1661  if (context_count == h->max_contexts) {
1662  ret = ff_h264_execute_decode_slices(h, context_count);
1663  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1664  goto end;
1665  context_count = 0;
1666  }
1667 
1668  if (err < 0) {
1669  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
1670  h->ref_count[0] = h->ref_count[1] = h->list_count = 0;
1671  } else if (err == 1) {
1672  /* Slice could not be decoded in parallel mode, copy down
1673  * NAL unit stuff to context 0 and restart. Note that
1674  * rbsp_buffer is not transferred, but since we no longer
1675  * run in parallel mode this should not be an issue. */
1676  h->nal_unit_type = hx->nal_unit_type;
1677  h->nal_ref_idc = hx->nal_ref_idc;
1678  hx = h;
1679  goto again;
1680  }
1681  }
1682  }
1683  if (context_count) {
1684  ret = ff_h264_execute_decode_slices(h, context_count);
1685  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1686  goto end;
1687  }
1688 
1689  ret = 0;
1690 end:
1691  /* clean up */
1692  if (h->cur_pic_ptr && !h->droppable) {
1693  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1694  h->picture_structure == PICT_BOTTOM_FIELD);
1695  }
1696 
1697  return (ret < 0) ? ret : buf_index;
1698 }
1699 
1703 static int get_consumed_bytes(int pos, int buf_size)
1704 {
1705  if (pos == 0)
1706  pos = 1; // avoid infinite loops (I doubt that is needed but...)
1707  if (pos + 10 > buf_size)
1708  pos = buf_size; // oops ;)
1709 
1710  return pos;
1711 }
1712 
1713 static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
1714 {
1715  int i;
1716  int ret = av_frame_ref(dst, src);
1717  if (ret < 0)
1718  return ret;
1719 
1720  if (!h->sps.crop)
1721  return 0;
1722 
1723  for (i = 0; i < 3; i++) {
1724  int hshift = (i > 0) ? h->chroma_x_shift : 0;
1725  int vshift = (i > 0) ? h->chroma_y_shift : 0;
1726  int off = ((h->sps.crop_left >> hshift) << h->pixel_shift) +
1727  (h->sps.crop_top >> vshift) * dst->linesize[i];
1728  dst->data[i] += off;
1729  }
1730  return 0;
1731 }
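output_frame above applies the SPS cropping rectangle by advancing the plane data pointers rather than copying pixels: the left crop is shifted by the plane's horizontal subsampling and scaled by pixel_shift (bytes per sample), the top crop by the vertical subsampling times the linesize. For 8-bit yuv420p with crop_left = 2, crop_top = 4 and a luma linesize of 1920, that is 2 + 4 * 1920 = 7682 bytes for luma and 1 + 2 * 960 = 1921 bytes for each chroma plane. The arithmetic in isolation:

/* Sketch: byte offset into one plane for a crop rectangle, given the
 * plane's subsampling shifts, the per-sample byte shift and its linesize. */
static int crop_plane_offset(int crop_left, int crop_top,
                             int hshift, int vshift,
                             int pixel_shift, int linesize)
{
    return ((crop_left >> hshift) << pixel_shift) +
            (crop_top >> vshift) * linesize;
}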
1732 
1733 static int h264_decode_frame(AVCodecContext *avctx, void *data,
1734  int *got_frame, AVPacket *avpkt)
1735 {
1736  const uint8_t *buf = avpkt->data;
1737  int buf_size = avpkt->size;
1738  H264Context *h = avctx->priv_data;
1739  AVFrame *pict = data;
1740  int buf_index = 0;
1741  int ret;
1742 
1743  h->flags = avctx->flags;
1744  /* reset data partitioning here, to ensure GetBitContexts from previous
1745  * packets do not get used. */
1746  h->data_partitioning = 0;
1747 
1748  /* end of stream, output what is still in the buffers */
1749 out:
1750  if (buf_size == 0) {
1751  H264Picture *out;
1752  int i, out_idx;
1753 
1754  h->cur_pic_ptr = NULL;
1755 
1756  // FIXME factorize this with the output code below
1757  out = h->delayed_pic[0];
1758  out_idx = 0;
1759  for (i = 1;
1760  h->delayed_pic[i] &&
1761  !h->delayed_pic[i]->f.key_frame &&
1762  !h->delayed_pic[i]->mmco_reset;
1763  i++)
1764  if (h->delayed_pic[i]->poc < out->poc) {
1765  out = h->delayed_pic[i];
1766  out_idx = i;
1767  }
1768 
1769  for (i = out_idx; h->delayed_pic[i]; i++)
1770  h->delayed_pic[i] = h->delayed_pic[i + 1];
1771 
1772  if (out) {
1773  ret = output_frame(h, pict, &out->f);
1774  if (ret < 0)
1775  return ret;
1776  *got_frame = 1;
1777  }
1778 
1779  return buf_index;
1780  }
1781 
1782  buf_index = decode_nal_units(h, buf, buf_size, 0);
1783  if (buf_index < 0)
1784  return AVERROR_INVALIDDATA;
1785 
1786  if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
1787  buf_size = 0;
1788  goto out;
1789  }
1790 
1791  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
1792  if (avctx->skip_frame >= AVDISCARD_NONREF)
1793  return 0;
1794  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
1795  return AVERROR_INVALIDDATA;
1796  }
1797 
1798  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) ||
1799  (h->mb_y >= h->mb_height && h->mb_height)) {
1800  if (avctx->flags2 & CODEC_FLAG2_CHUNKS)
1801  decode_postinit(h, 1);
1802 
1803  ff_h264_field_end(h, 0);
1804 
1805  *got_frame = 0;
1806  if (h->next_output_pic && ((avctx->flags & CODEC_FLAG_OUTPUT_CORRUPT) ||
1807  h->next_output_pic->recovered)) {
1808  if (!h->next_output_pic->recovered)
1809  h->next_output_pic->f.flags |= AV_FRAME_FLAG_CORRUPT;
1810 
1811  ret = output_frame(h, pict, &h->next_output_pic->f);
1812  if (ret < 0)
1813  return ret;
1814  *got_frame = 1;
1815  }
1816  }
1817 
1818  assert(pict->buf[0] || !*got_frame);
1819 
1820  return get_consumed_bytes(buf_index, buf_size);
1821 }
1822 
1823 av_cold void ff_h264_free_context(H264Context *h)
1824 {
1825  int i;
1826 
1827  ff_h264_free_tables(h, 1); // FIXME cleanup init stuff perhaps
1828 
1829  for (i = 0; i < MAX_SPS_COUNT; i++)
1830  av_freep(h->sps_buffers + i);
1831 
1832  for (i = 0; i < MAX_PPS_COUNT; i++)
1833  av_freep(h->pps_buffers + i);
1834 }
1835 
1836 static av_cold int h264_decode_end(AVCodecContext *avctx)
1837 {
1838  H264Context *h = avctx->priv_data;
1839 
1841 
1843 
1844  return 0;
1845 }
1846 
1847 static const AVProfile profiles[] = {
1848  { FF_PROFILE_H264_BASELINE, "Baseline" },
1849  { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline" },
1850  { FF_PROFILE_H264_MAIN, "Main" },
1851  { FF_PROFILE_H264_EXTENDED, "Extended" },
1852  { FF_PROFILE_H264_HIGH, "High" },
1853  { FF_PROFILE_H264_HIGH_10, "High 10" },
1854  { FF_PROFILE_H264_HIGH_10_INTRA, "High 10 Intra" },
1855  { FF_PROFILE_H264_HIGH_422, "High 4:2:2" },
1856  { FF_PROFILE_H264_HIGH_422_INTRA, "High 4:2:2 Intra" },
1857  { FF_PROFILE_H264_HIGH_444, "High 4:4:4" },
1858  { FF_PROFILE_H264_HIGH_444_PREDICTIVE, "High 4:4:4 Predictive" },
1859  { FF_PROFILE_H264_HIGH_444_INTRA, "High 4:4:4 Intra" },
1860  { FF_PROFILE_H264_CAVLC_444, "CAVLC 4:4:4" },
1861  { FF_PROFILE_UNKNOWN },
1862 };
1863 
1864 AVCodec ff_h264_decoder = {
1865  .name = "h264",
1866  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
1867  .type = AVMEDIA_TYPE_VIDEO,
1868  .id = AV_CODEC_ID_H264,
1869  .priv_data_size = sizeof(H264Context),
1870  .init = ff_h264_decode_init,
1871  .close = h264_decode_end,
1872  .decode = h264_decode_frame,
1873  .capabilities = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 |
1874  CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS |
1875  CODEC_CAP_FRAME_THREADS,
1876  .flush = flush_dpb,
1877  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1878  .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
1879  .profiles = NULL_IF_CONFIG_SMALL(profiles),
1880 };
int chroma_format_idc
Definition: h264.h:160
#define FF_PROFILE_H264_MAIN
Definition: avcodec.h:2655
GetBitContext inter_gb
Definition: h264.h:455
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:893
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:46
MECmpContext * mecc
int ff_h264_decode_seq_parameter_set(H264Context *h)
Decode SPS.
Definition: h264_ps.c:299
discard all frames except keyframes
Definition: avcodec.h:567
uint8_t * edge_emu_buffer
Definition: h264.h:700
void ff_h264_flush_change(H264Context *h)
Definition: h264.c:1055
int workaround_bugs
Definition: h264.h:336
unsigned int top_samples_available
Definition: h264.h:362
#define FF_PROFILE_H264_CAVLC_444
Definition: avcodec.h:2665
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:54
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, int parse_extradata)
Definition: h264.c:1417
#define DC_128_PRED8x8
Definition: h264pred.h:76
void ff_h264_free_tables(H264Context *h, int free_rbsp)
Definition: h264.c:343
GetBitContext gb
Definition: h264.h:311
Views are packed per line, as if interlaced.
Definition: stereo3d.h:97
#define AV_NUM_DATA_POINTERS
Definition: frame.h:136
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG2 field pics)
Definition: avcodec.h:1563
int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
Call decode_slice() for each context.
Definition: h264_slice.c:2354
5: top field, bottom field, top field repeated, in that order
Definition: h264.h:147
int low_delay
Definition: h264.h:332
int mb_num
Definition: h264.h:504
GetBitContext * intra_gb_ptr
Definition: h264.h:456
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1599
mpeg2/4, h264 default
Definition: pixfmt.h:378
This structure describes decoded (raw) audio or video data.
Definition: frame.h:135
Definition: h264.h:111
int delta_poc[2]
Definition: h264.h:543
Views are alternated temporally.
Definition: stereo3d.h:66
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:179
int quincunx_subsampling
Definition: h264.h:635
3: top field, bottom field, in that order
Definition: h264.h:145
const uint8_t * ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length)
Decode a network abstraction layer unit.
Definition: h264.c:216
#define H264_MAX_PICTURE_COUNT
Definition: h264.h:46
int first_field
Definition: h264.h:420
misc image utilities
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:87
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:129
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:393
Definition: h264.h:115
H264ChromaContext h264chroma
Definition: h264.h:308
uint16_t * cbp_table
Definition: h264.h:471
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:603
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264.h:691
7: frame doubling
Definition: h264.h:149
#define MAX_PPS_COUNT
Definition: h264.h:50
Sequence parameter set.
Definition: h264.h:156
int mb_y
Definition: h264.h:498
int bitstream_restriction_flag
Definition: h264.h:196
#define FF_PROFILE_H264_INTRA
Definition: avcodec.h:2651
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:225
#define FMO
Definition: h264.h:60
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:315
H264Picture * DPB
Definition: h264.h:314
static int get_last_needed_nal(H264Context *h, const uint8_t *buf, int buf_size)
Definition: h264.c:1361
int size
Definition: avcodec.h:974
AVBufferPool * mb_type_pool
Definition: h264.h:704
int outputed_poc
Definition: h264.h:569
int chroma_x_shift
Definition: h264.h:326
int flags
Definition: h264.h:335
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1254
int mb_height
Definition: h264.h:502
int16_t * dc_val_base
Definition: h264.h:701
H264Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
Definition: h264.h:566
int is_avc
Used to parse AVC variant of h264.
Definition: h264.h:525
AVBufferPool * ref_index_pool
Definition: h264.h:706
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:39
int ff_h264_get_profile(SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264.c:1193
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264.h:378
void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:325
H264Context.
Definition: h264.h:303
discard all
Definition: avcodec.h:568
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
Definition: h264.h:545
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2514
4: bottom field, top field, in that order
Definition: h264.h:146
struct AVFrame f
Definition: h264.h:264
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264.h:682
AVCodec.
Definition: avcodec.h:2796
int frame_start_found
Definition: parser.h:34
int picture_structure
Definition: h264.h:419
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264.h:412
int profile_idc
Definition: h264.h:158
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:1288
#define CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:613
static av_always_inline uint32_t pack16to32(int a, int b)
Definition: h264.h:884
#define FF_PROFILE_H264_HIGH_444_PREDICTIVE
Definition: avcodec.h:2663
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1175
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
uint8_t * chroma_pred_mode_table
Definition: h264.h:476
enum AVDiscard skip_frame
Definition: avcodec.h:2727
#define AV_RN32A(p)
Definition: intreadwrite.h:446
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2432
unsigned int crop_top
frame_cropping_rect_top_offset
Definition: h264.h:181
static int decode_init_thread_copy(AVCodecContext *avctx)
Definition: h264.c:680
uint8_t scaling_matrix4[6][16]
Definition: h264.h:235
void h264_init_dequant_tables(H264Context *h)
Definition: h264_slice.c:382
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
Definition: mimic.c:275
#define FF_PROFILE_H264_BASELINE
Definition: avcodec.h:2653
uint32_t(*[6] dequant4_coeff)[16]
Definition: h264.h:406
#define CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries...
Definition: avcodec.h:669
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:37
uint8_t
#define av_cold
Definition: attributes.h:66
int prev_frame_num_offset
for POC type 2
Definition: h264.h:548
int use_weight
Definition: h264.h:425
unsigned int crop_left
frame_cropping_rect_left_offset
Definition: h264.h:179
int offset_for_non_ref_pic
Definition: h264.h:166
void ff_h264_reset_sei(H264Context *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:37
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:123
int data_partitioning
Definition: h264.h:330
int luma_weight[48][2][2]
Definition: h264.h:430
int bit_depth_chroma
bit_depth_chroma_minus8 + 8
Definition: h264.h:210
int poc
frame POC
Definition: h264.h:283
AVCodec ff_h264_decoder
Definition: h264.c:1864
Multithreading support functions.
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int mb_xy
Definition: h264.h:505
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:188
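A hedged sketch of the reference-counted handoff done when a decoded picture is returned to the caller, in the spirit of output_frame(); the function name is illustrative and the real code also deals with cropping.

#include "libavutil/frame.h"

int output_frame_sketch(AVFrame *dst, const AVFrame *src)
{
    int ret = av_frame_ref(dst, src); /* share the underlying buffers */
    if (ret < 0)
        return ret;                   /* propagate the allocation error */
    return 0;
}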
#define emms_c()
Definition: internal.h:47
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:445
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1164
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:684
int frame_recovered
Initial frame has been completely recovered.
Definition: h264.h:689
const char data[16]
Definition: mxf.c:70
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:34
int mb_x
Definition: h264.h:498
H264Picture default_ref_list[2][32]
base reference list for all slices of a coded picture
Definition: h264.h:563
uint8_t * data
Definition: avcodec.h:973
static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
Identify the exact end of the bitstream.
Definition: h264.c:328
#define FF_PROFILE_H264_EXTENDED
Definition: avcodec.h:2656
int chroma_y_shift
Definition: h264.h:326
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:320
#define MAX_DELAYED_PIC_COUNT
Definition: h264.h:54
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
high precision timer, useful to profile code
int recovered
picture at IDR or recovery point + recovery count
Definition: h264.h:297
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1773
int luma_log2_weight_denom
Definition: h264.h:427
int sei_vflip
Definition: h264.h:642
int chroma_weight[48][2][2][2]
Definition: h264.h:431
int last_pocs[MAX_DELAYED_PIC_COUNT]
Definition: h264.h:567
#define r
Definition: input.c:51
H.264 / AVC / MPEG4 part10 codec.
int frame_num
Definition: h264.h:544
H264PredContext hpc
Definition: h264.h:360
int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
Decode a slice header.
Definition: h264_slice.c:1188
static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
Definition: h264.c:1713
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:175
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:123
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1339
int flags
Additional information about the frame packing.
Definition: stereo3d.h:132
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code.
Definition: golomb.h:53
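A self-contained sketch of unsigned Exp-Golomb decoding as performed by get_ue_golomb(): count the leading zero bits, then read that many suffix bits. 'read_bit' is a hypothetical single-bit reader supplied by the caller, not a Libav API.

unsigned decode_ue_golomb_sketch(int (*read_bit)(void *opaque), void *opaque)
{
    unsigned leading_zeros = 0, suffix = 0, i;

    while (!read_bit(opaque))            /* prefix: zeros before the first 1 bit */
        leading_zeros++;
    for (i = 0; i < leading_zeros; i++)  /* suffix: leading_zeros more bits */
        suffix = (suffix << 1) | (unsigned)read_bit(opaque);

    return (1u << leading_zeros) - 1 + suffix; /* codeNum as defined by the spec */
}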
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264.c:1703
int poc_type
pic_order_cnt_type
Definition: h264.h:163
int context_initialized
Definition: h264.h:334
static const uint16_t mask[17]
Definition: lzw.c:38
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:713
ParseContext parse_context
Definition: h264.h:310
int nal_unit_type
Definition: h264.h:518
int use_weight_chroma
Definition: h264.h:426
int num_reorder_frames
Definition: h264.h:197
#define AV_RB16
Definition: intreadwrite.h:53
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:66
discard all bidirectional frames
Definition: avcodec.h:566
#define AVERROR(e)
Definition: error.h:43
GetBitContext * inter_gb_ptr
Definition: h264.h:457
#define ALZHEIMER_DC_L0T_PRED8x8
Definition: h264pred.h:79
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:150
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2559
int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
Definition: h264.c:1108
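A simplified sketch of the pic_order_cnt_type == 0 computation from H.264 clause 8.2.1.1, using the prev_poc_msb / prev_poc_lsb / log2_max_poc_lsb fields listed in this index; ff_init_poc() additionally handles the other POC types and field pictures.

int compute_poc_type0_sketch(int poc_lsb, int prev_poc_lsb, int prev_poc_msb,
                             int log2_max_poc_lsb)
{
    const int max_poc_lsb = 1 << log2_max_poc_lsb;
    int poc_msb;

    if (poc_lsb < prev_poc_lsb && prev_poc_lsb - poc_lsb >= max_poc_lsb / 2)
        poc_msb = prev_poc_msb + max_poc_lsb;   /* lsb wrapped forward */
    else if (poc_lsb > prev_poc_lsb && poc_lsb - prev_poc_lsb > max_poc_lsb / 2)
        poc_msb = prev_poc_msb - max_poc_lsb;   /* lsb wrapped backward */
    else
        poc_msb = prev_poc_msb;

    return poc_msb + poc_lsb;
}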
static void flush_dpb(AVCodecContext *avctx)
Definition: h264.c:1076
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:144
PPS pps
current pps
Definition: h264.h:402
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:478
#define FF_PROFILE_H264_HIGH_422
Definition: avcodec.h:2660
int prev_interlaced_frame
Complement sei_pic_struct SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
Definition: h264.h:627
#define FF_PROFILE_H264_HIGH
Definition: avcodec.h:2657
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1144
int direct_spatial_mv_pred
Definition: h264.h:434
ThreadFrame tf
Definition: h264.h:265
0: frame
Definition: h264.h:142
simple assert() macros that are a bit more flexible than ISO C assert().
int overread_index
the index into ParseContext.buffer of the overread bytes
Definition: parser.h:36
#define PICT_TOP_FIELD
Definition: mpegutils.h:33
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:169
const char * name
Name of the codec implementation.
Definition: avcodec.h:2803
H264QpelContext h264qpel
Definition: h264.h:309
ERContext er
Definition: h264.h:312
void ff_init_cabac_states(void)
Definition: cabac.c:124
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:35
uint8_t * list_counts
Array of list_count per MB specifying the slice type.
Definition: h264.h:447
#define FFMAX(a, b)
Definition: common.h:55
#define CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:656
uint8_t * mbintra_table
int * mb_index2xy
int offset_for_top_to_bottom_field
Definition: h264.h:167
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264.h:89
static const uint8_t scan8[16 *3+3]
Definition: h264.h:868
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:190
int crop
frame_cropping_flag
Definition: h264.h:176
uint8_t * error_status_table
uint8_t * direct_table
Definition: h264.h:480
int ff_pred_weight_table(H264Context *h)
Definition: h264.c:980
uint8_t scaling_matrix8[6][64]
Definition: h264.h:236
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:531
int nal_length_size
Number of bytes used for the NAL length (1, 2 or 4)
Definition: h264.h:526
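An illustrative sketch of reading one big-endian NAL length prefix from an AVC (is_avc / avcC extradata) stream, where nal_length_size is 1, 2 or 4; the bounds checking done by get_avc_nalsize() is omitted here.

#include <stdint.h>

int read_avc_nal_size_sketch(const uint8_t *buf, int nal_length_size)
{
    int i, nalsize = 0;

    for (i = 0; i < nal_length_size; i++)
        nalsize = (nalsize << 8) | buf[i];  /* length prefix is big-endian */
    return nalsize;
}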
useful rectangle filling function
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:37
unsigned int left_samples_available
Definition: h264.h:364
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: h264.c:54
int sei_anticlockwise_rotation
Definition: h264.h:641
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:196
int frame_num_offset
for POC type 2
Definition: h264.h:547
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:434
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2406
int x264_build
Definition: h264.h:496
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2551
uint32_t * mb2br_xy
Definition: h264.h:395
uint8_t * er_temp_buffer
int needs_realloc
picture needs to be reallocated (e.g. due to a frame size change)
Definition: h264.h:295
int overread
the number of bytes which were irreversibly read from the next frame
Definition: parser.h:35
#define FFMIN(a, b)
Definition: common.h:57
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
Definition: h264.h:410
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:57
int last_index
Definition: parser.h:31
#define H264_MAX_THREADS
Definition: h264.h:47
int poc_cycle_length
num_ref_frames_in_pic_order_cnt_cycle
Definition: h264.h:168
int reference
Definition: h264.h:296
int redundant_pic_count
Definition: h264.h:561
int sei_frame_packing_present
frame_packing_arrangement SEI message
Definition: h264.h:632
#define FF_PROFILE_UNKNOWN
Definition: avcodec.h:2623
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:426
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
SPS sps
current sps
Definition: h264.h:401
int32_t
PPS * pps_buffers[MAX_PPS_COUNT]
Definition: h264.h:533
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:114
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color)
Definition: avplay.c:396
int sei_hflip
Definition: h264.h:642
#define MAX_SPS_COUNT
Definition: h264.h:49
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
Decode PPS.
Definition: h264_ps.c:534
Context Adaptive Binary Arithmetic Coder inline functions.
H264Picture ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264.h:448
int mmco_reset
Definition: h264.h:577
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:358
uint8_t * bipred_scratchpad
Definition: h264.h:699
#define AV_EF_EXPLODE
Definition: avcodec.h:2417
int poc_lsb
Definition: h264.h:540
static int h264_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: h264.c:1733
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1184
int ff_set_ref_count(H264Context *h)
Definition: h264.c:1253
Definition: h264.h:116
#define HAVE_THREADS
Definition: config.h:284
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
Definition: mpegutils.h:41
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:402
#define CONFIG_ERROR_RESILIENCE
Definition: config.h:369
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure rotation by the specified angle (in degrees)...
Definition: display.c:52
#define PART_NOT_AVAILABLE
Definition: h264.h:381
unsigned int list_count
Definition: h264.h:446
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2540
GetBitContext intra_gb
Definition: h264.h:454
int dequant_coeff_pps
reinit tables when pps changes
Definition: h264.h:535
SPS * sps_buffers[MAX_SPS_COUNT]
Definition: h264.h:532
struct H264Context * thread_context[H264_MAX_THREADS]
Definition: h264.h:588
static const int8_t mv[256][2]
Definition: 4xm.c:75
int chroma_log2_weight_denom
Definition: h264.h:428
int bit_depth_luma
luma bit depth from sps to detect changes
Definition: h264.h:529
short offset_for_ref_frame[256]
Definition: h264.h:195
int chroma_format_idc
chroma format from sps to detect changes
Definition: h264.h:530
VideoDSPContext vdsp
Definition: h264.h:306
NULL
Definition: eval.c:55
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:127
int mb_stride
Definition: h264.h:503
#define AV_LOG_INFO
Standard information.
Definition: log.h:134
AVCodecContext * avctx
Definition: h264.h:304
Libavcodec external API header.
H264 / AVC / MPEG4 part10 codec data table
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
static int get_bit_length(H264Context *h, const uint8_t *buf, const uint8_t *ptr, int dst_length, int i, int next_avc)
Definition: h264.c:1342
#define FF_PROFILE_H264_HIGH_422_INTRA
Definition: avcodec.h:2661
1: top field
Definition: h264.h:143
enum AVCodecID codec_id
Definition: avcodec.h:1067
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:476
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:549
int ff_h264_set_parameter_from_sps(H264Context *h)
Definition: h264.c:1213
Definition: h264.h:114
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:153
int next_outputed_poc
Definition: h264.h:570
int ff_h264_decode_sei(H264Context *h)
Decode SEI.
Definition: h264_sei.c:219
int poc_msb
Definition: h264.h:541
int field_poc[2]
top/bottom POC
Definition: h264.h:282
int debug
debug
Definition: avcodec.h:2362
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
int max_contexts
Max number of threads / contexts.
Definition: h264.h:601
int recovery_frame
recovery_frame is the frame_num at which the next frame should be fully constructed.
Definition: h264.h:676
main external API structure.
Definition: avcodec.h:1050
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:67
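An illustrative sketch of how a display matrix can be built from the display orientation SEI values (sei_anticlockwise_rotation, sei_hflip, sei_vflip); the function name and the already-converted angle parameter are assumptions for the example.

#include <stdint.h>
#include "libavutil/display.h"

void build_display_matrix_sketch(int32_t matrix[9], double angle_degrees,
                                 int hflip, int vflip)
{
    av_display_rotation_set(matrix, angle_degrees); /* pure rotation matrix */
    av_display_matrix_flip(matrix, hflip, vflip);   /* optional mirroring */
}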
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:490
int ff_h264_check_intra4x4_pred_mode(H264Context *h)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:128
static void decode_postinit(H264Context *h, int setup_finished)
Run setup operations that must be run after slice header decoding.
Definition: h264.c:707
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:404
2: bottom field
Definition: h264.h:144
uint8_t * data
Definition: frame.h:104
int frame_packing_arrangement_type
Definition: h264.h:633
static int find_start_code(const uint8_t *buf, int buf_size, int buf_index, int next_avc)
Definition: h264.c:1307
uint32_t state
contains the last few bytes in MSB order
Definition: parser.h:33
int extradata_size
Definition: avcodec.h:1165
int constraint_set_flags
constraint_set[0-3]_flag
Definition: h264.h:212
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:271
SEI_PicStructType sei_pic_struct
pic_struct in picture timing SEI message
Definition: h264.h:619
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:472
#define FF_PROFILE_H264_HIGH_10_INTRA
Definition: avcodec.h:2659
int slice_flags
slice flags
Definition: avcodec.h:1561
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:444
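A hedged sketch of attaching a 3x3 display matrix to a frame as side data, the pattern used when a display orientation SEI message is present; the helper name is illustrative.

#include <stdint.h>
#include "libavutil/display.h"
#include "libavutil/frame.h"

int attach_display_matrix_sketch(AVFrame *frame, double angle_degrees)
{
    AVFrameSideData *sd = av_frame_new_side_data(frame,
                                                 AV_FRAME_DATA_DISPLAYMATRIX,
                                                 9 * sizeof(int32_t));
    if (!sd)
        return -1;
    av_display_rotation_set((int32_t *)sd->data, angle_degrees);
    return 0;
}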
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264.c:1836
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:230
int8_t * ref_index[2]
Definition: h264.h:280
Definition: h264.h:112
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:375
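A hedged sketch of setting up a bit reader over a NAL payload and reading one flag, mirroring how parsing starts with init_get_bits() and get_bits1(); note that get_bits.h is a libavcodec-internal header, so this only builds inside the tree.

#include <stdint.h>
#include "get_bits.h"

int read_first_flag_sketch(const uint8_t *rbsp, int size_in_bytes)
{
    GetBitContext gb;

    if (init_get_bits(&gb, rbsp, size_in_bytes * 8) < 0)
        return -1;
    return get_bits1(&gb);   /* first bit of the payload */
}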
int pixel_shift
0 for 8-bit H264, 1 for high-bit-depth H264
Definition: h264.h:318
int mmco_reset
An MMCO_RESET operation sets this to 1.
Definition: h264.h:285
H264Picture * cur_pic_ptr
Definition: h264.h:315
#define FF_PROFILE_H264_HIGH_444
Definition: avcodec.h:2662
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec, no matter the consequences.
Definition: avcodec.h:2343
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:81
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
Definition: h264.h:164
6: bottom field, top field, bottom field repeated, in that order
Definition: h264.h:148
AVCodecContext * avctx
void ff_h264_draw_horiz_band(H264Context *h, int y, int height)
Definition: h264.c:80
Views are on top of each other.
Definition: stereo3d.h:55
#define FF_BUG_AUTODETECT
autodetection
Definition: avcodec.h:2308
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:283
int pic_struct_present_flag
Definition: h264.h:203
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2969
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:32
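A hedged sketch of exporting a frame_packing_arrangement SEI message as AVStereo3D side data; the mapping shown here (only the top-bottom and side-by-side arrangements plus the invert flag) is illustrative, not the full decoder logic.

#include "libavutil/stereo3d.h"

int attach_stereo3d_sketch(AVFrame *frame, int top_bottom, int inverted)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);

    if (!stereo)
        return -1;
    stereo->type = top_bottom ? AV_STEREO3D_TOPBOTTOM : AV_STEREO3D_SIDEBYSIDE;
    if (inverted)
        stereo->flags |= AV_STEREO3D_FLAG_INVERT; /* right/bottom is the left view */
    return 0;
}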
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:1823
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:141
Definition: h264.h:117
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2956
int height
Definition: gxfenc.c:72
Views are next to each other.
Definition: stereo3d.h:45
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264.c:1045
discard all non reference
Definition: avcodec.h:565
AVBufferPool * qscale_table_pool
Definition: h264.h:703
H264Picture * next_output_pic
Definition: h264.h:568
int slice_context_count
Definition: h264.h:603
AVBufferPool * motion_val_pool
Definition: h264.h:705
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:388
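A hedged sketch of the av_fast_malloc() grow-only reuse pattern used for scratch buffers such as rbsp_buffer: 'size' tracks the current capacity and the buffer is only reallocated when it is too small; the wrapper name is illustrative.

#include <stddef.h>
#include <stdint.h>
#include "libavutil/mem.h"

int ensure_buffer_sketch(uint8_t **buf, unsigned int *size, size_t needed)
{
    av_fast_malloc(buf, size, needed);  /* grows *buf only when needed > *size */
    return *buf ? 0 : -1;               /* the pointer is NULL on failure */
}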
uint8_t * rbsp_buffer[2]
Definition: h264.h:519
#define tprintf(p,...)
Definition: get_bits.h:626
MECmpContext mecc
Definition: h264.h:305
#define CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:759
common internal api header.
#define CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:755
#define FRAME_RECOVERED_SEI
A sufficient number of frames have been decoded since an SEI recovery point, so all the following frames...
Definition: h264.h:687
int ff_h264_field_end(H264Context *h, int in_setup)
Definition: h264_picture.c:147
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:117
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
Definition: alsdec.c:1771
uint16_t * slice_table_base
Definition: h264.h:537
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264.h:162
int16_t * dc_val[3]
H.264 / AVC / MPEG4 part10 motion vector prediction.
Bi-dir predicted.
Definition: avutil.h:255
AVProfile.
Definition: avcodec.h:2784
int index
Definition: parser.h:30
int ff_h264_context_init(H264Context *h)
Init context; allocate buffers which are not shared amongst multiple threads.
Definition: h264.c:469
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:2307
int cur_chroma_format_idc
Definition: h264.h:698
int den
denominator
Definition: rational.h:45
int sei_ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
Definition: h264.h:649
static av_cold int init(AVCodecParserContext *s)
Definition: h264_parser.c:499
int bit_depth_luma
bit_depth_luma_minus8 + 8
Definition: h264.h:209
void * priv_data
Definition: avcodec.h:1092
#define PICT_FRAME
Definition: mpegutils.h:35
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
Definition: h264.h:546
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2376
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:325
#define FRAME_MBAFF(h)
Definition: h264.h:71
const uint16_t ff_h264_mb_sizes[4]
Definition: h264.c:52
uint8_t non_zero_count_cache[15 *8]
non-zero coefficient count cache.
Definition: h264.h:371
int ff_h264_decode_extradata(H264Context *h)
Definition: h264.c:544
uint8_t(*[2] top_borders)[(16 *3)*2]
Definition: h264.h:365
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1100
#define FF_BUG_TRUNCATED
Definition: avcodec.h:2327
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:76
H264Picture cur_pic
Definition: h264.h:316
int sei_display_orientation_present
display orientation SEI message
Definition: h264.h:640
int content_interpretation_type
Definition: h264.h:634
int key_frame
1 -> keyframe, 0 -> not a keyframe
Definition: frame.h:191
Views are packed per column.
Definition: stereo3d.h:107
int mb_width
Definition: h264.h:502
enum AVPictureType pict_type
Definition: h264.h:611
int current_slice
current slice number, used to initialize slice_num of each thread/context
Definition: h264.h:593
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1151
uint32_t * mb2b_xy
Definition: h264.h:394
#define FF_PROFILE_H264_HIGH_444_INTRA
Definition: avcodec.h:2664
int delta_poc_bottom
Definition: h264.h:542
#define SLICE_FLAG_CODED_ORDER
draw_horiz_band() is called in coded order instead of display order
Definition: avcodec.h:1562
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:205
H264DSPContext h264dsp
Definition: h264.h:307
static int get_avc_nalsize(H264Context *h, const uint8_t *buf, int buf_size, int *buf_index)
Definition: h264.c:1324
Definition: h264.h:110
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264.h:692
int8_t * intra4x4_pred_mode
Definition: h264.h:359
#define FF_PROFILE_H264_CONSTRAINED
Definition: avcodec.h:2650
#define FF_PROFILE_H264_CONSTRAINED_BASELINE
Definition: avcodec.h:2654
8: frame tripling
Definition: h264.h:150
#define AV_RN64A(p)
Definition: intreadwrite.h:450
uint8_t(* non_zero_count)[48]
Definition: h264.h:373
#define FF_PROFILE_H264_HIGH_10
Definition: avcodec.h:2658
exp golomb vlc stuff
uint8_t * mbskip_table
This structure stores compressed data.
Definition: avcodec.h:950
int sei_recovery_frame_cnt
recovery_frame_cnt from SEI message
Definition: h264.h:668
int droppable
Definition: h264.h:329
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
Definition: avcodec.h:2341
#define STARTCODE_TEST
int nal_ref_idc
Definition: h264.h:517
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:126
void ff_h264_hl_decode_mb(H264Context *h)
Definition: h264_mb.c:804
int b_stride
Definition: h264.h:396
unsigned int rbsp_buffer_size[2]
Definition: h264.h:520
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Context Adaptive Binary Arithmetic Coder.
int8_t ref_cache[2][5 *8]
Definition: h264.h:379
Definition: vf_drawbox.c:37
static const AVProfile profiles[]
Definition: h264.c:1847