/* Libav — h264.c (doxygen page header; navigation text, not part of the
 * original source file) */
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
28 #include "libavutil/avassert.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/stereo3d.h"
31 #include "libavutil/timer.h"
32 #include "internal.h"
33 #include "cabac.h"
34 #include "cabac_functions.h"
35 #include "dsputil.h"
36 #include "error_resilience.h"
37 #include "avcodec.h"
38 #include "mpegvideo.h"
39 #include "h264.h"
40 #include "h264data.h"
41 #include "h264chroma.h"
42 #include "h264_mvpred.h"
43 #include "golomb.h"
44 #include "mathops.h"
45 #include "rectangle.h"
46 #include "svq3.h"
47 #include "thread.h"
48 
49 #include <assert.h>
50 
/* Per-macroblock coefficient buffer sizes in bytes. The four entries
 * (256/384/512/768) match 16x16 luma alone, plus 4:2:0, 4:2:2 and 4:4:4
 * chroma sample counts — presumably indexed by chroma_format_idc
 * (NOTE(review): confirm against the header declaring this table). */
const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
52 
/* rem6[q] == q % 6 for every q in [0, QP_MAX_NUM]; lookup table used to
 * avoid a runtime modulo when deriving dequant scale indices from QP. */
static const uint8_t rem6[QP_MAX_NUM + 1] = {
    0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
    3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
    0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
};
58 
/* div6[q] == q / 6 for every q in [0, QP_MAX_NUM]; companion table to
 * rem6[] above, avoiding a runtime division on the QP. */
static const uint8_t div6[QP_MAX_NUM + 1] = {
    0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
    3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
};
64 
/* Coefficient scan order for 4x4 blocks in field-coded macroblocks.
 * Each entry is written as x + y * 4, i.e. an offset into the block. */
static const uint8_t field_scan[16] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};
71 
/* Coefficient scan order for 8x8 blocks in field-coded macroblocks.
 * Each entry is written as x + y * 8, i.e. an offset into the block. */
static const uint8_t field_scan8x8[64] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};
90 
/* CAVLC variant of the 8x8 field scan — same positions as field_scan8x8
 * but reordered (presumably the i/4 + 16*(i%4) interleave used by the
 * CAVLC zigzag table below; see its comment).  Entries are x + y * 8. */
static const uint8_t field_scan8x8_cavlc[64] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};
109 
/* CAVLC-ordered 8x8 zigzag scan; entries are x + y * 8 block offsets. */
// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
static const uint8_t zigzag_scan8x8_cavlc[64] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};
129 
/* Base dequantization factors for 4x4 blocks, one row per QP % 6 step.
 * Each row holds three distinct values — presumably the three value
 * classes of the 4x4 dequant matrix (TODO confirm against the code that
 * expands these tables). */
static const uint8_t dequant4_coeff_init[6][3] = {
    { 10, 13, 16 },
    { 11, 14, 18 },
    { 13, 16, 20 },
    { 14, 18, 23 },
    { 16, 20, 25 },
    { 18, 23, 29 },
};
138 
/* Maps each of the 16 coarse positions of the 8x8 dequant matrix to one
 * of the six base values in dequant8_coeff_init[][] below — presumably;
 * the expansion code is outside this view (TODO confirm). */
static const uint8_t dequant8_coeff_init_scan[16] = {
    0, 3, 4, 3, 3, 1, 5, 1, 4, 5, 2, 5, 3, 1, 5, 1
};
142 
/* Base dequantization factors for 8x8 blocks, one row per QP % 6 step;
 * six value classes per row, selected via dequant8_coeff_init_scan[]. */
static const uint8_t dequant8_coeff_init[6][6] = {
    { 20, 18, 32, 19, 25, 24 },
    { 22, 19, 35, 21, 28, 26 },
    { 26, 23, 42, 24, 33, 31 },
    { 28, 25, 45, 26, 35, 33 },
    { 32, 28, 51, 30, 40, 38 },
    { 36, 32, 58, 34, 46, 43 },
};
151 
153 #if CONFIG_H264_DXVA2_HWACCEL
155 #endif
156 #if CONFIG_H264_VAAPI_HWACCEL
158 #endif
159 #if CONFIG_H264_VDA_HWACCEL
161 #endif
162 #if CONFIG_H264_VDPAU_HWACCEL
164 #endif
167 };
168 
170 #if CONFIG_H264_DXVA2_HWACCEL
172 #endif
173 #if CONFIG_H264_VAAPI_HWACCEL
175 #endif
176 #if CONFIG_H264_VDA_HWACCEL
178 #endif
179 #if CONFIG_H264_VDPAU_HWACCEL
181 #endif
184 };
185 
/* Error-resilience callback: fill the per-MB caches with a single
 * concealment motion vector / reference so the macroblock at
 * (mb_x, mb_y) can be painted from list 0.
 *
 * opaque  - the H264Context this callback was registered with
 * ref     - list-0 reference index chosen by the ER code
 * mv      - concealment motion vectors; only (*mv)[0][0] is consumed
 * mv_dir, mv_type, mb_intra, mb_skipped - part of the ER callback
 *           signature but unused here. */
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    H264Context *h = opaque;

    h->mb_x  = mb_x;
    h->mb_y  = mb_y;
    h->mb_xy = mb_x + mb_y * h->mb_stride;
    memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
    assert(ref >= 0);
    /* FIXME: It is possible albeit uncommon that slice references
     * differ between slices. We take the easy approach and ignore
     * it for now. If this turns out to have any relevance in
     * practice then correct remapping should be added. */
    if (ref >= h->ref_count[0])
        ref = 0; /* clamp an out-of-range concealment reference */
    fill_rectangle(&h->cur_pic.ref_index[0][4 * h->mb_xy],
                   2, 2, 2, ref, 1);
    fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
    fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
                   pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
    assert(!FRAME_MBAFF(h));
    /* NOTE(review): one line (original line 209) is elided in this dump —
     * presumably the call that reconstructs the MB from the filled caches. */
}
211 
/* NOTE(review): the signature line (original line 212) is elided in this
 * dump.  From the body this is the draw-horiz-band helper, apparently
 * taking (H264Context *h, int y, int height) — confirm against the
 * original source.  It notifies the user application, via
 * avctx->draw_horiz_band, that rows [y, y+height) are ready. */
{
    AVCodecContext *avctx = h->avctx;
    Picture *cur  = &h->cur_pic;
    /* Last output-able reference picture, if any frame data exists. */
    Picture *last = h->ref_list[0][0].f.data[0] ? &h->ref_list[0][0] : NULL;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    int vshift = desc->log2_chroma_h;
    const int field_pic = h->picture_structure != PICT_FRAME;
    /* In a field picture, y/height count field lines: double them to
     * address frame lines. */
    if (field_pic) {
        height <<= 1;
        y      <<= 1;
    }

    height = FFMIN(height, avctx->height - y);

    /* Do not report single fields unless the user opted in. */
    if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
        return;

    if (avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[AV_NUM_DATA_POINTERS];
        int i;

        /* NOTE(review): the tail of this condition (original line 236) is
         * elided in this dump, leaving the if-statement truncated here. */
        if (cur->f.pict_type == AV_PICTURE_TYPE_B || h->low_delay ||
            src = &cur->f;
        else if (last)
            src = &last->f;
        else
            return;

        /* Byte offsets of the band start in each plane. */
        offset[0] = y * src->linesize[0];
        offset[1] =
        offset[2] = (y >> vshift) * src->linesize[1];
        for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
            offset[i] = 0;

        emms_c();

        avctx->draw_horiz_band(avctx, src, offset,
                               y, h->picture_structure, height);
    }
}
255 
/* Release every buffer held by *pic and zero all state past the
 * ThreadFrame, so the DPB slot can be reused.  No-op when the picture
 * holds no frame data. */
static void unref_picture(H264Context *h, Picture *pic)
{
    /* Everything up to and including pic->tf survives the final memset. */
    int off = offsetof(Picture, tf) + sizeof(pic->tf);
    int i;

    if (!pic->f.buf[0])
        return;

    ff_thread_release_buffer(h->avctx, &pic->tf);
    /* NOTE(review): several av_buffer_unref() calls (original lines
     * 265-270, e.g. for the qscale/mb_type/motion_val buffers and the
     * hwaccel private buffer) are elided in this dump. */

    for (i = 0; i < 2; i++) {
        av_buffer_unref(&pic->ref_index_buf[i]);
    }

    /* Wipe all remaining fields in one go. */
    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}
276 
277 static void release_unused_pictures(H264Context *h, int remove_current)
278 {
279  int i;
280 
281  /* release non reference frames */
282  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
283  if (h->DPB[i].f.buf[0] && !h->DPB[i].reference &&
284  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
285  unref_picture(h, &h->DPB[i]);
286  }
287  }
288 }
289 
/* Turn dst into an additional reference to the same decoded frame as
 * src: ref-count the frame buffer and side-data buffers, then copy the
 * POC / reference-state fields.  Returns 0 on success; on failure every
 * buffer acquired so far is released via unref_picture() and a negative
 * error code is returned. */
static int ref_picture(H264Context *h, Picture *dst, Picture *src)
{
    int ret, i;

    av_assert0(!dst->f.buf[0]);
    av_assert0(src->f.buf[0]);

    src->tf.f = &src->f;
    dst->tf.f = &dst->f;
    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        goto fail;

    /* NOTE(review): the av_buffer_ref() calls filling qscale_table_buf
     * and mb_type_buf (original lines 303-304) are elided in this dump. */
    if (!dst->qscale_table_buf || !dst->mb_type_buf)
        goto fail;
    dst->qscale_table = src->qscale_table;
    dst->mb_type      = src->mb_type;

    for (i = 0; i < 2; i++) {
        dst->motion_val_buf[i] = av_buffer_ref(src->motion_val_buf[i]);
        dst->ref_index_buf[i]  = av_buffer_ref(src->ref_index_buf[i]);
        if (!dst->motion_val_buf[i] || !dst->ref_index_buf[i])
            goto fail;
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    if (src->hwaccel_picture_private) {
        /* NOTE(review): the hwaccel_priv_buf ref and the assignment of
         * hwaccel_picture_private (original lines 320-323) are elided. */
        if (!dst->hwaccel_priv_buf)
            goto fail;
    }

    for (i = 0; i < 2; i++)
        dst->field_poc[i] = src->field_poc[i];

    memcpy(dst->ref_poc,   src->ref_poc,   sizeof(src->ref_poc));
    memcpy(dst->ref_count, src->ref_count, sizeof(src->ref_count));

    dst->poc           = src->poc;
    dst->frame_num     = src->frame_num;
    dst->mmco_reset    = src->mmco_reset;
    dst->pic_id        = src->pic_id;
    dst->long_ref      = src->long_ref;
    dst->mbaff         = src->mbaff;
    dst->field_picture = src->field_picture;
    dst->needs_realloc = src->needs_realloc;
    dst->reference     = src->reference;
    dst->recovered     = src->recovered;

    return 0;
fail:
    unref_picture(h, dst);
    return ret;
}
348 
/* Allocate the linesize-dependent scratch buffers: the bi-prediction
 * scratchpad, the edge-emulation buffer and the motion-estimation
 * scratchpad.  Idempotent — returns 0 immediately when already
 * allocated.  Returns 0 or AVERROR(ENOMEM). */
static int alloc_scratch_buffers(H264Context *h, int linesize)
{
    /* Round the line size up to a 32-byte-aligned stride with headroom. */
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    if (h->bipred_scratchpad)
        return 0;

    h->bipred_scratchpad = av_malloc(16 * 6 * alloc_size);
    // edge emu needs blocksize + filter length - 1
    // (= 21x21 for h264)
    h->edge_emu_buffer = av_mallocz(alloc_size * 2 * 21);
    h->me.scratchpad   = av_mallocz(alloc_size * 2 * 16 * 2);

    if (!h->bipred_scratchpad || !h->edge_emu_buffer || !h->me.scratchpad) {
        /* NOTE(review): av_freep() calls for bipred_scratchpad and
         * edge_emu_buffer (original lines 363-364) are elided in this
         * dump; all three buffers should be freed on this path. */
        av_freep(&h->me.scratchpad);
        return AVERROR(ENOMEM);
    }

    h->me.temp = h->me.scratchpad;

    return 0;
}
373 
/* NOTE(review): the signature line (original line 374) is elided in this
 * dump; presumably `static int init_table_pools(H264Context *h)` —
 * confirm against the original source.  Creates the shared AVBufferPools
 * for the per-picture side-data tables. */
{
    /* +1 row of guard macroblocks plus one extra entry. */
    const int big_mb_num    = h->mb_stride * (h->mb_height + 1) + 1;
    const int mb_array_size = h->mb_stride * h->mb_height;
    const int b4_stride     = h->mb_width * 4 + 1;
    const int b4_array_size = b4_stride * h->mb_height * 4;

    /* NOTE(review): the second argument of this call (original line 382)
     * is elided, leaving the statement truncated here. */
    h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
    h->mb_type_pool      = av_buffer_pool_init((big_mb_num + h->mb_stride) *
                                               sizeof(uint32_t), av_buffer_allocz);
    h->motion_val_pool   = av_buffer_pool_init(2 * (b4_array_size + 4) *
                                               sizeof(int16_t), av_buffer_allocz);
    h->ref_index_pool    = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);

    if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
        !h->ref_index_pool) {
        /* NOTE(review): the pool uninit calls on this error path
         * (original lines 391-394) are elided in this dump. */
        return AVERROR(ENOMEM);
    }

    return 0;
}
400 
/* Allocate the frame buffer plus all per-picture side data (mb-type and
 * qscale tables, motion vectors, reference indices, and — when a
 * hwaccel is active — its private buffer).  Lazily initializes the
 * shared table pools on first use.  Returns 0 on success or a negative
 * AVERROR; partial allocations are released through unref_picture(). */
static int alloc_picture(H264Context *h, Picture *pic)
{
    int i, ret = 0;

    av_assert0(!pic->f.data[0]);

    pic->tf.f = &pic->f;
    /* NOTE(review): the flags argument of this call (original line 409)
     * is elided, leaving the statement truncated here. */
    ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
    if (ret < 0)
        goto fail;

    h->linesize   = pic->f.linesize[0];
    h->uvlinesize = pic->f.linesize[1];

    if (h->avctx->hwaccel) {
        const AVHWAccel *hwaccel = h->avctx->hwaccel;
        /* NOTE(review): the hwaccel_priv_buf allocation and the
         * hwaccel_picture_private assignment (original lines 418-424)
         * are partially elided in this dump. */
        if (hwaccel->priv_data_size) {
            if (!pic->hwaccel_priv_buf)
                return AVERROR(ENOMEM);
        }
    }

    /* Create the shared buffer pools the first time a picture is
     * allocated. */
    if (!h->qscale_table_pool) {
        ret = init_table_pools(h);
        if (ret < 0)
            goto fail;
    }

    /* NOTE(review): the pool-get calls filling qscale_table_buf and
     * mb_type_buf (original lines 433-434) are elided in this dump. */
    if (!pic->qscale_table_buf || !pic->mb_type_buf)
        goto fail;

    /* Offset past the leading guard rows of the mb arrays (presumably
     * edge padding; sized 2 * mb_stride + 1 — confirm against the pool
     * sizes in init_table_pools). */
    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
    pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;

    for (i = 0; i < 2; i++) {
        /* NOTE(review): the pool-get calls filling motion_val_buf[i] and
         * ref_index_buf[i] (original lines 442-443) are elided. */
        if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
            goto fail;

        pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
        pic->ref_index[i]  = pic->ref_index_buf[i]->data;
    }

    return 0;
fail:
    unref_picture(h, pic);
    return (ret < 0) ? ret : AVERROR(ENOMEM);
}
456 
457 static inline int pic_is_unused(H264Context *h, Picture *pic)
458 {
459  if (!pic->f.buf[0])
460  return 1;
461  if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
462  return 1;
463  return 0;
464 }
465 
/* NOTE(review): the signature line (original line 466) is elided in this
 * dump; presumably `static int find_unused_picture(H264Context *h)` —
 * confirm against the original source.  Returns the index of the first
 * reusable DPB slot, or AVERROR_INVALIDDATA if none is free. */
{
    int i;

    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (pic_is_unused(h, &h->DPB[i]))
            break;
    }
    if (i == MAX_PICTURE_COUNT)
        return AVERROR_INVALIDDATA;

    /* A slot kept only pending reallocation must drop its old contents
     * before it can be handed out. */
    if (h->DPB[i].needs_realloc) {
        h->DPB[i].needs_realloc = 0;
        unref_picture(h, &h->DPB[i]);
    }

    return i;
}
484 
/* NOTE(review): the signature line is elided in this dump; from the
 * trailing FIXME this is the intra4x4 pred-mode checker (compare
 * ff_h264_check_intra_pred_mode below) — confirm against the original
 * source.  Validates the cached 4x4 intra prediction modes against the
 * availability of top/left neighbour samples, substituting fallback
 * modes where the tables allow; returns 0 or AVERROR_INVALIDDATA. */
{
    /* Remap tables indexed by cached mode: negative entry other than a
     * substitute means the mode is invalid without that neighbour;
     * 0 keeps the mode; positive entries replace it. */
    static const int8_t top[12] = {
        -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
    };
    static const int8_t left[12] = {
        0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
    };
    int i;

    /* Top row of samples unavailable: check the four modes along the
     * top edge of the macroblock. */
    if (!(h->top_samples_available & 0x8000)) {
        for (i = 0; i < 4; i++) {
            int status = top[h->intra4x4_pred_mode_cache[scan8[0] + i]];
            if (status < 0) {
                /* NOTE(review): the av_log() call head (original line
                 * 503) is elided, leaving the argument list truncated. */
                "top block unavailable for requested intra4x4 mode %d at %d %d\n",
                status, h->mb_x, h->mb_y);
                return AVERROR_INVALIDDATA;
            } else if (status) {
                h->intra4x4_pred_mode_cache[scan8[0] + i] = status;
            }
        }
    }

    /* Same check down the left edge, one mask bit per 4x4 row. */
    if ((h->left_samples_available & 0x8888) != 0x8888) {
        static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
        for (i = 0; i < 4; i++)
            if (!(h->left_samples_available & mask[i])) {
                int status = left[h->intra4x4_pred_mode_cache[scan8[0] + 8 * i]];
                if (status < 0) {
                    /* NOTE(review): the av_log() call head (original line
                     * 519) is elided here as well. */
                    "left block unavailable for requested intra4x4 mode %d at %d %d\n",
                    status, h->mb_x, h->mb_y);
                    return AVERROR_INVALIDDATA;
                } else if (status) {
                    h->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
                }
            }
    }

    return 0;
} // FIXME cleanup like ff_h264_check_intra_pred_mode
531 
/* Validate a 16x16 (or chroma 8x8) intra prediction mode against the
 * availability of top/left neighbour samples.  Returns the (possibly
 * substituted) mode, or AVERROR_INVALIDDATA when the mode cannot be
 * used with the available neighbours. */
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
{
    /* Substitution tables indexed by mode; -1 means invalid without
     * that neighbour, other values replace the mode. */
    static const int8_t top[4]  = { LEFT_DC_PRED8x8, 1, -1, -1 };
    static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };

    /* Unsigned compare also rejects negative modes. */
    if (mode > 3U) {
        /* NOTE(review): the av_log() call head (original line 542) is
         * elided, leaving the argument list truncated here. */
        "out of range intra chroma pred mode at %d %d\n",
        h->mb_x, h->mb_y);
        return AVERROR_INVALIDDATA;
    }

    if (!(h->top_samples_available & 0x8000)) {
        mode = top[mode];
        if (mode < 0) {
            /* NOTE(review): av_log() call head (original line 551)
             * elided. */
            "top block unavailable for requested intra mode at %d %d\n",
            h->mb_x, h->mb_y);
            return AVERROR_INVALIDDATA;
        }
    }

    if ((h->left_samples_available & 0x8080) != 0x8080) {
        mode = left[mode];
        if (is_chroma && (h->left_samples_available & 0x8080)) {
            // mad cow disease mode, aka MBAFF + constrained_intra_pred
            mode = ALZHEIMER_DC_L0T_PRED8x8 +
                   (!(h->left_samples_available & 0x8000)) +
                   2 * (mode == DC_128_PRED8x8);
        }
        if (mode < 0) {
            /* NOTE(review): av_log() call head (original line 567)
             * elided. */
            "left block unavailable for requested intra mode at %d %d\n",
            h->mb_x, h->mb_y);
            return AVERROR_INVALIDDATA;
        }
    }

    return mode;
}
576 
578  int *dst_length, int *consumed, int length)
579 {
580  int i, si, di;
581  uint8_t *dst;
582  int bufidx;
583 
584  // src[0]&0x80; // forbidden bit
585  h->nal_ref_idc = src[0] >> 5;
586  h->nal_unit_type = src[0] & 0x1F;
587 
588  src++;
589  length--;
590 
591 #define STARTCODE_TEST \
592  if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
593  if (src[i + 2] != 3) { \
594  /* startcode, so we must be past the end */ \
595  length = i; \
596  } \
597  break; \
598  }
599 
600 #if HAVE_FAST_UNALIGNED
601 #define FIND_FIRST_ZERO \
602  if (i > 0 && !src[i]) \
603  i--; \
604  while (src[i]) \
605  i++
606 
607 #if HAVE_FAST_64BIT
608  for (i = 0; i + 1 < length; i += 9) {
609  if (!((~AV_RN64A(src + i) &
610  (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
611  0x8000800080008080ULL))
612  continue;
613  FIND_FIRST_ZERO;
615  i -= 7;
616  }
617 #else
618  for (i = 0; i + 1 < length; i += 5) {
619  if (!((~AV_RN32A(src + i) &
620  (AV_RN32A(src + i) - 0x01000101U)) &
621  0x80008080U))
622  continue;
623  FIND_FIRST_ZERO;
625  i -= 3;
626  }
627 #endif
628 #else
629  for (i = 0; i + 1 < length; i += 2) {
630  if (src[i])
631  continue;
632  if (i > 0 && src[i - 1] == 0)
633  i--;
635  }
636 #endif
637 
638  if (i >= length - 1) { // no escaped 0
639  *dst_length = length;
640  *consumed = length + 1; // +1 for the header
641  return src;
642  }
643 
644  // use second escape buffer for inter data
645  bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0;
646  av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx],
648  dst = h->rbsp_buffer[bufidx];
649 
650  if (dst == NULL)
651  return NULL;
652 
653  memcpy(dst, src, i);
654  si = di = i;
655  while (si + 2 < length) {
656  // remove escapes (very rare 1:2^22)
657  if (src[si + 2] > 3) {
658  dst[di++] = src[si++];
659  dst[di++] = src[si++];
660  } else if (src[si] == 0 && src[si + 1] == 0) {
661  if (src[si + 2] == 3) { // escape
662  dst[di++] = 0;
663  dst[di++] = 0;
664  si += 3;
665  continue;
666  } else // next start code
667  goto nsc;
668  }
669 
670  dst[di++] = src[si++];
671  }
672  while (si < length)
673  dst[di++] = src[si++];
674 
675 nsc:
676  memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
677 
678  *dst_length = di;
679  *consumed = si + 1; // +1 for the header
680  /* FIXME store exact number of bits in the getbitcontext
681  * (it is needed for decoding) */
682  return dst;
683 }
684 
689 static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
690 {
691  int v = *src;
692  int r;
693 
694  tprintf(h->avctx, "rbsp trailing %X\n", v);
695 
696  for (r = 1; r < 9; r++) {
697  if (v & 1)
698  return r;
699  v >>= 1;
700  }
701  return 0;
702 }
703 
704 static inline int get_lowest_part_list_y(H264Context *h, Picture *pic, int n,
705  int height, int y_offset, int list)
706 {
707  int raw_my = h->mv_cache[list][scan8[n]][1];
708  int filter_height_up = (raw_my & 3) ? 2 : 0;
709  int filter_height_down = (raw_my & 3) ? 3 : 0;
710  int full_my = (raw_my >> 2) + y_offset;
711  int top = full_my - filter_height_up;
712  int bottom = full_my + filter_height_down + height;
713 
714  return FFMAX(abs(top), bottom);
715 }
716 
717 static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
718  int height, int y_offset, int list0,
719  int list1, int *nrefs)
720 {
721  int my;
722 
723  y_offset += 16 * (h->mb_y >> MB_FIELD(h));
724 
725  if (list0) {
726  int ref_n = h->ref_cache[0][scan8[n]];
727  Picture *ref = &h->ref_list[0][ref_n];
728 
729  // Error resilience puts the current picture in the ref list.
730  // Don't try to wait on these as it will cause a deadlock.
731  // Fields can wait on each other, though.
732  if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
733  (ref->reference & 3) != h->picture_structure) {
734  my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
735  if (refs[0][ref_n] < 0)
736  nrefs[0] += 1;
737  refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
738  }
739  }
740 
741  if (list1) {
742  int ref_n = h->ref_cache[1][scan8[n]];
743  Picture *ref = &h->ref_list[1][ref_n];
744 
745  if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
746  (ref->reference & 3) != h->picture_structure) {
747  my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
748  if (refs[1][ref_n] < 0)
749  nrefs[1] += 1;
750  refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
751  }
752  }
753 }
754 
/* NOTE(review): the signature line (original line 760) is elided in this
 * dump; from the body this is the frame-threading synchronization
 * helper, presumably `static void await_references(H264Context *h)` —
 * confirm against the original source.  It collects the lowest row each
 * reference picture must have decoded for the current macroblock's
 * motion compensation, then blocks on ff_thread_await_progress() for
 * each of them. */
{
    const int mb_xy   = h->mb_xy;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    int refs[2][48];
    int nrefs[2] = { 0 };
    int ref, list;

    /* -1 marks "reference not used by this MB". */
    memset(refs, -1, sizeof(refs));

    /* Walk the partitioning of the MB and accumulate per-reference
     * lowest rows into refs[][]. */
    if (IS_16X16(mb_type)) {
        get_lowest_part_y(h, refs, 0, 16, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
    } else if (IS_16X8(mb_type)) {
        get_lowest_part_y(h, refs, 0, 8, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
        get_lowest_part_y(h, refs, 8, 8, 8,
                          IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
    } else if (IS_8X16(mb_type)) {
        get_lowest_part_y(h, refs, 0, 16, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
        get_lowest_part_y(h, refs, 4, 16, 0,
                          IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
    } else {
        int i;

        assert(IS_8X8(mb_type));

        for (i = 0; i < 4; i++) {
            const int sub_mb_type = h->sub_mb_type[i];
            const int n           = 4 * i;
            int y_offset          = (i & 2) << 2;

            if (IS_SUB_8X8(sub_mb_type)) {
                get_lowest_part_y(h, refs, n, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else if (IS_SUB_8X4(sub_mb_type)) {
                get_lowest_part_y(h, refs, n, 4, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
                get_lowest_part_y(h, refs, n + 2, 4, y_offset + 4,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else if (IS_SUB_4X8(sub_mb_type)) {
                get_lowest_part_y(h, refs, n, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
                get_lowest_part_y(h, refs, n + 1, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else {
                int j;
                assert(IS_SUB_4X4(sub_mb_type));
                for (j = 0; j < 4; j++) {
                    int sub_y_offset = y_offset + 2 * (j & 2);
                    get_lowest_part_y(h, refs, n + j, 4, sub_y_offset,
                                      IS_DIR(sub_mb_type, 0, 0),
                                      IS_DIR(sub_mb_type, 0, 1),
                                      nrefs);
                }
            }
        }
    }

    /* Wait on each referenced picture; stop early once all nrefs[list]
     * references have been handled. */
    for (list = h->list_count - 1; list >= 0; list--)
        for (ref = 0; ref < 48 && nrefs[list]; ref++) {
            int row = refs[list][ref];
            if (row >= 0) {
                Picture *ref_pic  = &h->ref_list[list][ref];
                int ref_field         = ref_pic->reference - 1;
                int ref_field_picture = ref_pic->field_picture;
                int pic_height        = 16 * h->mb_height >> ref_field_picture;

                row <<= MB_MBAFF(h);
                nrefs[list]--;

                if (!FIELD_PICTURE(h) && ref_field_picture) { // frame referencing two fields
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN((row >> 1) - !(row & 1),
                                                   pic_height - 1),
                                             1);
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN((row >> 1), pic_height - 1),
                                             0);
                } else if (FIELD_PICTURE(h) && !ref_field_picture) { // field referencing one field of a frame
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN(row * 2 + ref_field,
                                                   pic_height - 1),
                                             0);
                } else if (FIELD_PICTURE(h)) {
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN(row, pic_height - 1),
                                             ref_field);
                } else {
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN(row, pic_height - 1),
                                             0);
                }
            }
        }
}
867 
/* NOTE(review): the head of this signature (original line 868) is elided
 * in this dump; this is the single-direction motion-compensation worker
 * (presumably `static av_always_inline void mc_dir_part(H264Context *h,
 * Picture *pic, ...)`) — confirm against the original source.
 * Performs quarter-pel luma and chroma MC for one partition from one
 * reference picture, spilling to the edge-emulation buffer when the
 * motion vector reaches outside the picture. */
                                           int n, int square, int height,
                                           int delta, int list,
                                           uint8_t *dest_y, uint8_t *dest_cb,
                                           uint8_t *dest_cr,
                                           int src_x_offset, int src_y_offset,
                                           qpel_mc_func *qpix_op,
                                           h264_chroma_mc_func chroma_op,
                                           int pixel_shift, int chroma_idc)
{
    const int mx      = h->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
    int my            = h->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
    const int luma_xy = (mx & 3) + ((my & 3) << 2); /* qpel phase index */
    ptrdiff_t offset  = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
    uint8_t *src_y    = pic->f.data[0] + offset;
    uint8_t *src_cb, *src_cr;
    int extra_width   = 0;
    int extra_height  = 0;
    int emu           = 0;
    const int full_mx    = mx >> 2;
    const int full_my    = my >> 2;
    const int pic_width  = 16 * h->mb_width;
    const int pic_height = 16 * h->mb_height >> MB_FIELD(h);
    int ysh;

    /* Sub-pel interpolation reads 3 extra samples around the block. */
    if (mx & 7)
        extra_width -= 3;
    if (my & 7)
        extra_height -= 3;

    if (full_mx                <          0 - extra_width  ||
        full_my                <          0 - extra_height ||
        full_mx + 16 /*FIXME*/ > pic_width  + extra_width  ||
        full_my + 16 /*FIXME*/ > pic_height + extra_height) {
        /* NOTE(review): the emulated_edge_mc call head (original line
         * 902) is elided, leaving the argument list truncated here. */
        src_y - (2 << pixel_shift) - 2 * h->mb_linesize,
        h->mb_linesize, h->mb_linesize,
        16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
        full_my - 2, pic_width, pic_height);
        src_y = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
        emu   = 1;
    }

    qpix_op[luma_xy](dest_y, src_y, h->mb_linesize); // FIXME try variable height perhaps?
    if (!square)
        qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize);

    /* Grayscale decoding: luma only. */
    if (CONFIG_GRAY && h->flags & CODEC_FLAG_GRAY)
        return;

    if (chroma_idc == 3 /* yuv444 */) {
        /* 4:4:4 chroma uses the full luma MC path on both planes. */
        src_cb = pic->f.data[1] + offset;
        if (emu) {
            /* NOTE(review): emulated_edge_mc call head (original line
             * 921) elided. */
            src_cb - (2 << pixel_shift) - 2 * h->mb_linesize,
            h->mb_linesize, h->mb_linesize,
            16 + 5, 16 + 5 /*FIXME*/,
            full_mx - 2, full_my - 2,
            pic_width, pic_height);
            src_cb = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
        }
        qpix_op[luma_xy](dest_cb, src_cb, h->mb_linesize); // FIXME try variable height perhaps?
        if (!square)
            qpix_op[luma_xy](dest_cb + delta, src_cb + delta, h->mb_linesize);

        src_cr = pic->f.data[2] + offset;
        if (emu) {
            /* NOTE(review): emulated_edge_mc call head (original line
             * 935) elided. */
            src_cr - (2 << pixel_shift) - 2 * h->mb_linesize,
            h->mb_linesize, h->mb_linesize,
            16 + 5, 16 + 5 /*FIXME*/,
            full_mx - 2, full_my - 2,
            pic_width, pic_height);
            src_cr = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
        }
        qpix_op[luma_xy](dest_cr, src_cr, h->mb_linesize); // FIXME try variable height perhaps?
        if (!square)
            qpix_op[luma_xy](dest_cr + delta, src_cr + delta, h->mb_linesize);
        return;
    }

    ysh = 3 - (chroma_idc == 2 /* yuv422 */);
    if (chroma_idc == 1 /* yuv420 */ && MB_FIELD(h)) {
        // chroma offset when predicting from a field of opposite parity
        my  += 2 * ((h->mb_y & 1) - (pic->reference - 1));
        emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
    }

    src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) +
             (my >> ysh) * h->mb_uvlinesize;
    src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) +
             (my >> ysh) * h->mb_uvlinesize;

    if (emu) {
        h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb,
        /* NOTE(review): the stride arguments (original line 963) are
         * elided here. */
                                 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
                                 pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
        src_cb = h->edge_emu_buffer;
    }
    chroma_op(dest_cb, src_cb, h->mb_uvlinesize,
              height >> (chroma_idc == 1 /* yuv420 */),
              mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);

    if (emu) {
        h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr,
        /* NOTE(review): the stride arguments (original line 974) are
         * elided here. */
                                 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
                                 pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
        src_cr = h->edge_emu_buffer;
    }
    chroma_op(dest_cr, src_cr, h->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */),
              mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
}
982 
983 static av_always_inline void mc_part_std(H264Context *h, int n, int square,
984  int height, int delta,
985  uint8_t *dest_y, uint8_t *dest_cb,
986  uint8_t *dest_cr,
987  int x_offset, int y_offset,
988  qpel_mc_func *qpix_put,
989  h264_chroma_mc_func chroma_put,
990  qpel_mc_func *qpix_avg,
991  h264_chroma_mc_func chroma_avg,
992  int list0, int list1,
993  int pixel_shift, int chroma_idc)
994 {
995  qpel_mc_func *qpix_op = qpix_put;
996  h264_chroma_mc_func chroma_op = chroma_put;
997 
998  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
999  if (chroma_idc == 3 /* yuv444 */) {
1000  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
1001  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
1002  } else if (chroma_idc == 2 /* yuv422 */) {
1003  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
1004  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
1005  } else { /* yuv420 */
1006  dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
1007  dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
1008  }
1009  x_offset += 8 * h->mb_x;
1010  y_offset += 8 * (h->mb_y >> MB_FIELD(h));
1011 
1012  if (list0) {
1013  Picture *ref = &h->ref_list[0][h->ref_cache[0][scan8[n]]];
1014  mc_dir_part(h, ref, n, square, height, delta, 0,
1015  dest_y, dest_cb, dest_cr, x_offset, y_offset,
1016  qpix_op, chroma_op, pixel_shift, chroma_idc);
1017 
1018  qpix_op = qpix_avg;
1019  chroma_op = chroma_avg;
1020  }
1021 
1022  if (list1) {
1023  Picture *ref = &h->ref_list[1][h->ref_cache[1][scan8[n]]];
1024  mc_dir_part(h, ref, n, square, height, delta, 1,
1025  dest_y, dest_cb, dest_cr, x_offset, y_offset,
1026  qpix_op, chroma_op, pixel_shift, chroma_idc);
1027  }
1028 }
1029 
/* NOTE(review): the head of this signature (original line 1030) is
 * elided in this dump; this is the weighted-prediction variant of
 * mc_part_std (presumably `static av_always_inline void
 * mc_part_weighted(H264Context *h, int n, int square, ...)`) — confirm
 * against the original source.  Bi-directional parts are predicted into
 * scratch buffers and blended with implicit (use_weight == 2) or
 * explicit weights; uni-directional parts are predicted in place and
 * weighted afterwards. */
                                              int height, int delta,
                                              uint8_t *dest_y, uint8_t *dest_cb,
                                              uint8_t *dest_cr,
                                              int x_offset, int y_offset,
                                              qpel_mc_func *qpix_put,
                                              h264_chroma_mc_func chroma_put,
                                              h264_weight_func luma_weight_op,
                                              h264_weight_func chroma_weight_op,
                                              h264_biweight_func luma_weight_avg,
                                              h264_biweight_func chroma_weight_avg,
                                              int list0, int list1,
                                              int pixel_shift, int chroma_idc)
{
    int chroma_height;

    /* Advance destinations to the partition origin (same scheme as
     * mc_part_std). */
    dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
    if (chroma_idc == 3 /* yuv444 */) {
        chroma_height     = height;
        chroma_weight_avg = luma_weight_avg;
        chroma_weight_op  = luma_weight_op;
        dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
        dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
    } else if (chroma_idc == 2 /* yuv422 */) {
        chroma_height = height;
        dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
        dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
    } else { /* yuv420 */
        chroma_height = height >> 1;
        dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
        dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
    }
    x_offset += 8 * h->mb_x;
    y_offset += 8 * (h->mb_y >> MB_FIELD(h));

    if (list0 && list1) {
        /* don't optimize for luma-only case, since B-frames usually
         * use implicit weights => chroma too. */
        uint8_t *tmp_cb = h->bipred_scratchpad;
        uint8_t *tmp_cr = h->bipred_scratchpad + (16 << pixel_shift);
        uint8_t *tmp_y  = h->bipred_scratchpad + 16 * h->mb_uvlinesize;
        int refn0       = h->ref_cache[0][scan8[n]];
        int refn1       = h->ref_cache[1][scan8[n]];

        mc_dir_part(h, &h->ref_list[0][refn0], n, square, height, delta, 0,
                    dest_y, dest_cb, dest_cr,
                    x_offset, y_offset, qpix_put, chroma_put,
                    pixel_shift, chroma_idc);
        mc_dir_part(h, &h->ref_list[1][refn1], n, square, height, delta, 1,
                    tmp_y, tmp_cb, tmp_cr,
                    x_offset, y_offset, qpix_put, chroma_put,
                    pixel_shift, chroma_idc);

        if (h->use_weight == 2) {
            /* Implicit weighting: a single weight pair per MB-row parity. */
            int weight0 = h->implicit_weight[refn0][refn1][h->mb_y & 1];
            int weight1 = 64 - weight0;
            luma_weight_avg(dest_y, tmp_y, h->mb_linesize,
                            height, 5, weight0, weight1, 0);
            chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize,
                              chroma_height, 5, weight0, weight1, 0);
            chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize,
                              chroma_height, 5, weight0, weight1, 0);
        } else {
            /* Explicit weighting; NOTE(review): the log2_weight_denom
             * arguments (original lines 1094, 1100, 1106) are elided in
             * this dump, truncating each call below. */
            luma_weight_avg(dest_y, tmp_y, h->mb_linesize, height,
                            h->luma_weight[refn0][0][0],
                            h->luma_weight[refn1][1][0],
                            h->luma_weight[refn0][0][1] +
                            h->luma_weight[refn1][1][1]);
            chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, chroma_height,
                              h->chroma_weight[refn0][0][0][0],
                              h->chroma_weight[refn1][1][0][0],
                              h->chroma_weight[refn0][0][0][1] +
                              h->chroma_weight[refn1][1][0][1]);
            chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, chroma_height,
                              h->chroma_weight[refn0][0][1][0],
                              h->chroma_weight[refn1][1][1][0],
                              h->chroma_weight[refn0][0][1][1] +
                              h->chroma_weight[refn1][1][1][1]);
        }
    } else {
        /* Uni-directional: predict in place, then apply the weight. */
        int list     = list1 ? 1 : 0;
        int refn     = h->ref_cache[list][scan8[n]];
        Picture *ref = &h->ref_list[list][refn];
        mc_dir_part(h, ref, n, square, height, delta, list,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put, chroma_put, pixel_shift, chroma_idc);

        /* NOTE(review): the log2_weight_denom arguments (original lines
         * 1121, 1126, 1130) are elided, truncating these calls. */
        luma_weight_op(dest_y, h->mb_linesize, height,
                       h->luma_weight[refn][list][0],
                       h->luma_weight[refn][list][1]);
        if (h->use_weight_chroma) {
            chroma_weight_op(dest_cb, h->mb_uvlinesize, chroma_height,
                             h->chroma_weight[refn][list][0][0],
                             h->chroma_weight[refn][list][0][1]);
            chroma_weight_op(dest_cr, h->mb_uvlinesize, chroma_height,
                             h->chroma_weight[refn][list][1][0],
                             h->chroma_weight[refn][list][1][1]);
        }
    }
}
1136 
1138  int pixel_shift, int chroma_idc)
1139 {
1140  /* fetch pixels for estimated mv 4 macroblocks ahead
1141  * optimized for 64byte cache lines */
1142  const int refn = h->ref_cache[list][scan8[0]];
1143  if (refn >= 0) {
1144  const int mx = (h->mv_cache[list][scan8[0]][0] >> 2) + 16 * h->mb_x + 8;
1145  const int my = (h->mv_cache[list][scan8[0]][1] >> 2) + 16 * h->mb_y;
1146  uint8_t **src = h->ref_list[list][refn].f.data;
1147  int off = (mx << pixel_shift) +
1148  (my + (h->mb_x & 3) * 4) * h->mb_linesize +
1149  (64 << pixel_shift);
1150  h->vdsp.prefetch(src[0] + off, h->linesize, 4);
1151  if (chroma_idc == 3 /* yuv444 */) {
1152  h->vdsp.prefetch(src[1] + off, h->linesize, 4);
1153  h->vdsp.prefetch(src[2] + off, h->linesize, 4);
1154  } else {
1155  off = ((mx >> 1) << pixel_shift) +
1156  ((my >> 1) + (h->mb_x & 7)) * h->uvlinesize +
1157  (64 << pixel_shift);
1158  h->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1159  }
1160  }
1161 }
1162 
1163 static void free_tables(H264Context *h, int free_rbsp)
1164 {
1165  int i;
1166  H264Context *hx;
1167 
1170  av_freep(&h->cbp_table);
1171  av_freep(&h->mvd_table[0]);
1172  av_freep(&h->mvd_table[1]);
1173  av_freep(&h->direct_table);
1174  av_freep(&h->non_zero_count);
1176  h->slice_table = NULL;
1177  av_freep(&h->list_counts);
1178 
1179  av_freep(&h->mb2b_xy);
1180  av_freep(&h->mb2br_xy);
1181 
1186 
1187  if (free_rbsp && h->DPB) {
1188  for (i = 0; i < MAX_PICTURE_COUNT; i++)
1189  unref_picture(h, &h->DPB[i]);
1190  av_freep(&h->DPB);
1191  } else if (h->DPB) {
1192  for (i = 0; i < MAX_PICTURE_COUNT; i++)
1193  h->DPB[i].needs_realloc = 1;
1194  }
1195 
1196  h->cur_pic_ptr = NULL;
1197 
1198  for (i = 0; i < MAX_THREADS; i++) {
1199  hx = h->thread_context[i];
1200  if (!hx)
1201  continue;
1202  av_freep(&hx->top_borders[1]);
1203  av_freep(&hx->top_borders[0]);
1205  av_freep(&hx->edge_emu_buffer);
1206  av_freep(&hx->dc_val_base);
1207  av_freep(&hx->me.scratchpad);
1208  av_freep(&hx->er.mb_index2xy);
1210  av_freep(&hx->er.er_temp_buffer);
1211  av_freep(&hx->er.mbintra_table);
1212  av_freep(&hx->er.mbskip_table);
1213 
1214  if (free_rbsp) {
1215  av_freep(&hx->rbsp_buffer[1]);
1216  av_freep(&hx->rbsp_buffer[0]);
1217  hx->rbsp_buffer_size[0] = 0;
1218  hx->rbsp_buffer_size[1] = 0;
1219  }
1220  if (i)
1221  av_freep(&h->thread_context[i]);
1222  }
1223 }
1224 
1226 {
1227  int i, j, q, x;
1228  const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
1229 
1230  for (i = 0; i < 6; i++) {
1231  h->dequant8_coeff[i] = h->dequant8_buffer[i];
1232  for (j = 0; j < i; j++)
1233  if (!memcmp(h->pps.scaling_matrix8[j], h->pps.scaling_matrix8[i],
1234  64 * sizeof(uint8_t))) {
1235  h->dequant8_coeff[i] = h->dequant8_buffer[j];
1236  break;
1237  }
1238  if (j < i)
1239  continue;
1240 
1241  for (q = 0; q < max_qp + 1; q++) {
1242  int shift = div6[q];
1243  int idx = rem6[q];
1244  for (x = 0; x < 64; x++)
1245  h->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] =
1246  ((uint32_t)dequant8_coeff_init[idx][dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] *
1247  h->pps.scaling_matrix8[i][x]) << shift;
1248  }
1249  }
1250 }
1251 
1253 {
1254  int i, j, q, x;
1255  const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
1256  for (i = 0; i < 6; i++) {
1257  h->dequant4_coeff[i] = h->dequant4_buffer[i];
1258  for (j = 0; j < i; j++)
1259  if (!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i],
1260  16 * sizeof(uint8_t))) {
1261  h->dequant4_coeff[i] = h->dequant4_buffer[j];
1262  break;
1263  }
1264  if (j < i)
1265  continue;
1266 
1267  for (q = 0; q < max_qp + 1; q++) {
1268  int shift = div6[q] + 2;
1269  int idx = rem6[q];
1270  for (x = 0; x < 16; x++)
1271  h->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] =
1272  ((uint32_t)dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] *
1273  h->pps.scaling_matrix4[i][x]) << shift;
1274  }
1275  }
1276 }
1277 
1279 {
1280  int i, x;
1282  if (h->pps.transform_8x8_mode)
1284  if (h->sps.transform_bypass) {
1285  for (i = 0; i < 6; i++)
1286  for (x = 0; x < 16; x++)
1287  h->dequant4_coeff[i][0][x] = 1 << 6;
1289  for (i = 0; i < 6; i++)
1290  for (x = 0; x < 64; x++)
1291  h->dequant8_coeff[i][0][x] = 1 << 6;
1292  }
1293 }
1294 
1296 {
1297  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
1298  const int row_mb_num = h->mb_stride * 2 * h->avctx->thread_count;
1299  int x, y, i;
1300 
1302  row_mb_num * 8 * sizeof(uint8_t), fail)
1304  big_mb_num * 48 * sizeof(uint8_t), fail)
1306  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
1308  big_mb_num * sizeof(uint16_t), fail)
1310  big_mb_num * sizeof(uint8_t), fail)
1311  FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[0],
1312  16 * row_mb_num * sizeof(uint8_t), fail);
1313  FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[1],
1314  16 * row_mb_num * sizeof(uint8_t), fail);
1316  4 * big_mb_num * sizeof(uint8_t), fail);
1318  big_mb_num * sizeof(uint8_t), fail)
1319 
1320  memset(h->slice_table_base, -1,
1321  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
1322  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
1323 
1325  big_mb_num * sizeof(uint32_t), fail);
1327  big_mb_num * sizeof(uint32_t), fail);
1328  for (y = 0; y < h->mb_height; y++)
1329  for (x = 0; x < h->mb_width; x++) {
1330  const int mb_xy = x + y * h->mb_stride;
1331  const int b_xy = 4 * x + 4 * y * h->b_stride;
1332 
1333  h->mb2b_xy[mb_xy] = b_xy;
1334  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
1335  }
1336 
1337  if (!h->dequant4_coeff[0])
1339 
1340  if (!h->DPB) {
1341  h->DPB = av_mallocz_array(MAX_PICTURE_COUNT, sizeof(*h->DPB));
1342  if (!h->DPB)
1343  return AVERROR(ENOMEM);
1344  for (i = 0; i < MAX_PICTURE_COUNT; i++)
1345  av_frame_unref(&h->DPB[i].f);
1346  av_frame_unref(&h->cur_pic.f);
1347  }
1348 
1349  return 0;
1350 
1351 fail:
1352  free_tables(h, 1);
1353  return AVERROR(ENOMEM);
1354 }
1355 
1359 static void clone_tables(H264Context *dst, H264Context *src, int i)
1360 {
1361  dst->intra4x4_pred_mode = src->intra4x4_pred_mode + i * 8 * 2 * src->mb_stride;
1362  dst->non_zero_count = src->non_zero_count;
1363  dst->slice_table = src->slice_table;
1364  dst->cbp_table = src->cbp_table;
1365  dst->mb2b_xy = src->mb2b_xy;
1366  dst->mb2br_xy = src->mb2br_xy;
1368  dst->mvd_table[0] = src->mvd_table[0] + i * 8 * 2 * src->mb_stride;
1369  dst->mvd_table[1] = src->mvd_table[1] + i * 8 * 2 * src->mb_stride;
1370  dst->direct_table = src->direct_table;
1371  dst->list_counts = src->list_counts;
1372  dst->DPB = src->DPB;
1373  dst->cur_pic_ptr = src->cur_pic_ptr;
1374  dst->cur_pic = src->cur_pic;
1375  dst->bipred_scratchpad = NULL;
1376  dst->edge_emu_buffer = NULL;
1377  dst->me.scratchpad = NULL;
1379  src->sps.chroma_format_idc);
1380 }
1381 
1387 {
1388  ERContext *er = &h->er;
1389  int mb_array_size = h->mb_height * h->mb_stride;
1390  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
1391  int c_size = h->mb_stride * (h->mb_height + 1);
1392  int yc_size = y_size + 2 * c_size;
1393  int x, y, i;
1394 
1396  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
1398  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
1399 
1400  h->ref_cache[0][scan8[5] + 1] =
1401  h->ref_cache[0][scan8[7] + 1] =
1402  h->ref_cache[0][scan8[13] + 1] =
1403  h->ref_cache[1][scan8[5] + 1] =
1404  h->ref_cache[1][scan8[7] + 1] =
1405  h->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
1406 
1408  /* init ER */
1409  er->avctx = h->avctx;
1410  er->dsp = &h->dsp;
1412  er->opaque = h;
1413  er->quarter_sample = 1;
1414 
1415  er->mb_num = h->mb_num;
1416  er->mb_width = h->mb_width;
1417  er->mb_height = h->mb_height;
1418  er->mb_stride = h->mb_stride;
1419  er->b8_stride = h->mb_width * 2 + 1;
1420 
1421  FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy, (h->mb_num + 1) * sizeof(int),
1422  fail); // error ressilience code looks cleaner with this
1423  for (y = 0; y < h->mb_height; y++)
1424  for (x = 0; x < h->mb_width; x++)
1425  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
1426 
1427  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
1428  h->mb_stride + h->mb_width;
1429 
1431  mb_array_size * sizeof(uint8_t), fail);
1432 
1433  FF_ALLOC_OR_GOTO(h->avctx, er->mbintra_table, mb_array_size, fail);
1434  memset(er->mbintra_table, 1, mb_array_size);
1435 
1436  FF_ALLOCZ_OR_GOTO(h->avctx, er->mbskip_table, mb_array_size + 2, fail);
1437 
1439  fail);
1440 
1441  FF_ALLOCZ_OR_GOTO(h->avctx, h->dc_val_base, yc_size * sizeof(int16_t), fail);
1442  er->dc_val[0] = h->dc_val_base + h->mb_width * 2 + 2;
1443  er->dc_val[1] = h->dc_val_base + y_size + h->mb_stride + 1;
1444  er->dc_val[2] = er->dc_val[1] + c_size;
1445  for (i = 0; i < yc_size; i++)
1446  h->dc_val_base[i] = 1024;
1447  }
1448 
1449  return 0;
1450 
1451 fail:
1452  return AVERROR(ENOMEM); // free_tables will clean up for us
1453 }
1454 
1455 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
1456  int parse_extradata);
1457 
1459 {
1460  AVCodecContext *avctx = h->avctx;
1461  int ret;
1462 
1463  if (avctx->extradata[0] == 1) {
1464  int i, cnt, nalsize;
1465  unsigned char *p = avctx->extradata;
1466 
1467  h->is_avc = 1;
1468 
1469  if (avctx->extradata_size < 7) {
1470  av_log(avctx, AV_LOG_ERROR,
1471  "avcC %d too short\n", avctx->extradata_size);
1472  return AVERROR_INVALIDDATA;
1473  }
1474  /* sps and pps in the avcC always have length coded with 2 bytes,
1475  * so put a fake nal_length_size = 2 while parsing them */
1476  h->nal_length_size = 2;
1477  // Decode sps from avcC
1478  cnt = *(p + 5) & 0x1f; // Number of sps
1479  p += 6;
1480  for (i = 0; i < cnt; i++) {
1481  nalsize = AV_RB16(p) + 2;
1482  if (p - avctx->extradata + nalsize > avctx->extradata_size)
1483  return AVERROR_INVALIDDATA;
1484  ret = decode_nal_units(h, p, nalsize, 1);
1485  if (ret < 0) {
1486  av_log(avctx, AV_LOG_ERROR,
1487  "Decoding sps %d from avcC failed\n", i);
1488  return ret;
1489  }
1490  p += nalsize;
1491  }
1492  // Decode pps from avcC
1493  cnt = *(p++); // Number of pps
1494  for (i = 0; i < cnt; i++) {
1495  nalsize = AV_RB16(p) + 2;
1496  if (p - avctx->extradata + nalsize > avctx->extradata_size)
1497  return AVERROR_INVALIDDATA;
1498  ret = decode_nal_units(h, p, nalsize, 1);
1499  if (ret < 0) {
1500  av_log(avctx, AV_LOG_ERROR,
1501  "Decoding pps %d from avcC failed\n", i);
1502  return ret;
1503  }
1504  p += nalsize;
1505  }
1506  // Now store right nal length size, that will be used to parse all other nals
1507  h->nal_length_size = (avctx->extradata[4] & 0x03) + 1;
1508  } else {
1509  h->is_avc = 0;
1510  ret = decode_nal_units(h, avctx->extradata, avctx->extradata_size, 1);
1511  if (ret < 0)
1512  return ret;
1513  }
1514  return 0;
1515 }
1516 
1518 {
1519  H264Context *h = avctx->priv_data;
1520  int i;
1521  int ret;
1522 
1523  h->avctx = avctx;
1524 
1525  h->bit_depth_luma = 8;
1526  h->chroma_format_idc = 1;
1527 
1528  ff_h264dsp_init(&h->h264dsp, 8, 1);
1530  ff_h264qpel_init(&h->h264qpel, 8);
1531  ff_h264_pred_init(&h->hpc, h->avctx->codec_id, 8, 1);
1532 
1533  h->dequant_coeff_pps = -1;
1534 
1535  /* needed so that IDCT permutation is known early */
1537  ff_dsputil_init(&h->dsp, h->avctx);
1538  ff_videodsp_init(&h->vdsp, 8);
1539 
1540  memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
1541  memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
1542 
1544  h->slice_context_count = 1;
1545  h->workaround_bugs = avctx->workaround_bugs;
1546  h->flags = avctx->flags;
1547 
1548  /* set defaults */
1549  // s->decode_mb = ff_h263_decode_mb;
1550  if (!avctx->has_b_frames)
1551  h->low_delay = 1;
1552 
1554 
1556 
1558 
1559  h->pixel_shift = 0;
1560  h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8;
1561 
1562  h->thread_context[0] = h;
1563  h->outputed_poc = h->next_outputed_poc = INT_MIN;
1564  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1565  h->last_pocs[i] = INT_MIN;
1566  h->prev_poc_msb = 1 << 16;
1567  h->x264_build = -1;
1568  ff_h264_reset_sei(h);
1569  h->recovery_frame = -1;
1570  h->frame_recovered = 0;
1571  if (avctx->codec_id == AV_CODEC_ID_H264) {
1572  if (avctx->ticks_per_frame == 1)
1573  h->avctx->time_base.den *= 2;
1574  avctx->ticks_per_frame = 2;
1575  }
1576 
1577  if (avctx->extradata_size > 0 && avctx->extradata) {
1578  ret = ff_h264_decode_extradata(h);
1579  if (ret < 0)
1580  return ret;
1581  }
1582 
1586  h->low_delay = 0;
1587  }
1588 
1589  avctx->internal->allocate_progress = 1;
1590 
1591  return 0;
1592 }
1593 
/* Does pointer/value a lie within [b, b + size)? */
#define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b) + (size))))
#undef REBASE_PICTURE
/* Translate a Picture pointer from the old context's DPB to the
 * corresponding slot in the new context's DPB; NULL if it is not a
 * DPB entry (or was NULL already). */
#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    ((pic && pic >= old_ctx->DPB &&                       \
      pic < old_ctx->DPB + MAX_PICTURE_COUNT)             \
         ? &new_ctx->DPB[pic - old_ctx->DPB] : NULL)
1600 
1601 static void copy_picture_range(Picture **to, Picture **from, int count,
1602  H264Context *new_base,
1603  H264Context *old_base)
1604 {
1605  int i;
1606 
1607  for (i = 0; i < count; i++) {
1608  assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
1609  IN_RANGE(from[i], old_base->DPB,
1610  sizeof(Picture) * MAX_PICTURE_COUNT) ||
1611  !from[i]));
1612  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
1613  }
1614 }
1615 
1616 static int copy_parameter_set(void **to, void **from, int count, int size)
1617 {
1618  int i;
1619 
1620  for (i = 0; i < count; i++) {
1621  if (to[i] && !from[i]) {
1622  av_freep(&to[i]);
1623  } else if (from[i] && !to[i]) {
1624  to[i] = av_malloc(size);
1625  if (!to[i])
1626  return AVERROR(ENOMEM);
1627  }
1628 
1629  if (from[i])
1630  memcpy(to[i], from[i], size);
1631  }
1632 
1633  return 0;
1634 }
1635 
1637 {
1638  H264Context *h = avctx->priv_data;
1639 
1640  if (!avctx->internal->is_copy)
1641  return 0;
1642  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
1643  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
1644 
1645  h->context_initialized = 0;
1646 
1647  return 0;
1648 }
1649 
1650 #define copy_fields(to, from, start_field, end_field) \
1651  memcpy(&to->start_field, &from->start_field, \
1652  (char *)&to->end_field - (char *)&to->start_field)
1653 
1654 static int h264_slice_header_init(H264Context *, int);
1655 
1657 
1659  const AVCodecContext *src)
1660 {
1661  H264Context *h = dst->priv_data, *h1 = src->priv_data;
1662  int inited = h->context_initialized, err = 0;
1663  int context_reinitialized = 0;
1664  int i, ret;
1665 
1666  if (dst == src || !h1->context_initialized)
1667  return 0;
1668 
1669  if (inited &&
1670  (h->width != h1->width ||
1671  h->height != h1->height ||
1672  h->mb_width != h1->mb_width ||
1673  h->mb_height != h1->mb_height ||
1674  h->sps.bit_depth_luma != h1->sps.bit_depth_luma ||
1675  h->sps.chroma_format_idc != h1->sps.chroma_format_idc ||
1676  h->sps.colorspace != h1->sps.colorspace)) {
1677 
1678  /* set bits_per_raw_sample to the previous value. the check for changed
1679  * bit depth in h264_set_parameter_from_sps() uses it and sets it to
1680  * the current value */
1682 
1684 
1685  h->width = h1->width;
1686  h->height = h1->height;
1687  h->mb_height = h1->mb_height;
1688  h->mb_width = h1->mb_width;
1689  h->mb_num = h1->mb_num;
1690  h->mb_stride = h1->mb_stride;
1691  h->b_stride = h1->b_stride;
1692 
1693  if ((err = h264_slice_header_init(h, 1)) < 0) {
1694  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
1695  return err;
1696  }
1697  context_reinitialized = 1;
1698 
1699  /* update linesize on resize. The decoder doesn't
1700  * necessarily call h264_frame_start in the new thread */
1701  h->linesize = h1->linesize;
1702  h->uvlinesize = h1->uvlinesize;
1703 
1704  /* copy block_offset since frame_start may not be called */
1705  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
1706  }
1707 
1708  if (!inited) {
1709  for (i = 0; i < MAX_SPS_COUNT; i++)
1710  av_freep(h->sps_buffers + i);
1711 
1712  for (i = 0; i < MAX_PPS_COUNT; i++)
1713  av_freep(h->pps_buffers + i);
1714 
1715  memcpy(h, h1, sizeof(*h1));
1716  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
1717  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
1718  memset(&h->er, 0, sizeof(h->er));
1719  memset(&h->me, 0, sizeof(h->me));
1720  memset(&h->mb, 0, sizeof(h->mb));
1721  memset(&h->mb_luma_dc, 0, sizeof(h->mb_luma_dc));
1722  memset(&h->mb_padding, 0, sizeof(h->mb_padding));
1723  h->context_initialized = 0;
1724 
1725  memset(&h->cur_pic, 0, sizeof(h->cur_pic));
1726  av_frame_unref(&h->cur_pic.f);
1727  h->cur_pic.tf.f = &h->cur_pic.f;
1728 
1729  h->avctx = dst;
1730  h->DPB = NULL;
1731  h->qscale_table_pool = NULL;
1732  h->mb_type_pool = NULL;
1733  h->ref_index_pool = NULL;
1734  h->motion_val_pool = NULL;
1735 
1736  ret = ff_h264_alloc_tables(h);
1737  if (ret < 0) {
1738  av_log(dst, AV_LOG_ERROR, "Could not allocate memory\n");
1739  return ret;
1740  }
1741  ret = context_init(h);
1742  if (ret < 0) {
1743  av_log(dst, AV_LOG_ERROR, "context_init() failed.\n");
1744  return ret;
1745  }
1746 
1747  for (i = 0; i < 2; i++) {
1748  h->rbsp_buffer[i] = NULL;
1749  h->rbsp_buffer_size[i] = 0;
1750  }
1751  h->bipred_scratchpad = NULL;
1752  h->edge_emu_buffer = NULL;
1753 
1754  h->thread_context[0] = h;
1755 
1756  h->context_initialized = 1;
1757  }
1758 
1759  h->avctx->coded_height = h1->avctx->coded_height;
1760  h->avctx->coded_width = h1->avctx->coded_width;
1761  h->avctx->width = h1->avctx->width;
1762  h->avctx->height = h1->avctx->height;
1763  h->coded_picture_number = h1->coded_picture_number;
1764  h->first_field = h1->first_field;
1765  h->picture_structure = h1->picture_structure;
1766  h->qscale = h1->qscale;
1767  h->droppable = h1->droppable;
1768  h->low_delay = h1->low_delay;
1769 
1770  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1771  unref_picture(h, &h->DPB[i]);
1772  if (h1->DPB[i].f.buf[0] &&
1773  (ret = ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
1774  return ret;
1775  }
1776 
1777  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
1778  unref_picture(h, &h->cur_pic);
1779  if ((ret = ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0)
1780  return ret;
1781 
1782  h->workaround_bugs = h1->workaround_bugs;
1783  h->low_delay = h1->low_delay;
1784  h->droppable = h1->droppable;
1785 
1786  /* frame_start may not be called for the next thread (if it's decoding
1787  * a bottom field) so this has to be allocated here */
1788  err = alloc_scratch_buffers(h, h1->linesize);
1789  if (err < 0)
1790  return err;
1791 
1792  // extradata/NAL handling
1793  h->is_avc = h1->is_avc;
1794 
1795  // SPS/PPS
1796  if ((ret = copy_parameter_set((void **)h->sps_buffers,
1797  (void **)h1->sps_buffers,
1798  MAX_SPS_COUNT, sizeof(SPS))) < 0)
1799  return ret;
1800  h->sps = h1->sps;
1801  if ((ret = copy_parameter_set((void **)h->pps_buffers,
1802  (void **)h1->pps_buffers,
1803  MAX_PPS_COUNT, sizeof(PPS))) < 0)
1804  return ret;
1805  h->pps = h1->pps;
1806 
1807  // Dequantization matrices
1808  // FIXME these are big - can they be only copied when PPS changes?
1809  copy_fields(h, h1, dequant4_buffer, dequant4_coeff);
1810 
1811  for (i = 0; i < 6; i++)
1812  h->dequant4_coeff[i] = h->dequant4_buffer[0] +
1813  (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]);
1814 
1815  for (i = 0; i < 6; i++)
1816  h->dequant8_coeff[i] = h->dequant8_buffer[0] +
1817  (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]);
1818 
1819  h->dequant_coeff_pps = h1->dequant_coeff_pps;
1820 
1821  // POC timing
1822  copy_fields(h, h1, poc_lsb, redundant_pic_count);
1823 
1824  // reference lists
1825  copy_fields(h, h1, short_ref, cabac_init_idc);
1826 
1827  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
1828  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
1829  copy_picture_range(h->delayed_pic, h1->delayed_pic,
1830  MAX_DELAYED_PIC_COUNT + 2, h, h1);
1831 
1832  h->last_slice_type = h1->last_slice_type;
1833 
1834  if (context_reinitialized)
1836 
1837  if (!h->cur_pic_ptr)
1838  return 0;
1839 
1840  if (!h->droppable) {
1842  h->prev_poc_msb = h->poc_msb;
1843  h->prev_poc_lsb = h->poc_lsb;
1844  }
1846  h->prev_frame_num = h->frame_num;
1848 
1849  h->recovery_frame = h1->recovery_frame;
1850  h->frame_recovered = h1->frame_recovered;
1851 
1852  return err;
1853 }
1854 
1856 {
1857  Picture *pic;
1858  int i, ret;
1859  const int pixel_shift = h->pixel_shift;
1860 
1862  h->cur_pic_ptr = NULL;
1863 
1864  i = find_unused_picture(h);
1865  if (i < 0) {
1866  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1867  return i;
1868  }
1869  pic = &h->DPB[i];
1870 
1871  pic->reference = h->droppable ? 0 : h->picture_structure;
1874  /*
1875  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
1876  * in later.
1877  * See decode_nal_units().
1878  */
1879  pic->f.key_frame = 0;
1880  pic->mmco_reset = 0;
1881  pic->recovered = 0;
1882 
1883  if ((ret = alloc_picture(h, pic)) < 0)
1884  return ret;
1885 
1886  h->cur_pic_ptr = pic;
1887  unref_picture(h, &h->cur_pic);
1888  if ((ret = ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
1889  return ret;
1890 
1892  ff_er_frame_start(&h->er);
1893 
1894  assert(h->linesize && h->uvlinesize);
1895 
1896  for (i = 0; i < 16; i++) {
1897  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1898  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1899  }
1900  for (i = 0; i < 16; i++) {
1901  h->block_offset[16 + i] =
1902  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1903  h->block_offset[48 + 16 + i] =
1904  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1905  }
1906 
1907  /* can't be in alloc_tables because linesize isn't known there.
1908  * FIXME: redo bipred weight to not require extra buffer? */
1909  for (i = 0; i < h->slice_context_count; i++)
1910  if (h->thread_context[i]) {
1912  if (ret < 0)
1913  return ret;
1914  }
1915 
1916  /* Some macroblocks can be accessed before they're available in case
1917  * of lost slices, MBAFF or threading. */
1918  memset(h->slice_table, -1,
1919  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
1920 
1921  // s->decode = (s->flags & CODEC_FLAG_PSNR) || !s->encoding ||
1922  // s->current_picture.f.reference /* || h->contains_intra */ || 1;
1923 
1924  /* We mark the current picture as non-reference after allocating it, so
1925  * that if we break out due to an error it can be released automatically
1926  * in the next ff_MPV_frame_start().
1927  */
1928  h->cur_pic_ptr->reference = 0;
1929 
1930  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
1931 
1932  h->next_output_pic = NULL;
1933 
1934  assert(h->cur_pic_ptr->long_ref == 0);
1935 
1936  return 0;
1937 }
1938 
1947 static void decode_postinit(H264Context *h, int setup_finished)
1948 {
1949  Picture *out = h->cur_pic_ptr;
1950  Picture *cur = h->cur_pic_ptr;
1951  int i, pics, out_of_order, out_idx;
1952  int invalid = 0, cnt = 0;
1953 
1954  h->cur_pic_ptr->f.pict_type = h->pict_type;
1955 
1956  if (h->next_output_pic)
1957  return;
1958 
1959  if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
1960  /* FIXME: if we have two PAFF fields in one packet, we can't start
1961  * the next thread here. If we have one field per packet, we can.
1962  * The check in decode_nal_units() is not good enough to find this
1963  * yet, so we assume the worst for now. */
1964  // if (setup_finished)
1965  // ff_thread_finish_setup(h->avctx);
1966  return;
1967  }
1968 
1969  cur->f.interlaced_frame = 0;
1970  cur->f.repeat_pict = 0;
1971 
1972  /* Signal interlacing information externally. */
1973  /* Prioritize picture timing SEI information over used
1974  * decoding process if it exists. */
1975 
1976  if (h->sps.pic_struct_present_flag) {
1977  switch (h->sei_pic_struct) {
1978  case SEI_PIC_STRUCT_FRAME:
1979  break;
1982  cur->f.interlaced_frame = 1;
1983  break;
1986  if (FIELD_OR_MBAFF_PICTURE(h))
1987  cur->f.interlaced_frame = 1;
1988  else
1989  // try to flag soft telecine progressive
1991  break;
1994  /* Signal the possibility of telecined film externally
1995  * (pic_struct 5,6). From these hints, let the applications
1996  * decide if they apply deinterlacing. */
1997  cur->f.repeat_pict = 1;
1998  break;
2000  cur->f.repeat_pict = 2;
2001  break;
2003  cur->f.repeat_pict = 4;
2004  break;
2005  }
2006 
2007  if ((h->sei_ct_type & 3) &&
2009  cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
2010  } else {
2011  /* Derive interlacing flag from used decoding process. */
2013  }
2015 
2016  if (cur->field_poc[0] != cur->field_poc[1]) {
2017  /* Derive top_field_first from field pocs. */
2018  cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
2019  } else {
2020  if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) {
2021  /* Use picture timing SEI information. Even if it is a
2022  * information of a past frame, better than nothing. */
2025  cur->f.top_field_first = 1;
2026  else
2027  cur->f.top_field_first = 0;
2028  } else {
2029  /* Most likely progressive */
2030  cur->f.top_field_first = 0;
2031  }
2032  }
2033 
2034  if (h->sei_frame_packing_present &&
2037  h->content_interpretation_type > 0 &&
2038  h->content_interpretation_type < 3) {
2039  AVStereo3D *stereo = av_stereo3d_create_side_data(&cur->f);
2040  if (!stereo)
2041  return;
2042 
2043  switch (h->frame_packing_arrangement_type) {
2044  case 0:
2045  stereo->type = AV_STEREO3D_CHECKERBOARD;
2046  break;
2047  case 1:
2048  stereo->type = AV_STEREO3D_COLUMNS;
2049  break;
2050  case 2:
2051  stereo->type = AV_STEREO3D_LINES;
2052  break;
2053  case 3:
2054  if (h->quincunx_subsampling)
2056  else
2057  stereo->type = AV_STEREO3D_SIDEBYSIDE;
2058  break;
2059  case 4:
2060  stereo->type = AV_STEREO3D_TOPBOTTOM;
2061  break;
2062  case 5:
2063  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
2064  break;
2065  case 6:
2066  stereo->type = AV_STEREO3D_2D;
2067  break;
2068  }
2069 
2070  if (h->content_interpretation_type == 2)
2071  stereo->flags = AV_STEREO3D_FLAG_INVERT;
2072  }
2073 
2074  // FIXME do something with unavailable reference frames
2075 
2076  /* Sort B-frames into display order */
2077 
2081  h->low_delay = 0;
2082  }
2083 
2087  h->low_delay = 0;
2088  }
2089 
2090  pics = 0;
2091  while (h->delayed_pic[pics])
2092  pics++;
2093 
2094  assert(pics <= MAX_DELAYED_PIC_COUNT);
2095 
2096  h->delayed_pic[pics++] = cur;
2097  if (cur->reference == 0)
2098  cur->reference = DELAYED_PIC_REF;
2099 
2100  /* Frame reordering. This code takes pictures from coding order and sorts
2101  * them by their incremental POC value into display order. It supports POC
2102  * gaps, MMCO reset codes and random resets.
2103  * A "display group" can start either with a IDR frame (f.key_frame = 1),
2104  * and/or can be closed down with a MMCO reset code. In sequences where
2105  * there is no delay, we can't detect that (since the frame was already
2106  * output to the user), so we also set h->mmco_reset to detect the MMCO
2107  * reset code.
2108  * FIXME: if we detect insufficient delays (as per h->avctx->has_b_frames),
2109  * we increase the delay between input and output. All frames affected by
2110  * the lag (e.g. those that should have been output before another frame
2111  * that we already returned to the user) will be dropped. This is a bug
2112  * that we will fix later. */
2113  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
2114  cnt += out->poc < h->last_pocs[i];
2115  invalid += out->poc == INT_MIN;
2116  }
2117  if (!h->mmco_reset && !cur->f.key_frame &&
2118  cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
2119  h->mmco_reset = 2;
2120  if (pics > 1)
2121  h->delayed_pic[pics - 2]->mmco_reset = 2;
2122  }
2123  if (h->mmco_reset || cur->f.key_frame) {
2124  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
2125  h->last_pocs[i] = INT_MIN;
2126  cnt = 0;
2127  invalid = MAX_DELAYED_PIC_COUNT;
2128  }
2129  out = h->delayed_pic[0];
2130  out_idx = 0;
2131  for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
2132  h->delayed_pic[i] &&
2133  !h->delayed_pic[i - 1]->mmco_reset &&
2134  !h->delayed_pic[i]->f.key_frame;
2135  i++)
2136  if (h->delayed_pic[i]->poc < out->poc) {
2137  out = h->delayed_pic[i];
2138  out_idx = i;
2139  }
2140  if (h->avctx->has_b_frames == 0 &&
2141  (h->delayed_pic[0]->f.key_frame || h->mmco_reset))
2142  h->next_outputed_poc = INT_MIN;
2143  out_of_order = !out->f.key_frame && !h->mmco_reset &&
2144  (out->poc < h->next_outputed_poc);
2145 
2148  } else if (out_of_order && pics - 1 == h->avctx->has_b_frames &&
2149  h->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
2150  if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
2151  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, cnt);
2152  }
2153  h->low_delay = 0;
2154  } else if (h->low_delay &&
2155  ((h->next_outputed_poc != INT_MIN &&
2156  out->poc > h->next_outputed_poc + 2) ||
2157  cur->f.pict_type == AV_PICTURE_TYPE_B)) {
2158  h->low_delay = 0;
2159  h->avctx->has_b_frames++;
2160  }
2161 
2162  if (pics > h->avctx->has_b_frames) {
2163  out->reference &= ~DELAYED_PIC_REF;
2164  // for frame threading, the owner must be the second field's thread or
2165  // else the first thread can release the picture and reuse it unsafely
2166  for (i = out_idx; h->delayed_pic[i]; i++)
2167  h->delayed_pic[i] = h->delayed_pic[i + 1];
2168  }
2169  memmove(h->last_pocs, &h->last_pocs[1],
2170  sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1));
2171  h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc;
2172  if (!out_of_order && pics > h->avctx->has_b_frames) {
2173  h->next_output_pic = out;
2174  if (out->mmco_reset) {
2175  if (out_idx > 0) {
2176  h->next_outputed_poc = out->poc;
2177  h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset;
2178  } else {
2179  h->next_outputed_poc = INT_MIN;
2180  }
2181  } else {
2182  if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f.key_frame) {
2183  h->next_outputed_poc = INT_MIN;
2184  } else {
2185  h->next_outputed_poc = out->poc;
2186  }
2187  }
2188  h->mmco_reset = 0;
2189  } else {
2190  av_log(h->avctx, AV_LOG_DEBUG, "no picture\n");
2191  }
2192 
2193  if (h->next_output_pic) {
2194  if (h->next_output_pic->recovered) {
2195  // We have reached an recovery point and all frames after it in
2196  // display order are "recovered".
2198  }
2200  }
2201 
2202  if (setup_finished && !h->avctx->hwaccel)
2204 }
2205 
/* NOTE(review): the first line of this definition (return type, name and the
 * leading "H264Context *h, uint8_t *src_y," parameters — upstream this is
 * backup_mb_border()) was lost in extraction; restore from the original. */
                                 uint8_t *src_cb, uint8_t *src_cr,
                                 int linesize, int uvlinesize,
                                 int simple)
{
    /* Save the pixel row directly above the current macroblock into
     * h->top_borders[] so the deblocking filter can use/restore it later.
     * For MBAFF, two rows are kept per MB pair (see comment below). */
    uint8_t *top_border;
    int top_idx = 1;
    const int pixel_shift = h->pixel_shift; /* 1 when samples are 2 bytes wide */
    int chroma444 = CHROMA444(h);
    int chroma422 = CHROMA422(h);

    /* step back to the line above the macroblock */
    src_y  -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

    if (!simple && FRAME_MBAFF(h)) {
        if (h->mb_y & 1) {
            if (!MB_MBAFF(h)) {
                /* bottom MB of a frame-coded pair: save the last row of the
                 * top MB (row 15) into slot 0 */
                top_border = h->top_borders[0][h->mb_x];
                AV_COPY128(top_border, src_y + 15 * linesize);
                if (pixel_shift)
                    AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
                if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
                    if (chroma444) {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
                            AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
                            AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
                        } else {
                            AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
                        }
                    } else if (chroma422) {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
                        } else {
                            AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
                            AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
                        }
                    } else {
                        /* 4:2:0: chroma blocks are 8 rows tall, last row is 7 */
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
                        } else {
                            AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
                            AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
                        }
                    }
                }
            }
        } else if (MB_MBAFF(h)) {
            top_idx = 0;
        } else
            return;
    }

    top_border = h->top_borders[top_idx][h->mb_x];
    /* There are two lines saved, the line above the top macroblock
     * of a pair, and the line above the bottom macroblock. */
    AV_COPY128(top_border, src_y + 16 * linesize);
    if (pixel_shift)
        AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);

    if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
        if (chroma444) {
            /* 4:4:4 chroma uses the luma linesize (same plane geometry) */
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 16 * linesize);
                AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
                AV_COPY128(top_border + 64, src_cr + 16 * linesize);
                AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
            } else {
                AV_COPY128(top_border + 16, src_cb + 16 * linesize);
                AV_COPY128(top_border + 32, src_cr + 16 * linesize);
            }
        } else if (chroma422) {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
                AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
            } else {
                AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
                AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
            }
        } else {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
                AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
            } else {
                AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
                AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
            }
        }
    }
}
2301 
/* NOTE(review): the first line of this definition (return type, name and the
 * leading "H264Context *h, uint8_t *src_y," parameters — upstream this is
 * xchg_mb_border()) was lost in extraction; restore from the original. */
                                  uint8_t *src_cb, uint8_t *src_cr,
                                  int linesize, int uvlinesize,
                                  int xchg, int chroma444,
                                  int simple, int pixel_shift)
{
    /* Swap (xchg != 0) or copy the saved top-border pixels with/into the
     * current macroblock's top rows, so intra prediction sees unfiltered
     * neighbours while deblocking works on the filtered picture. */
    int deblock_topleft;
    int deblock_top;
    int top_idx = 1;
    uint8_t *top_border_m1;
    uint8_t *top_border;

    if (!simple && FRAME_MBAFF(h)) {
        if (h->mb_y & 1) {
            if (!MB_MBAFF(h))
                return;
        } else {
            top_idx = MB_MBAFF(h) ? 0 : 1;
        }
    }

    if (h->deblocking_filter == 2) {
        /* filtering across slice boundaries disabled: only treat neighbours
         * in the same slice as available */
        deblock_topleft = h->slice_table[h->mb_xy - 1 - h->mb_stride] == h->slice_num;
        deblock_top     = h->top_type;
    } else {
        deblock_topleft = (h->mb_x > 0);
        deblock_top     = (h->mb_y > !!MB_FIELD(h));
    }

    /* move to one pixel above-left of the macroblock */
    src_y  -= linesize   + 1 + pixel_shift;
    src_cb -= uvlinesize + 1 + pixel_shift;
    src_cr -= uvlinesize + 1 + pixel_shift;

    top_border_m1 = h->top_borders[top_idx][h->mb_x - 1];
    top_border    = h->top_borders[top_idx][h->mb_x];

/* 8 (or 16 for 16-bit samples) bytes: swap when xchg, plain copy otherwise */
#define XCHG(a, b, xchg)                        \
    if (pixel_shift) {                          \
        if (xchg) {                             \
            AV_SWAP64(b + 0, a + 0);            \
            AV_SWAP64(b + 8, a + 8);            \
        } else {                                \
            AV_COPY128(b, a);                   \
        }                                       \
    } else if (xchg)                            \
        AV_SWAP64(b, a);                        \
    else                                        \
        AV_COPY64(b, a);

    if (deblock_top) {
        if (deblock_topleft) {
            XCHG(top_border_m1 + (8 << pixel_shift),
                 src_y - (7 << pixel_shift), 1);
        }
        XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
        XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
        if (h->mb_x + 1 < h->mb_width) {
            XCHG(h->top_borders[top_idx][h->mb_x + 1],
                 src_y + (17 << pixel_shift), 1);
        }
    }
    if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
        if (chroma444) {
            if (deblock_top) {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
                XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
                XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
                XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
                if (h->mb_x + 1 < h->mb_width) {
                    XCHG(h->top_borders[top_idx][h->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
                    XCHG(h->top_borders[top_idx][h->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
                }
            }
        } else {
            if (deblock_top) {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
                XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
            }
        }
    }
}
2391 
2392 static av_always_inline int dctcoef_get(int16_t *mb, int high_bit_depth,
2393  int index)
2394 {
2395  if (high_bit_depth) {
2396  return AV_RN32A(((int32_t *)mb) + index);
2397  } else
2398  return AV_RN16A(mb + index);
2399 }
2400 
2401 static av_always_inline void dctcoef_set(int16_t *mb, int high_bit_depth,
2402  int index, int value)
2403 {
2404  if (high_bit_depth) {
2405  AV_WN32A(((int32_t *)mb) + index, value);
2406  } else
2407  AV_WN16A(mb + index, value);
2408 }
2409 
/* NOTE(review): the first line of this definition (upstream:
 * "static av_always_inline void hl_decode_mb_predict_luma(H264Context *h,")
 * was lost in extraction; restore from the original. */
                                                  int mb_type, int is_h264,
                                                  int simple,
                                                  int transform_bypass,
                                                  int pixel_shift,
                                                  int *block_offset,
                                                  int linesize,
                                                  uint8_t *dest_y, int p)
{
    /* Intra prediction + residual add for one luma (or 4:4:4 chroma, p>0)
     * plane of an intra macroblock. */
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    void (*idct_dc_add)(uint8_t *dst, int16_t *block, int stride);
    int i;
    int qscale = p == 0 ? h->qscale : h->chroma_qp[p - 1];
    block_offset += 16 * p;
    if (IS_INTRA4x4(mb_type)) {
        if (IS_8x8DCT(mb_type)) {
            if (transform_bypass) {
                idct_dc_add =
                /* NOTE(review): continuation stripped by extraction -- upstream
                 * assigns idct_dc_add = idct_add = the 8x8 pixel-add helper. */
            } else {
                idct_dc_add = h->h264dsp.h264_idct8_dc_add;
                /* NOTE(review): stripped line -- upstream also sets
                 * idct_add = h->h264dsp.h264_idct8_add here. */
            }
            for (i = 0; i < 16; i += 4) {
                uint8_t *const ptr = dest_y + block_offset[i];
                const int dir = h->intra4x4_pred_mode_cache[scan8[i]];
                if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
                    /* lossless profile: prediction and residual combined */
                    h->hpc.pred8x8l_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                } else {
                    const int nnz = h->non_zero_count_cache[scan8[i + p * 16]];
                    h->hpc.pred8x8l[dir](ptr, (h->topleft_samples_available << i) & 0x8000,
                                         (h->topright_samples_available << i) & 0x4000, linesize);
                    if (nnz) {
                        /* single DC coefficient gets the cheaper DC-only add */
                        if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
                            idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        else
                            idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    }
                }
            }
        } else {
            if (transform_bypass) {
                idct_dc_add =
                /* NOTE(review): continuation stripped by extraction -- upstream
                 * assigns idct_dc_add = idct_add = the 4x4 pixel-add helper. */
            } else {
                idct_dc_add = h->h264dsp.h264_idct_dc_add;
                /* NOTE(review): stripped line -- upstream also sets
                 * idct_add = h->h264dsp.h264_idct_add here. */
            }
            for (i = 0; i < 16; i++) {
                uint8_t *const ptr = dest_y + block_offset[i];
                const int dir = h->intra4x4_pred_mode_cache[scan8[i]];

                if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
                    h->hpc.pred4x4_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                } else {
                    uint8_t *topright;
                    int nnz, tr;
                    uint64_t tr_high;
                    if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                        const int topright_avail = (h->topright_samples_available << i) & 0x8000;
                        assert(h->mb_y || linesize <= block_offset[i]);
                        if (!topright_avail) {
                            /* replicate the last available top pixel 4x */
                            if (pixel_shift) {
                                tr_high = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
                                topright = (uint8_t *)&tr_high;
                            } else {
                                tr = ptr[3 - linesize] * 0x01010101u;
                                topright = (uint8_t *)&tr;
                            }
                        } else
                            topright = ptr + (4 << pixel_shift) - linesize;
                    } else
                        topright = NULL;

                    h->hpc.pred4x4[dir](ptr, topright, linesize);
                    nnz = h->non_zero_count_cache[scan8[i + p * 16]];
                    if (nnz) {
                        if (is_h264) {
                            if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
                                idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                            else
                                idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        } else if (CONFIG_SVQ3_DECODER)
                            ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize, qscale, 0);
                    }
                }
            }
        }
    } else {
        /* intra 16x16: whole-plane prediction, then the DC transform */
        h->hpc.pred16x16[h->intra16x16_pred_mode](dest_y, linesize);
        if (is_h264) {
            /* NOTE(review): stripped line -- upstream guards the following
             * with a check of the luma-DC non_zero_count_cache entry. */
            if (!transform_bypass)
                h->h264dsp.h264_luma_dc_dequant_idct(h->mb + (p * 256 << pixel_shift),
                                                     h->mb_luma_dc[p],
                                                     h->dequant4_coeff[p][qscale][0]);
            else {
                /* lossless: scatter the DC values back to their raster
                 * positions instead of running the DC transform */
                static const uint8_t dc_mapping[16] = {
                     0 * 16,  1 * 16,  4 * 16,  5 * 16,
                     2 * 16,  3 * 16,  6 * 16,  7 * 16,
                     8 * 16,  9 * 16, 12 * 16, 13 * 16,
                    10 * 16, 11 * 16, 14 * 16, 15 * 16
                };
                for (i = 0; i < 16; i++)
                    dctcoef_set(h->mb + (p * 256 << pixel_shift),
                                pixel_shift, dc_mapping[i],
                                dctcoef_get(h->mb_luma_dc[p],
                                            pixel_shift, i));
            }
        }
        } else if (CONFIG_SVQ3_DECODER)
            ff_svq3_luma_dc_dequant_idct_c(h->mb + p * 256,
                                           h->mb_luma_dc[p], qscale);
    }
}
2525 
/* NOTE(review): the first line of this definition (upstream:
 * "static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type,")
 * was lost in extraction; restore from the original. */
                                                int is_h264, int simple,
                                                int transform_bypass,
                                                int pixel_shift,
                                                int *block_offset,
                                                int linesize,
                                                uint8_t *dest_y, int p)
{
    /* Residual (IDCT) add for one luma plane of a non-intra4x4 macroblock;
     * intra4x4 residuals were already added during prediction above. */
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    int i;
    block_offset += 16 * p;
    if (!IS_INTRA4x4(mb_type)) {
        if (is_h264) {
            if (IS_INTRA16x16(mb_type)) {
                if (transform_bypass) {
                    if (h->sps.profile_idc == 244 &&
                        /* NOTE(review): two stripped lines -- upstream checks
                         * intra16x16_pred_mode for VERT/HOR_PRED8x8 here. */
                        h->hpc.pred16x16_add[h->intra16x16_pred_mode](dest_y, block_offset,
                                                                      h->mb + (p * 256 << pixel_shift),
                                                                      linesize);
                    } else {
                        for (i = 0; i < 16; i++)
                            if (h->non_zero_count_cache[scan8[i + p * 16]] ||
                                dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
                                h->h264dsp.h264_add_pixels4_clear(dest_y + block_offset[i],
                                                                  h->mb + (i * 16 + p * 256 << pixel_shift),
                                                                  linesize);
                    }
                } else {
                    h->h264dsp.h264_idct_add16intra(dest_y, block_offset,
                                                    h->mb + (p * 256 << pixel_shift),
                                                    linesize,
                                                    h->non_zero_count_cache + p * 5 * 8);
                }
            } else if (h->cbp & 15) {
                /* inter MB with luma residual */
                if (transform_bypass) {
                    const int di = IS_8x8DCT(mb_type) ? 4 : 1;
                    /* NOTE(review): stripped lines -- upstream selects the
                     * 8x8 or 4x4 pixel-add helper into idct_add here. */
                    for (i = 0; i < 16; i += di)
                        if (h->non_zero_count_cache[scan8[i + p * 16]])
                            idct_add(dest_y + block_offset[i],
                                     h->mb + (i * 16 + p * 256 << pixel_shift),
                                     linesize);
                } else {
                    if (IS_8x8DCT(mb_type))
                        h->h264dsp.h264_idct8_add4(dest_y, block_offset,
                                                   h->mb + (p * 256 << pixel_shift),
                                                   linesize,
                                                   h->non_zero_count_cache + p * 5 * 8);
                    else
                        h->h264dsp.h264_idct_add16(dest_y, block_offset,
                                                   h->mb + (p * 256 << pixel_shift),
                                                   linesize,
                                                   h->non_zero_count_cache + p * 5 * 8);
                }
            }
        } else if (CONFIG_SVQ3_DECODER) {
            for (i = 0; i < 16; i++)
                if (h->non_zero_count_cache[scan8[i + p * 16]] || h->mb[i * 16 + p * 256]) {
                    // FIXME benchmark weird rule, & below
                    uint8_t *const ptr = dest_y + block_offset[i];
                    ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize,
                                       h->qscale, IS_INTRA(mb_type) ? 1 : 0);
                }
        }
    }
}
2595 
/* Instantiate the specialized macroblock decoders from h264_mb_template.c:
 * 8-bit simple, 16-bit (high-bit-depth) simple, and the generic "complex"
 * variants dispatched by ff_h264_hl_decode_mb() below. */
#define BITS 8
#define SIMPLE 1
#include "h264_mb_template.c"

#undef BITS
#define BITS 16
#include "h264_mb_template.c"

#undef SIMPLE
#define SIMPLE 0
#include "h264_mb_template.c"
2607 
/* NOTE(review): the signature line was stripped by extraction; upstream this
 * is "void ff_h264_hl_decode_mb(H264Context *h)". */
{
    /* Dispatch decoding of the current macroblock to the specialized
     * variant generated from h264_mb_template.c above. */
    const int mb_xy = h->mb_xy;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    /* PCM macroblocks, qscale 0 and small builds always take the generic path */
    int is_complex = CONFIG_SMALL || h->is_complex ||
                     IS_INTRA_PCM(mb_type) || h->qscale == 0;

    if (CHROMA444(h)) {
        if (is_complex || h->pixel_shift)
            hl_decode_mb_444_complex(h);
        else
            hl_decode_mb_444_simple_8(h);
    } else if (is_complex) {
        hl_decode_mb_complex(h);
    } else if (h->pixel_shift) {
        hl_decode_mb_simple_16(h);
    } else
        hl_decode_mb_simple_8(h);
}
2627 
/* NOTE(review): the signature line was stripped by extraction; upstream this
 * is "int ff_pred_weight_table(H264Context *h)".
 * Parses the slice-header pred_weight_table(): explicit luma/chroma
 * weight+offset pairs for every active reference in both lists, setting
 * h->use_weight / h->use_weight_chroma when any non-default value is seen. */
{
    int list, i;
    int luma_def, chroma_def;

    h->use_weight = 0;
    h->use_weight_chroma = 0;
    /* NOTE(review): stripped line -- upstream reads luma_log2_weight_denom
     * from the bitstream (get_ue_golomb) here. */
    if (h->sps.chroma_format_idc)
    /* NOTE(review): stripped line -- upstream reads chroma_log2_weight_denom
     * here as the body of the if above. */
    luma_def = 1 << h->luma_log2_weight_denom;
    chroma_def = 1 << h->chroma_log2_weight_denom;

    for (list = 0; list < 2; list++) {
        h->luma_weight_flag[list] = 0;
        h->chroma_weight_flag[list] = 0;
        for (i = 0; i < h->ref_count[list]; i++) {
            int luma_weight_flag, chroma_weight_flag;

            luma_weight_flag = get_bits1(&h->gb);
            if (luma_weight_flag) {
                /* explicit weight and offset for this reference */
                h->luma_weight[i][list][0] = get_se_golomb(&h->gb);
                h->luma_weight[i][list][1] = get_se_golomb(&h->gb);
                if (h->luma_weight[i][list][0] != luma_def ||
                    h->luma_weight[i][list][1] != 0) {
                    h->use_weight = 1;
                    h->luma_weight_flag[list] = 1;
                }
            } else {
                /* default: unit weight, zero offset */
                h->luma_weight[i][list][0] = luma_def;
                h->luma_weight[i][list][1] = 0;
            }

            if (h->sps.chroma_format_idc) {
                chroma_weight_flag = get_bits1(&h->gb);
                if (chroma_weight_flag) {
                    int j;
                    for (j = 0; j < 2; j++) {
                        h->chroma_weight[i][list][j][0] = get_se_golomb(&h->gb);
                        h->chroma_weight[i][list][j][1] = get_se_golomb(&h->gb);
                        if (h->chroma_weight[i][list][j][0] != chroma_def ||
                            h->chroma_weight[i][list][j][1] != 0) {
                            h->use_weight_chroma = 1;
                            h->chroma_weight_flag[list] = 1;
                        }
                    }
                } else {
                    int j;
                    for (j = 0; j < 2; j++) {
                        h->chroma_weight[i][list][j][0] = chroma_def;
                        h->chroma_weight[i][list][j][1] = 0;
                    }
                }
            }
        }
        /* NOTE(review): stripped condition -- upstream only proceeds to
         * list 1 for B slices ("if (h->slice_type_nos != AV_PICTURE_TYPE_B)"
         * guards this break); as shown the break looks unconditional. */
            break;
    }
    h->use_weight = h->use_weight || h->use_weight_chroma;
    return 0;
}
2689 
2695 static void implicit_weight_table(H264Context *h, int field)
2696 {
2697  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
2698 
2699  for (i = 0; i < 2; i++) {
2700  h->luma_weight_flag[i] = 0;
2701  h->chroma_weight_flag[i] = 0;
2702  }
2703 
2704  if (field < 0) {
2705  if (h->picture_structure == PICT_FRAME) {
2706  cur_poc = h->cur_pic_ptr->poc;
2707  } else {
2708  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
2709  }
2710  if (h->ref_count[0] == 1 && h->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
2711  h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2 * cur_poc) {
2712  h->use_weight = 0;
2713  h->use_weight_chroma = 0;
2714  return;
2715  }
2716  ref_start = 0;
2717  ref_count0 = h->ref_count[0];
2718  ref_count1 = h->ref_count[1];
2719  } else {
2720  cur_poc = h->cur_pic_ptr->field_poc[field];
2721  ref_start = 16;
2722  ref_count0 = 16 + 2 * h->ref_count[0];
2723  ref_count1 = 16 + 2 * h->ref_count[1];
2724  }
2725 
2726  h->use_weight = 2;
2727  h->use_weight_chroma = 2;
2728  h->luma_log2_weight_denom = 5;
2729  h->chroma_log2_weight_denom = 5;
2730 
2731  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
2732  int poc0 = h->ref_list[0][ref0].poc;
2733  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
2734  int w = 32;
2735  if (!h->ref_list[0][ref0].long_ref && !h->ref_list[1][ref1].long_ref) {
2736  int poc1 = h->ref_list[1][ref1].poc;
2737  int td = av_clip(poc1 - poc0, -128, 127);
2738  if (td) {
2739  int tb = av_clip(cur_poc - poc0, -128, 127);
2740  int tx = (16384 + (FFABS(td) >> 1)) / td;
2741  int dist_scale_factor = (tb * tx + 32) >> 8;
2742  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
2743  w = 64 - dist_scale_factor;
2744  }
2745  }
2746  if (field < 0) {
2747  h->implicit_weight[ref0][ref1][0] =
2748  h->implicit_weight[ref0][ref1][1] = w;
2749  } else {
2750  h->implicit_weight[ref0][ref1][field] = w;
2751  }
2752  }
2753  }
2754 }
2755 
/* Reset the frame_num/POC prediction state after an IDR picture. */
static void idr(H264Context *h)
{
    /* NOTE(review): one stripped line here (extraction artifact); upstream
     * first drops all references via ff_h264_remove_all_refs(h) -- confirm. */
    h->prev_frame_num = 0;
    h->prev_frame_num_offset = 0;
    h->prev_poc_msb =
    h->prev_poc_lsb = 0;
}
2767 
2768 /* forget old pics after a seek */
2769 static void flush_change(H264Context *h)
2770 {
2771  int i;
2772  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
2773  h->last_pocs[i] = INT_MIN;
2774  h->outputed_poc = h->next_outputed_poc = INT_MIN;
2775  h->prev_interlaced_frame = 1;
2776  idr(h);
2777  if (h->cur_pic_ptr)
2778  h->cur_pic_ptr->reference = 0;
2779  h->first_field = 0;
2780  memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
2781  memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
2782  memset(h->default_ref_list[0], 0, sizeof(h->default_ref_list[0]));
2783  memset(h->default_ref_list[1], 0, sizeof(h->default_ref_list[1]));
2784  ff_h264_reset_sei(h);
2785  h->recovery_frame = -1;
2786  h->frame_recovered = 0;
2787 }
2788 
/* forget old pics after a seek */
static void flush_dpb(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    int i;

    /* drop everything still waiting in the delayed-output queue */
    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
        if (h->delayed_pic[i])
            h->delayed_pic[i]->reference = 0;
        h->delayed_pic[i] = NULL;
    }

    flush_change(h);

    /* release every DPB slot and the current picture */
    if (h->DPB)
        for (i = 0; i < MAX_PICTURE_COUNT; i++)
            unref_picture(h, &h->DPB[i]);
    h->cur_pic_ptr = NULL;
    unref_picture(h, &h->cur_pic);

    h->mb_x = h->mb_y = 0;

    /* reset the bitstream parser state too */
    h->parse_context.state = -1;
    /* NOTE(review): one stripped line here (extraction artifact); upstream
     * resets parse_context.frame_start_found -- confirm against original. */
    h->parse_context.overread = 0;
    /* NOTE(review): stripped line; upstream resets overread_index here. */
    h->parse_context.index = 0;
    h->parse_context.last_index = 0;

    free_tables(h, 1);
    h->context_initialized = 0;
}
2821 
/**
 * Compute the picture order count (POC) of the current picture from the
 * slice-header and SPS state in h, for all three poc_type modes.
 *
 * @param pic_field_poc receives the top/bottom field POCs
 * @param pic_poc       receives the picture POC (min of the two field POCs)
 * @return 0
 */
int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
{
    const int max_frame_num = 1 << h->sps.log2_max_frame_num;
    int field_poc[2];

    /* NOTE(review): stripped line here (extraction artifact); upstream
     * carries over "h->frame_num_offset = h->prev_frame_num_offset;". */
    if (h->frame_num < h->prev_frame_num)
        h->frame_num_offset += max_frame_num;

    if (h->sps.poc_type == 0) {
        const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;

        /* detect poc_lsb wrap-around (jump of more than half its range)
         * and adjust poc_msb accordingly */
        if (h->poc_lsb < h->prev_poc_lsb &&
            h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
            h->poc_msb = h->prev_poc_msb + max_poc_lsb;
        else if (h->poc_lsb > h->prev_poc_lsb &&
                 h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
            h->poc_msb = h->prev_poc_msb - max_poc_lsb;
        else
            h->poc_msb = h->prev_poc_msb;
        field_poc[0] =
        field_poc[1] = h->poc_msb + h->poc_lsb;
        if (h->picture_structure == PICT_FRAME)
            field_poc[1] += h->delta_poc_bottom;
    } else if (h->sps.poc_type == 1) {
        int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
        int i;

        if (h->sps.poc_cycle_length != 0)
            abs_frame_num = h->frame_num_offset + h->frame_num;
        else
            abs_frame_num = 0;

        if (h->nal_ref_idc == 0 && abs_frame_num > 0)
            abs_frame_num--;

        expected_delta_per_poc_cycle = 0;
        for (i = 0; i < h->sps.poc_cycle_length; i++)
            // FIXME integrate during sps parse
            expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];

        if (abs_frame_num > 0) {
            int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
            int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;

            expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
            for (i = 0; i <= frame_num_in_poc_cycle; i++)
                expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
        } else
            expectedpoc = 0;

        if (h->nal_ref_idc == 0)
            expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;

        field_poc[0] = expectedpoc + h->delta_poc[0];
        field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;

        if (h->picture_structure == PICT_FRAME)
            field_poc[1] += h->delta_poc[1];
    } else {
        /* poc_type 2: POC derived directly from frame_num */
        int poc = 2 * (h->frame_num_offset + h->frame_num);

        if (!h->nal_ref_idc)
            poc--;

        field_poc[0] = poc;
        field_poc[1] = poc;
    }

    /* NOTE(review): the two stripped lines here are the picture-structure
     * guards (upstream: "if (h->picture_structure != PICT_BOTTOM_FIELD)" /
     * "... != PICT_TOP_FIELD"); as shown both stores look unconditional --
     * restore from the original file. */
    pic_field_poc[0] = field_poc[0];
    pic_field_poc[1] = field_poc[1];
    *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);

    return 0;
}
2899 
/* NOTE(review): the signature line was stripped by extraction; upstream this
 * is "static void init_scan_tables(H264Context *h)".
 * Builds the decoder's zigzag/field scan tables by permuting the generic
 * scan orders into the cache layout used here (the T() swaps). */
{
    int i;
    for (i = 0; i < 16; i++) {
#define T(x) (x >> 2) | ((x << 2) & 0xF)
        h->zigzag_scan[i] = T(zigzag_scan[i]);
        h->field_scan[i] = T(field_scan[i]);
#undef T
    }
    for (i = 0; i < 64; i++) {
#define T(x) (x >> 3) | ((x & 7) << 3)
        h->zigzag_scan8x8[i] = T(ff_zigzag_direct[i]);
        /* NOTE(review): stripped line -- upstream also builds
         * zigzag_scan8x8_cavlc here. */
        h->field_scan8x8[i] = T(field_scan8x8[i]);
        /* NOTE(review): stripped line -- upstream also builds
         * field_scan8x8_cavlc here. */
#undef T
    }
    if (h->sps.transform_bypass) { // FIXME same ugly
        /* NOTE(review): several stripped lines -- upstream points the *_q0
         * tables at the unpermuted scans for the qscale==0 bypass case. */
    } else {
        h->zigzag_scan_q0 = h->zigzag_scan;
        /* NOTE(review): stripped lines -- upstream also aliases the 8x8 and
         * cavlc q0 tables here. */
        h->field_scan_q0 = h->field_scan;
        /* NOTE(review): stripped lines -- remaining q0 aliases lost. */
    }
}
2936 
/* Finish decoding the current field/frame: commit POC/frame_num prediction
 * state, close hwaccel decoding and run error concealment. */
static int field_end(H264Context *h, int in_setup)
{
    AVCodecContext *const avctx = h->avctx;
    int err = 0;
    h->mb_y = 0;

    if (!in_setup && !h->droppable)
        /* NOTE(review): two stripped lines -- upstream reports full decode
         * progress for the current picture to frame threads here. */

    if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) {
        if (!h->droppable) {
            /* NOTE(review): stripped line -- upstream executes the MMCO
             * reference-picture marking here. */
            h->prev_poc_msb = h->poc_msb;
            h->prev_poc_lsb = h->poc_lsb;
        }
        /* NOTE(review): stripped line -- upstream saves
         * prev_frame_num_offset here. */
        h->prev_frame_num = h->frame_num;
        /* NOTE(review): stripped line -- upstream saves outputed_poc here. */
    }

    if (avctx->hwaccel) {
        if (avctx->hwaccel->end_frame(avctx) < 0)
            av_log(avctx, AV_LOG_ERROR,
                   "hardware accelerator failed to decode picture\n");
    }

    /*
     * FIXME: Error handling code does not seem to support interlaced
     * when slices span multiple rows
     * The ff_er_add_slice calls don't work right for bottom
     * fields; they cause massive erroneous error concealing
     * Error marking covers both fields (top and bottom).
     * This causes a mismatched s->error_count
     * and a bad error table. Further, the error count goes to
     * INT_MAX when called for bottom field, because mb_y is
     * past end by one (callers fault) and resync_mb_y != 0
     * causes problems for the first MB line, too.
     */
    /* NOTE(review): the stripped line here is the condition guarding the
     * error-resilience block below -- restore from the original file. */
        h->er.cur_pic = h->cur_pic_ptr;
        h->er.last_pic = h->ref_count[0] ? &h->ref_list[0][0] : NULL;
        h->er.next_pic = h->ref_count[1] ? &h->ref_list[1][0] : NULL;
        ff_er_frame_end(&h->er);
    }
    emms_c();

    h->current_slice = 0;

    return err;
}
2988 
/**
 * Replicate per-picture decoding state from @p src into @p dst so a
 * slice-thread context can decode independently.
 * NOTE(review): the Doxygen comment preceding this function was lost in
 * extraction.
 */
static int clone_slice(H264Context *dst, H264Context *src)
{
    memcpy(dst->block_offset, src->block_offset, sizeof(dst->block_offset));
    dst->cur_pic_ptr = src->cur_pic_ptr;
    dst->cur_pic = src->cur_pic;
    dst->linesize = src->linesize;
    dst->uvlinesize = src->uvlinesize;
    dst->first_field = src->first_field;

    /* POC prediction state must match for correct ref-list handling */
    dst->prev_poc_msb = src->prev_poc_msb;
    dst->prev_poc_lsb = src->prev_poc_lsb;
    /* NOTE(review): stripped line -- upstream also copies
     * prev_frame_num_offset here; confirm against the original file. */
    dst->prev_frame_num = src->prev_frame_num;
    dst->short_ref_count = src->short_ref_count;

    memcpy(dst->short_ref, src->short_ref, sizeof(dst->short_ref));
    memcpy(dst->long_ref, src->long_ref, sizeof(dst->long_ref));
    memcpy(dst->default_ref_list, src->default_ref_list, sizeof(dst->default_ref_list));

    memcpy(dst->dequant4_coeff, src->dequant4_coeff, sizeof(src->dequant4_coeff));
    memcpy(dst->dequant8_coeff, src->dequant8_coeff, sizeof(src->dequant8_coeff));

    return 0;
}
3016 
/* NOTE(review): the Doxygen comment and signature were stripped by
 * extraction; upstream this is "int ff_h264_get_profile(SPS *sps)", and the
 * switch below lost its "case FF_PROFILE_H264_*" labels (baseline for the
 * first arm; high 10/422/444 for the second) -- restore from the original.
 * Maps SPS profile_idc + constraint flags to an FF_PROFILE_* value. */
{
    int profile = sps->profile_idc;

    switch (sps->profile_idc) {
    /* NOTE(review): stripped case label (baseline profile). */
        // constraint_set1_flag set to 1
        profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
        break;
    /* NOTE(review): stripped case labels (high bit-depth profiles). */
        // constraint_set3_flag set to 1
        profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
        break;
    }

    return profile;
}
3043 
/* NOTE(review): the signature line was stripped by extraction; upstream this
 * is "static int h264_set_parameter_from_sps(H264Context *h)".
 * Applies SPS-derived settings (low delay, bit depth, chroma format) and
 * reinitializes the DSP helpers when the bit depth changes. */
{
    if (h->flags & CODEC_FLAG_LOW_DELAY ||
        /* NOTE(review): stripped line -- upstream opens a
         * bitstream_restriction_flag sub-condition here. */
         !h->sps.num_reorder_frames)) {
        if (h->avctx->has_b_frames > 1 || h->delayed_pic[0])
            av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. "
                   "Reenabling low delay requires a codec flush.\n");
        else
            h->low_delay = 1;
    }

    if (h->avctx->has_b_frames < 2)
        h->avctx->has_b_frames = !h->low_delay;

    if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
        /* NOTE(review): stripped line -- upstream also compares the current
         * chroma_format_idc and opens the block here. */
        if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
            /* NOTE(review): stripped lines -- upstream records the new bit
             * depth and chroma format into the context here. */
            h->pixel_shift = h->sps.bit_depth_luma > 8;

            /* NOTE(review): stripped line -- the opening of the h264dsp
             * init call; only its trailing argument survives below. */
                              h->sps.chroma_format_idc);
            /* NOTE(review): stripped lines -- further chroma/qpel/pred
             * init calls lost; trailing argument survives below. */
                                h->sps.chroma_format_idc);
            /* NOTE(review): stripped line -- condition guarding dsputil init. */
                ff_dsputil_init(&h->dsp, h->avctx);
            /* NOTE(review): stripped line -- upstream initializes videodsp. */
        } else {
            av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
                   h->sps.bit_depth_luma);
            return AVERROR_INVALIDDATA;
        }
    }
    return 0;
}
3083 
/* NOTE(review): the signature line was stripped by extraction; upstream this
 * is "static enum AVPixelFormat get_pixel_format(H264Context *h)".
 * Chooses the output pixel format from bit depth, chroma format and
 * colorspace; negative return values are errors. */
{
    switch (h->sps.bit_depth_luma) {
    case 9:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                return AV_PIX_FMT_GBRP9;
            } else
                return AV_PIX_FMT_YUV444P9;
        } else if (CHROMA422(h))
            return AV_PIX_FMT_YUV422P9;
        else
            return AV_PIX_FMT_YUV420P9;
        break;
    case 10:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                return AV_PIX_FMT_GBRP10;
            } else
                return AV_PIX_FMT_YUV444P10;
        } else if (CHROMA422(h))
            return AV_PIX_FMT_YUV422P10;
        else
            return AV_PIX_FMT_YUV420P10;
        break;
    case 8:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                return AV_PIX_FMT_GBRP;
            } else
            /* NOTE(review): stripped lines -- upstream returns the 4:4:4
             * (J)YUV format depending on color_range here. */
        } else if (CHROMA422(h)) {
            /* NOTE(review): stripped lines -- upstream returns the 4:2:2
             * (J)YUV format depending on color_range here. */
        } else {
            /* 8-bit 4:2:0: let the user callback pick (enables hwaccel) */
            return h->avctx->get_format(h->avctx, h->avctx->codec->pix_fmts ?
                                        h->avctx->codec->pix_fmts :
                                        /* NOTE(review): stripped lines -- the
                                         * fallback pix_fmt list arguments
                                         * were lost in extraction. */
        }
        break;
    default:
        /* NOTE(review): stripped line -- the opening of the av_log call. */
               "Unsupported bit depth %d\n", h->sps.bit_depth_luma);
        return AVERROR_INVALIDDATA;
    }
}
3133 
/* export coded and cropped frame dimensions to AVCodecContext */
/* NOTE(review): the signature line was stripped by extraction; upstream this
 * is "static int init_dimensions(H264Context *h)". */
{
    int width = h->width - (h->sps.crop_right + h->sps.crop_left);
    int height = h->height - (h->sps.crop_top + h->sps.crop_bottom);

    /* handle container cropping */
    if (!h->sps.crop &&
        FFALIGN(h->avctx->width, 16) == h->width &&
        FFALIGN(h->avctx->height, 16) == h->height) {
        width = h->avctx->width;
        height = h->avctx->height;
    }

    if (width <= 0 || height <= 0) {
        av_log(h->avctx, AV_LOG_ERROR, "Invalid cropped dimensions: %dx%d.\n",
               width, height);
        /* NOTE(review): stripped line -- upstream guards this return with an
         * err_recognition (AV_EF_EXPLODE) check; as shown the return looks
         * unconditional and the warning path below unreachable. */
        return AVERROR_INVALIDDATA;

        /* recovery path: drop the bogus cropping and keep coded size */
        av_log(h->avctx, AV_LOG_WARNING, "Ignoring cropping information.\n");
        h->sps.crop_bottom = h->sps.crop_top = h->sps.crop_right = h->sps.crop_left = 0;
        h->sps.crop = 0;

        width = h->width;
        height = h->height;
    }

    h->avctx->coded_width = h->width;
    h->avctx->coded_height = h->height;
    h->avctx->width = width;
    h->avctx->height = height;

    return 0;
}
3169 
/* One-time (or reinit) setup driven by the first slice header: frame rate,
 * hwaccel, scan tables, per-slice-thread contexts. Returns 0 or an AVERROR. */
static int h264_slice_header_init(H264Context *h, int reinit)
{
    int nb_slices = (HAVE_THREADS &&
                     /* NOTE(review): stripped line -- upstream checks
                      * active_thread_type for FF_THREAD_SLICE here. */
                     h->avctx->thread_count : 1;
    int i, ret;

    h->avctx->sample_aspect_ratio = h->sps.sar;
    /* NOTE(review): stripped lines -- upstream derives chroma_x/y_shift via
     * av_pix_fmt_get_chroma_sub_sample(); only the tail survives below. */
                                     &h->chroma_x_shift, &h->chroma_y_shift);

    if (h->sps.timing_info_present_flag) {
        int64_t den = h->sps.time_scale;
        /* old x264 builds wrote half the correct time_scale */
        if (h->x264_build < 44U)
            den *= 2;
        /* NOTE(review): stripped line -- the opening of the av_reduce()
         * call setting the stream time base; tail survives below. */
                  h->sps.num_units_in_tick, den, 1 << 30);
    }

    h->avctx->hwaccel = ff_find_hwaccel(h->avctx);

    if (reinit)
        free_tables(h, 0);
    h->first_field = 0;
    h->prev_interlaced_frame = 1;

    init_scan_tables(h);
    ret = ff_h264_alloc_tables(h);
    if (ret < 0) {
        av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
        return ret;
    }

    /* clamp slice-thread count to something usable */
    if (nb_slices > MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) {
        int max_slices;
        if (h->mb_height)
            max_slices = FFMIN(MAX_THREADS, h->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(h->avctx, AV_LOG_WARNING, "too many threads/slices %d,"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }
    h->slice_context_count = nb_slices;

    /* NOTE(review): stripped line -- the condition selecting the
     * single-context vs. slice-threaded setup path below. */
        ret = context_init(h);
        if (ret < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
            return ret;
        }
    } else {
        /* clone per-thread contexts from the master context */
        for (i = 1; i < h->slice_context_count; i++) {
            H264Context *c;
            c = h->thread_context[i] = av_mallocz(sizeof(H264Context));
            if (!c)
                return AVERROR(ENOMEM);
            c->avctx = h->avctx;
            c->dsp = h->dsp;
            c->vdsp = h->vdsp;
            c->h264dsp = h->h264dsp;
            c->h264qpel = h->h264qpel;
            c->h264chroma = h->h264chroma;
            c->sps = h->sps;
            c->pps = h->pps;
            c->pixel_shift = h->pixel_shift;
            c->width = h->width;
            c->height = h->height;
            c->linesize = h->linesize;
            c->uvlinesize = h->uvlinesize;
            /* NOTE(review): stripped lines -- upstream also copies the
             * chroma shift fields here; confirm against the original. */
            c->qscale = h->qscale;
            c->droppable = h->droppable;
            /* NOTE(review): stripped line (one more field copy lost). */
            c->low_delay = h->low_delay;
            c->mb_width = h->mb_width;
            c->mb_height = h->mb_height;
            c->mb_stride = h->mb_stride;
            c->mb_num = h->mb_num;
            c->flags = h->flags;
            /* NOTE(review): stripped line (one more field copy lost). */
            c->pict_type = h->pict_type;

            init_scan_tables(c);
            clone_tables(c, h, i);
            c->context_initialized = 1;
        }

        for (i = 0; i < h->slice_context_count; i++)
            if ((ret = context_init(h->thread_context[i])) < 0) {
                av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
                return ret;
            }
    }

    h->context_initialized = 1;

    return 0;
}
3271 
/* NOTE(review): the signature line was stripped by extraction; upstream this
 * is "static int h264_parse_ref_count(...)".
 * Parses the slice-header reference-count overrides, validates them, and
 * updates h->ref_count/h->list_count. Returns 1 if the counts changed,
 * 0 if unchanged, or a negative AVERROR on invalid input. */
{
    int ref_count[2], list_count;
    int num_ref_idx_active_override_flag, max_refs;

    // set defaults, might be overridden a few lines later
    ref_count[0] = h->pps.ref_count[0];
    ref_count[1] = h->pps.ref_count[1];

    if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
        /* NOTE(review): stripped lines -- upstream reads
         * direct_spatial_mv_pred for B slices here. */
        num_ref_idx_active_override_flag = get_bits1(&h->gb);

        if (num_ref_idx_active_override_flag) {
            ref_count[0] = get_ue_golomb(&h->gb) + 1;
            if (ref_count[0] < 1)
                return AVERROR_INVALIDDATA;
            if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
                ref_count[1] = get_ue_golomb(&h->gb) + 1;
                if (ref_count[1] < 1)
                    return AVERROR_INVALIDDATA;
            }
        }

        /* NOTE(review): stripped condition -- upstream selects two lists
         * only for B slices here. */
            list_count = 2;
        else
            list_count = 1;
    } else {
        list_count = 0;
        ref_count[0] = ref_count[1] = 0;
    }

    /* field pictures may address twice as many references */
    max_refs = h->picture_structure == PICT_FRAME ? 16 : 32;

    if (ref_count[0] > max_refs || ref_count[1] > max_refs) {
        av_log(h->avctx, AV_LOG_ERROR, "reference overflow\n");
        h->ref_count[0] = h->ref_count[1] = 0;
        return AVERROR_INVALIDDATA;
    }

    if (list_count != h->list_count ||
        ref_count[0] != h->ref_count[0] ||
        ref_count[1] != h->ref_count[1]) {
        h->ref_count[0] = ref_count[0];
        h->ref_count[1] = ref_count[1];
        h->list_count = list_count;
        return 1;
    }

    return 0;
}
3325 
/* Slice-header decoding: parses the slice header bits from h->gb, resolves
 * the referenced PPS/SPS, (re)initializes the context on stream parameter
 * changes, manages field pairing and frame_num gaps, builds reference lists
 * and the ref2frm mapping, and parses QP/deblocking parameters.
 * NOTE(review): this is a doxygen excerpt — the function's signature line
 * and numerous interior source lines are elided, so several conditions and
 * call argument lists below appear truncated. */
3337 {
3338  unsigned int first_mb_in_slice;
3339  unsigned int pps_id;
3340  int ret;
3341  unsigned int slice_type, tmp, i, j;
3342  int default_ref_list_done = 0;
3343  int last_pic_structure, last_pic_droppable;
3344  int needs_reinit = 0;
3345  int field_pic_flag, bottom_field_flag;
3346 
3349 
3350  first_mb_in_slice = get_ue_golomb(&h->gb);
3351 
/* A slice starting at macroblock 0 terminates the previous field/picture. */
3352  if (first_mb_in_slice == 0) { // FIXME better field boundary detection
3353  if (h0->current_slice && h->cur_pic_ptr && FIELD_PICTURE(h)) {
3354  field_end(h, 1);
3355  }
3356 
3357  h0->current_slice = 0;
3358  if (!h0->first_field) {
3359  if (h->cur_pic_ptr && !h->droppable) {
3362  }
3363  h->cur_pic_ptr = NULL;
3364  }
3365  }
3366 
/* slice_type values 5..9 mean "all slices of this picture have this type"
 * and are folded into 0..4 with slice_type_fixed set. */
3367  slice_type = get_ue_golomb_31(&h->gb);
3368  if (slice_type > 9) {
3370  "slice type %d too large at %d %d\n",
3371  h->slice_type, h->mb_x, h->mb_y);
3372  return AVERROR_INVALIDDATA;
3373  }
3374  if (slice_type > 4) {
3375  slice_type -= 5;
3376  h->slice_type_fixed = 1;
3377  } else
3378  h->slice_type_fixed = 0;
3379 
3380  slice_type = golomb_to_pict_type[slice_type];
3381  if (slice_type == AV_PICTURE_TYPE_I ||
3382  (h0->current_slice != 0 && slice_type == h0->last_slice_type)) {
3383  default_ref_list_done = 1;
3384  }
3385  h->slice_type = slice_type;
3386  h->slice_type_nos = slice_type & 3;
3387 
3388  if (h->nal_unit_type == NAL_IDR_SLICE &&
3390  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
3391  return AVERROR_INVALIDDATA;
3392  }
3393 
3394  // to make a few old functions happy, it's wrong though
3395  h->pict_type = h->slice_type;
3396 
/* Resolve the PPS referenced by this slice, and through it the SPS. */
3397  pps_id = get_ue_golomb(&h->gb);
3398  if (pps_id >= MAX_PPS_COUNT) {
3399  av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id);
3400  return AVERROR_INVALIDDATA;
3401  }
3402  if (!h0->pps_buffers[pps_id]) {
3404  "non-existing PPS %u referenced\n",
3405  pps_id);
3406  return AVERROR_INVALIDDATA;
3407  }
3408  h->pps = *h0->pps_buffers[pps_id];
3409 
3410  if (!h0->sps_buffers[h->pps.sps_id]) {
3412  "non-existing SPS %u referenced\n",
3413  h->pps.sps_id);
3414  return AVERROR_INVALIDDATA;
3415  }
3416 
/* Adopt a new/changed SPS and flag a reinit if key parameters differ. */
3417  if (h->pps.sps_id != h->sps.sps_id ||
3418  h0->sps_buffers[h->pps.sps_id]->new) {
3419  h0->sps_buffers[h->pps.sps_id]->new = 0;
3420 
3421  h->sps = *h0->sps_buffers[h->pps.sps_id];
3422 
3423  if (h->bit_depth_luma != h->sps.bit_depth_luma ||
3427  needs_reinit = 1;
3428  }
3429  if ((ret = h264_set_parameter_from_sps(h)) < 0)
3430  return ret;
3431  }
3432 
3433  h->avctx->profile = ff_h264_get_profile(&h->sps);
3434  h->avctx->level = h->sps.level_idc;
3435  h->avctx->refs = h->sps.ref_frame_count;
3436 
3437  if (h->mb_width != h->sps.mb_width ||
3438  h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag))
3439  needs_reinit = 1;
3440 
3441  h->mb_width = h->sps.mb_width;
3442  h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
3443  h->mb_num = h->mb_width * h->mb_height;
3444  h->mb_stride = h->mb_width + 1;
3445 
3446  h->b_stride = h->mb_width * 4;
3447 
3448  h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
3449 
3450  h->width = 16 * h->mb_width;
3451  h->height = 16 * h->mb_height;
3452 
3453  ret = init_dimensions(h);
3454  if (ret < 0)
3455  return ret;
3456 
/* NOTE(review): the expression assigning the color range (and the guard
 * around the colorspace update) is elided from this listing. */
3459  : AVCOL_RANGE_MPEG;
3461  if (h->avctx->colorspace != h->sps.colorspace)
3462  needs_reinit = 1;
3464  h->avctx->color_trc = h->sps.color_trc;
3465  h->avctx->colorspace = h->sps.colorspace;
3466  }
3467  }
3468 
/* Reinitialize the decoding context when dimensions or stream parameters
 * changed mid-stream; not allowed on slice contexts other than h0. */
3469  if (h->context_initialized &&
3470  (h->width != h->avctx->coded_width ||
3471  h->height != h->avctx->coded_height ||
3472  needs_reinit)) {
3473  if (h != h0) {
3474  av_log(h->avctx, AV_LOG_ERROR, "changing width/height on "
3475  "slice %d\n", h0->current_slice + 1);
3476  return AVERROR_INVALIDDATA;
3477  }
3478 
3479  flush_change(h);
3480 
3481  if ((ret = get_pixel_format(h)) < 0)
3482  return ret;
3483  h->avctx->pix_fmt = ret;
3484 
3485  av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, "
3486  "pix_fmt: %d\n", h->width, h->height, h->avctx->pix_fmt);
3487 
3488  if ((ret = h264_slice_header_init(h, 1)) < 0) {
3490  "h264_slice_header_init() failed\n");
3491  return ret;
3492  }
3493  }
3494  if (!h->context_initialized) {
3495  if (h != h0) {
3497  "Cannot (re-)initialize context during parallel decoding.\n");
3498  return AVERROR_PATCHWELCOME;
3499  }
3500 
3501  if ((ret = get_pixel_format(h)) < 0)
3502  return ret;
3503  h->avctx->pix_fmt = ret;
3504 
3505  if ((ret = h264_slice_header_init(h, 0)) < 0) {
3507  "h264_slice_header_init() failed\n");
3508  return ret;
3509  }
3510  }
3511 
3512  if (h == h0 && h->dequant_coeff_pps != pps_id) {
3513  h->dequant_coeff_pps = pps_id;
3515  }
3516 
3517  h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
3518 
/* Determine picture structure (frame / top field / bottom field) and
 * MBAFF mode from the slice header bits. */
3519  h->mb_mbaff = 0;
3520  h->mb_aff_frame = 0;
3521  last_pic_structure = h0->picture_structure;
3522  last_pic_droppable = h0->droppable;
3523  h->droppable = h->nal_ref_idc == 0;
3524  if (h->sps.frame_mbs_only_flag) {
3526  } else {
3527  field_pic_flag = get_bits1(&h->gb);
3528  if (field_pic_flag) {
3529  bottom_field_flag = get_bits1(&h->gb);
3530  h->picture_structure = PICT_TOP_FIELD + bottom_field_flag;
3531  } else {
3533  h->mb_aff_frame = h->sps.mb_aff;
3534  }
3535  }
3537 
3538  if (h0->current_slice != 0) {
3539  if (last_pic_structure != h->picture_structure ||
3540  last_pic_droppable != h->droppable) {
3542  "Changing field mode (%d -> %d) between slices is not allowed\n",
3543  last_pic_structure, h->picture_structure);
3544  h->picture_structure = last_pic_structure;
3545  h->droppable = last_pic_droppable;
3546  return AVERROR_INVALIDDATA;
3547  } else if (!h0->cur_pic_ptr) {
3549  "unset cur_pic_ptr on slice %d\n",
3550  h0->current_slice + 1);
3551  return AVERROR_INVALIDDATA;
3552  }
3553  } else {
3554  /* Shorten frame num gaps so we don't have to allocate reference
3555  * frames just to throw them away */
3556  if (h->frame_num != h->prev_frame_num) {
3557  int unwrap_prev_frame_num = h->prev_frame_num;
3558  int max_frame_num = 1 << h->sps.log2_max_frame_num;
3559 
3560  if (unwrap_prev_frame_num > h->frame_num)
3561  unwrap_prev_frame_num -= max_frame_num;
3562 
3563  if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) {
3564  unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1;
3565  if (unwrap_prev_frame_num < 0)
3566  unwrap_prev_frame_num += max_frame_num;
3567 
3568  h->prev_frame_num = unwrap_prev_frame_num;
3569  }
3570  }
3571 
3572  /* See if we have a decoded first field looking for a pair...
3573  * Here, we're using that to see if we should mark previously
3574  * decode frames as "finished".
3575  * We have to do that before the "dummy" in-between frame allocation,
3576  * since that can modify s->current_picture_ptr. */
3577  if (h0->first_field) {
3578  assert(h0->cur_pic_ptr);
3579  assert(h0->cur_pic_ptr->f.buf[0]);
3580  assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
3581 
3582  /* figure out if we have a complementary field pair */
3583  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
3584  /* Previous field is unmatched. Don't display it, but let it
3585  * remain for reference if marked as such. */
3586  if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
3587  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
3588  last_pic_structure == PICT_TOP_FIELD);
3589  }
3590  } else {
3591  if (h0->cur_pic_ptr->frame_num != h->frame_num) {
3592  /* This and previous field were reference, but had
3593  * different frame_nums. Consider this field first in
3594  * pair. Throw away previous field except for reference
3595  * purposes. */
3596  if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
3597  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
3598  last_pic_structure == PICT_TOP_FIELD);
3599  }
3600  } else {
3601  /* Second field in complementary pair */
3602  if (!((last_pic_structure == PICT_TOP_FIELD &&
3604  (last_pic_structure == PICT_BOTTOM_FIELD &&
3607  "Invalid field mode combination %d/%d\n",
3608  last_pic_structure, h->picture_structure);
3609  h->picture_structure = last_pic_structure;
3610  h->droppable = last_pic_droppable;
3611  return AVERROR_INVALIDDATA;
3612  } else if (last_pic_droppable != h->droppable) {
3614  "Found reference and non-reference fields in the same frame, which");
3615  h->picture_structure = last_pic_structure;
3616  h->droppable = last_pic_droppable;
3617  return AVERROR_PATCHWELCOME;
3618  }
3619  }
3620  }
3621  }
3622 
/* Close any remaining frame_num gap by generating "dummy" frames (see the
 * comment above) and concealing their content from the previous ref. */
3623  while (h->frame_num != h->prev_frame_num &&
3624  h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
3625  Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
3626  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
3627  h->frame_num, h->prev_frame_num);
3628  ret = h264_frame_start(h);
3629  if (ret < 0) {
3630  h0->first_field = 0;
3631  return ret;
3632  }
3633 
3634  h->prev_frame_num++;
3635  h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
3637  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
3638  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
3640  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
3641  return ret;
3643  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
3644  return ret;
3645  /* Error concealment: If a ref is missing, copy the previous ref
3646  * in its place.
3647  * FIXME: Avoiding a memcpy would be nice, but ref handling makes
3648  * many assumptions about there being no actual duplicates.
3649  * FIXME: This does not copy padding for out-of-frame motion
3650  * vectors. Given we are concealing a lost frame, this probably
3651  * is not noticeable by comparison, but it should be fixed. */
3652  if (h->short_ref_count) {
3653  if (prev) {
3654  av_image_copy(h->short_ref[0]->f.data,
3655  h->short_ref[0]->f.linesize,
3656  (const uint8_t **)prev->f.data,
3657  prev->f.linesize,
3658  h->avctx->pix_fmt,
3659  h->mb_width * 16,
3660  h->mb_height * 16);
3661  h->short_ref[0]->poc = prev->poc + 2;
3662  }
3663  h->short_ref[0]->frame_num = h->prev_frame_num;
3664  }
3665  }
3666 
3667  /* See if we have a decoded first field looking for a pair...
3668  * We're using that to see whether to continue decoding in that
3669  * frame, or to allocate a new one. */
3670  if (h0->first_field) {
3671  assert(h0->cur_pic_ptr);
3672  assert(h0->cur_pic_ptr->f.buf[0]);
3673  assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
3674 
3675  /* figure out if we have a complementary field pair */
3676  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
3677  /* Previous field is unmatched. Don't display it, but let it
3678  * remain for reference if marked as such. */
3679  h0->cur_pic_ptr = NULL;
3680  h0->first_field = FIELD_PICTURE(h);
3681  } else {
3682  if (h0->cur_pic_ptr->frame_num != h->frame_num) {
3683  /* This and the previous field had different frame_nums.
3684  * Consider this field first in pair. Throw away previous
3685  * one except for reference purposes. */
3686  h0->first_field = 1;
3687  h0->cur_pic_ptr = NULL;
3688  } else {
3689  /* Second field in complementary pair */
3690  h0->first_field = 0;
3691  }
3692  }
3693  } else {
3694  /* Frame or first field in a potentially complementary pair */
3695  h0->first_field = FIELD_PICTURE(h);
3696  }
3697 
3698  if (!FIELD_PICTURE(h) || h0->first_field) {
3699  if (h264_frame_start(h) < 0) {
3700  h0->first_field = 0;
3701  return AVERROR_INVALIDDATA;
3702  }
3703  } else {
3705  }
3706  }
3707  if (h != h0 && (ret = clone_slice(h, h0)) < 0)
3708  return ret;
3709 
3710  h->cur_pic_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup
3711 
3712  assert(h->mb_num == h->mb_width * h->mb_height);
3713  if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
3714  first_mb_in_slice >= h->mb_num) {
3715  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
3716  return AVERROR_INVALIDDATA;
3717  }
3718  h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width;
3719  h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) <<
3722  h->resync_mb_y = h->mb_y = h->mb_y + 1;
3723  assert(h->mb_y < h->mb_height);
3724 
/* Picture numbering: fields use 2*frame_num+1 and a doubled wrap range. */
3725  if (h->picture_structure == PICT_FRAME) {
3726  h->curr_pic_num = h->frame_num;
3727  h->max_pic_num = 1 << h->sps.log2_max_frame_num;
3728  } else {
3729  h->curr_pic_num = 2 * h->frame_num + 1;
3730  h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1);
3731  }
3732 
3733  if (h->nal_unit_type == NAL_IDR_SLICE)
3734  get_ue_golomb(&h->gb); /* idr_pic_id */
3735 
/* Picture order count fields, per the SPS poc_type. */
3736  if (h->sps.poc_type == 0) {
3737  h->poc_lsb = get_bits(&h->gb, h->sps.log2_max_poc_lsb);
3738 
3739  if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME)
3740  h->delta_poc_bottom = get_se_golomb(&h->gb);
3741  }
3742 
3743  if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) {
3744  h->delta_poc[0] = get_se_golomb(&h->gb);
3745 
3746  if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME)
3747  h->delta_poc[1] = get_se_golomb(&h->gb);
3748  }
3749 
3751 
3754 
/* Reference counts changed (ret == 1) invalidate the default ref list. */
3755  ret = ff_set_ref_count(h);
3756  if (ret < 0)
3757  return ret;
3758  else if (ret == 1)
3759  default_ref_list_done = 0;
3760 
3761  if (!default_ref_list_done)
3763 
3764  if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
3766  if (ret < 0) {
3767  h->ref_count[1] = h->ref_count[0] = 0;
3768  return ret;
3769  }
3770  }
3771 
/* Weighted prediction: explicit tables for weighted P / bipred idc 1,
 * implicit tables for bipred idc 2, otherwise disabled. */
3772  if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) ||
3773  (h->pps.weighted_bipred_idc == 1 &&
3776  else if (h->pps.weighted_bipred_idc == 2 &&
3778  implicit_weight_table(h, -1);
3779  } else {
3780  h->use_weight = 0;
3781  for (i = 0; i < 2; i++) {
3782  h->luma_weight_flag[i] = 0;
3783  h->chroma_weight_flag[i] = 0;
3784  }
3785  }
3786 
3787  // If frame-mt is enabled, only update mmco tables for the first slice
3788  // in a field. Subsequent slices can temporarily clobber h->mmco_index
3789  // or h->mmco, which will cause ref list mix-ups and decoding errors
3790  // further down the line. This may break decoding if the first slice is
3791  // corrupt, thus we only do this if frame-mt is enabled.
3792  if (h->nal_ref_idc) {
3793  ret = ff_h264_decode_ref_pic_marking(h0, &h->gb,
3795  h0->current_slice == 0);
3796  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
3797  return AVERROR_INVALIDDATA;
3798  }
3799 
3800  if (FRAME_MBAFF(h)) {
3802 
3804  implicit_weight_table(h, 0);
3805  implicit_weight_table(h, 1);
3806  }
3807  }
3808 
3812 
3813  if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) {
3814  tmp = get_ue_golomb_31(&h->gb);
3815  if (tmp > 2) {
3816  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
3817  return AVERROR_INVALIDDATA;
3818  }
3819  h->cabac_init_idc = tmp;
3820  }
3821 
/* slice_qp_delta and the derived luma/chroma QP values. */
3822  h->last_qscale_diff = 0;
3823  tmp = h->pps.init_qp + get_se_golomb(&h->gb);
3824  if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) {
3825  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
3826  return AVERROR_INVALIDDATA;
3827  }
3828  h->qscale = tmp;
3829  h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale);
3830  h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale);
3831  // FIXME qscale / qp ... stuff
3832  if (h->slice_type == AV_PICTURE_TYPE_SP)
3833  get_bits1(&h->gb); /* sp_for_switch_flag */
3834  if (h->slice_type == AV_PICTURE_TYPE_SP ||
3836  get_se_golomb(&h->gb); /* slice_qs_delta */
3837 
/* Deblocking filter parameters.
 * NOTE(review): the guard (presumably deblocking_filter_control_present)
 * before line 3842 is elided from this listing. */
3838  h->deblocking_filter = 1;
3839  h->slice_alpha_c0_offset = 0;
3840  h->slice_beta_offset = 0;
3842  tmp = get_ue_golomb_31(&h->gb);
3843  if (tmp > 2) {
3845  "deblocking_filter_idc %u out of range\n", tmp);
3846  return AVERROR_INVALIDDATA;
3847  }
3848  h->deblocking_filter = tmp;
3849  if (h->deblocking_filter < 2)
3850  h->deblocking_filter ^= 1; // 1<->0
3851 
3852  if (h->deblocking_filter) {
3853  h->slice_alpha_c0_offset = get_se_golomb(&h->gb) * 2;
3854  h->slice_beta_offset = get_se_golomb(&h->gb) * 2;
3855  if (h->slice_alpha_c0_offset > 12 ||
3856  h->slice_alpha_c0_offset < -12 ||
3857  h->slice_beta_offset > 12 ||
3858  h->slice_beta_offset < -12) {
3860  "deblocking filter parameters %d %d out of range\n",
3862  return AVERROR_INVALIDDATA;
3863  }
3864  }
3865  }
3866 
3867  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
3873  h->nal_ref_idc == 0))
3874  h->deblocking_filter = 0;
3875 
/* deblocking mode 1 cannot be parallelized across slice contexts; either
 * cheat (mode 2, no cross-slice filtering) or fall back to one context. */
3876  if (h->deblocking_filter == 1 && h0->max_contexts > 1) {
3877  if (h->avctx->flags2 & CODEC_FLAG2_FAST) {
3878  /* Cheat slightly for speed:
3879  * Do not bother to deblock across slices. */
3880  h->deblocking_filter = 2;
3881  } else {
3882  h0->max_contexts = 1;
3883  if (!h0->single_decode_warning) {
3884  av_log(h->avctx, AV_LOG_INFO,
3885  "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
3886  h0->single_decode_warning = 1;
3887  }
3888  if (h != h0) {
3890  "Deblocking switched inside frame.\n");
3891  return 1;
3892  }
3893  }
3894  }
3895  h->qp_thresh = 15 -
3897  FFMAX3(0,
3899  h->pps.chroma_qp_index_offset[1]) +
3900  6 * (h->sps.bit_depth_luma - 8);
3901 
3902  h0->last_slice_type = slice_type;
3903  h->slice_num = ++h0->current_slice;
3904  if (h->slice_num >= MAX_SLICES) {
3906  "Too many slices, increase MAX_SLICES and recompile\n");
3907  }
3908 
/* Build the per-slice ref2frm table: map each reference-list index to a
 * global picture id (4 * id + reference flags); id 60 marks "no picture". */
3909  for (j = 0; j < 2; j++) {
3910  int id_list[16];
3911  int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j];
3912  for (i = 0; i < 16; i++) {
3913  id_list[i] = 60;
3914  if (j < h->list_count && i < h->ref_count[j] &&
3915  h->ref_list[j][i].f.buf[0]) {
3916  int k;
3917  AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer;
3918  for (k = 0; k < h->short_ref_count; k++)
3919  if (h->short_ref[k]->f.buf[0]->buffer == buf) {
3920  id_list[i] = k;
3921  break;
3922  }
3923  for (k = 0; k < h->long_ref_count; k++)
3924  if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) {
3925  id_list[i] = h->short_ref_count + k;
3926  break;
3927  }
3928  }
3929  }
3930 
3931  ref2frm[0] =
3932  ref2frm[1] = -1;
3933  for (i = 0; i < 16; i++)
3934  ref2frm[i + 2] = 4 * id_list[i] + (h->ref_list[j][i].reference & 3);
3935  ref2frm[18 + 0] =
3936  ref2frm[18 + 1] = -1;
3937  for (i = 16; i < 48; i++)
3938  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
3939  (h->ref_list[j][i].reference & 3);
3940  }
3941 
3942  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
3944  "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
3945  h->slice_num,
3946  (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
3947  first_mb_in_slice,
3949  h->slice_type_fixed ? " fix" : "",
3950  h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
3951  pps_id, h->frame_num,
3952  h->cur_pic_ptr->field_poc[0],
3953  h->cur_pic_ptr->field_poc[1],
3954  h->ref_count[0], h->ref_count[1],
3955  h->qscale,
3956  h->deblocking_filter,
3958  h->use_weight,
3959  h->use_weight == 1 && h->use_weight_chroma ? "c" : "",
3960  h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
3961  }
3962 
3963  return 0;
3964 }
3965 
3967 {
3968  switch (h->slice_type) {
3969  case AV_PICTURE_TYPE_P:
3970  return 0;
3971  case AV_PICTURE_TYPE_B:
3972  return 1;
3973  case AV_PICTURE_TYPE_I:
3974  return 2;
3975  case AV_PICTURE_TYPE_SP:
3976  return 3;
3977  case AV_PICTURE_TYPE_SI:
3978  return 4;
3979  default:
3980  return AVERROR_INVALIDDATA;
3981  }
3982 }
3983 
/* Fill the motion-vector (mv_cache) and reference-index (ref_cache) caches
 * for one prediction list of the current macroblock, pulling in the bottom
 * row of the top neighbour and the right column of the left neighbour so
 * the loop filter can compare MVs/refs across MB edges.
 * NOTE(review): the first line of the signature (the function name and the
 * H264Context parameter) is elided from this listing. */
3985  int mb_type, int top_xy,
3986  int left_xy[LEFT_MBS],
3987  int top_type,
3988  int left_type[LEFT_MBS],
3989  int mb_xy, int list)
3990 {
3991  int b_stride = h->b_stride;
3992  int16_t(*mv_dst)[2] = &h->mv_cache[list][scan8[0]];
3993  int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
3994  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
/* Top neighbour: copy its bottom row of MVs and the matching ref indices,
 * remapped through the neighbour slice's ref2frm table. */
3995  if (USES_LIST(top_type, list)) {
3996  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
3997  const int b8_xy = 4 * top_xy + 2;
3998  int (*ref2frm)[64] = h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF(h) ? 20 : 2);
3999  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
4000  ref_cache[0 - 1 * 8] =
4001  ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 0]];
4002  ref_cache[2 - 1 * 8] =
4003  ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 1]];
4004  } else {
/* Top neighbour does not use this list: zero MVs, mark refs unused. */
4005  AV_ZERO128(mv_dst - 1 * 8);
4006  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
4007  }
4008 
/* Left neighbour, only when it has the same interlacing as the current MB. */
4009  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
4010  if (USES_LIST(left_type[LTOP], list)) {
4011  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
4012  const int b8_xy = 4 * left_xy[LTOP] + 1;
4013  int (*ref2frm)[64] = h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF(h) ? 20 : 2);
4014  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
4015  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
4016  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
4017  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
4018  ref_cache[-1 + 0] =
4019  ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
4020  ref_cache[-1 + 16] =
4021  ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
4022  } else {
4023  AV_ZERO32(mv_dst - 1 + 0);
4024  AV_ZERO32(mv_dst - 1 + 8);
4025  AV_ZERO32(mv_dst - 1 + 16);
4026  AV_ZERO32(mv_dst - 1 + 24);
4027  ref_cache[-1 + 0] =
4028  ref_cache[-1 + 8] =
4029  ref_cache[-1 + 16] =
4030  ref_cache[-1 + 24] = LIST_NOT_USED;
4031  }
4032  }
4033  }
4034 
/* Current MB does not use this list: clear the whole 4x4 cache block. */
4035  if (!USES_LIST(mb_type, list)) {
4036  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
4037  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
4038  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
4039  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
4040  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
4041  return;
4042  }
4043 
/* Current MB: broadcast the four 8x8-block ref indices into the cache... */
4044  {
4045  int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
4046  int (*ref2frm)[64] = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF(h) ? 20 : 2);
4047  uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
4048  uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
4049  AV_WN32A(&ref_cache[0 * 8], ref01);
4050  AV_WN32A(&ref_cache[1 * 8], ref01);
4051  AV_WN32A(&ref_cache[2 * 8], ref23);
4052  AV_WN32A(&ref_cache[3 * 8], ref23);
4053  }
4054 
/* ...and copy its 4x4 grid of motion vectors row by row. */
4055  {
4056  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
4057  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
4058  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
4059  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
4060  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
4061  }
4062 }
4063 
/* Prepare all per-macroblock caches the loop filter needs for the MB at
 * h->mb_xy: neighbour coordinates/types, MV and reference caches (via
 * fill_filter_caches_inter) and the non-zero-count cache.
 * Returns 1 if filtering can be skipped for this MB (QP of the MB and its
 * neighbours is at or below the slice QP threshold), 0 otherwise. */
4068 static int fill_filter_caches(H264Context *h, int mb_type)
4069 {
4070  const int mb_xy = h->mb_xy;
4071  int top_xy, left_xy[LEFT_MBS];
4072  int top_type, left_type[LEFT_MBS];
4073  uint8_t *nnz;
4074  uint8_t *nnz_cache;
4075 
4076  top_xy = mb_xy - (h->mb_stride << MB_FIELD(h));
4077 
4078  /* Wow, what a mess, why didn't they simplify the interlacing & intra
4079  * stuff, I can't imagine that these complex rules are worth it. */
4080 
/* MBAFF: neighbour addresses depend on the field/frame flag of the current
 * and left MB pair, and on whether this is the top or bottom MB of a pair. */
4081  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
4082  if (FRAME_MBAFF(h)) {
4083  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
4084  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
4085  if (h->mb_y & 1) {
4086  if (left_mb_field_flag != curr_mb_field_flag)
4087  left_xy[LTOP] -= h->mb_stride;
4088  } else {
4089  if (curr_mb_field_flag)
4090  top_xy += h->mb_stride &
4091  (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
4092  if (left_mb_field_flag != curr_mb_field_flag)
4093  left_xy[LBOT] += h->mb_stride;
4094  }
4095  }
4096 
4097  h->top_mb_xy = top_xy;
4098  h->left_mb_xy[LTOP] = left_xy[LTOP];
4099  h->left_mb_xy[LBOT] = left_xy[LBOT];
4100  {
4101  /* For sufficiently low qp, filtering wouldn't do anything.
4102  * This is a conservative estimate: could also check beta_offset
4103  * and more accurate chroma_qp. */
4104  int qp_thresh = h->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
4105  int qp = h->cur_pic.qscale_table[mb_xy];
4106  if (qp <= qp_thresh &&
4107  (left_xy[LTOP] < 0 ||
4108  ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
4109  (top_xy < 0 ||
4110  ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
4111  if (!FRAME_MBAFF(h))
4112  return 1;
/* In MBAFF also check the second left MB and the MB above the top pair. */
4113  if ((left_xy[LTOP] < 0 ||
4114  ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
4115  (top_xy < h->mb_stride ||
4116  ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
4117  return 1;
4118  }
4119  }
4120 
/* Neighbour MB types; deblocking mode 2 treats MBs of other slices as
 * unavailable (no filtering across slice boundaries), otherwise only
 * never-decoded neighbours (slice_table == 0xFFFF) are unavailable. */
4121  top_type = h->cur_pic.mb_type[top_xy];
4122  left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
4123  left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
4124  if (h->deblocking_filter == 2) {
4125  if (h->slice_table[top_xy] != h->slice_num)
4126  top_type = 0;
4127  if (h->slice_table[left_xy[LBOT]] != h->slice_num)
4128  left_type[LTOP] = left_type[LBOT] = 0;
4129  } else {
4130  if (h->slice_table[top_xy] == 0xFFFF)
4131  top_type = 0;
4132  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
4133  left_type[LTOP] = left_type[LBOT] = 0;
4134  }
4135  h->top_type = top_type;
4136  h->left_type[LTOP] = left_type[LTOP];
4137  h->left_type[LBOT] = left_type[LBOT];
4138 
/* Intra MBs are always filtered with maximum strength: no MV/ref caches
 * are needed. */
4139  if (IS_INTRA(mb_type))
4140  return 0;
4141 
4142  fill_filter_caches_inter(h, mb_type, top_xy, left_xy,
4143  top_type, left_type, mb_xy, 0);
4144  if (h->list_count == 2)
4145  fill_filter_caches_inter(h, mb_type, top_xy, left_xy,
4146  top_type, left_type, mb_xy, 1);
4147 
/* Non-zero coefficient counts for the current MB and its neighbours. */
4148  nnz = h->non_zero_count[mb_xy];
4149  nnz_cache = h->non_zero_count_cache;
4150  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
4151  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
4152  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
4153  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
4154  h->cbp = h->cbp_table[mb_xy];
4155 
4156  if (top_type) {
4157  nnz = h->non_zero_count[top_xy];
4158  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
4159  }
4160 
4161  if (left_type[LTOP]) {
4162  nnz = h->non_zero_count[left_xy[LTOP]];
4163  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
4164  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
4165  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
4166  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
4167  }
4168 
4169  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
4170  * from what the loop filter needs */
4171  if (!CABAC(h) && h->pps.transform_8x8_mode) {
4172  if (IS_8x8DCT(top_type)) {
4173  nnz_cache[4 + 8 * 0] =
4174  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
4175  nnz_cache[6 + 8 * 0] =
4176  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
4177  }
4178  if (IS_8x8DCT(left_type[LTOP])) {
4179  nnz_cache[3 + 8 * 1] =
4180  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
4181  }
4182  if (IS_8x8DCT(left_type[LBOT])) {
4183  nnz_cache[3 + 8 * 3] =
4184  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
4185  }
4186 
/* Derive per-4x4 NNZ flags for the current MB from its 8x8 CBP bits. */
4187  if (IS_8x8DCT(mb_type)) {
4188  nnz_cache[scan8[0]] =
4189  nnz_cache[scan8[1]] =
4190  nnz_cache[scan8[2]] =
4191  nnz_cache[scan8[3]] = (h->cbp & 0x1000) >> 12;
4192 
4193  nnz_cache[scan8[0 + 4]] =
4194  nnz_cache[scan8[1 + 4]] =
4195  nnz_cache[scan8[2 + 4]] =
4196  nnz_cache[scan8[3 + 4]] = (h->cbp & 0x2000) >> 12;
4197 
4198  nnz_cache[scan8[0 + 8]] =
4199  nnz_cache[scan8[1 + 8]] =
4200  nnz_cache[scan8[2 + 8]] =
4201  nnz_cache[scan8[3 + 8]] = (h->cbp & 0x4000) >> 12;
4202 
4203  nnz_cache[scan8[0 + 12]] =
4204  nnz_cache[scan8[1 + 12]] =
4205  nnz_cache[scan8[2 + 12]] =
4206  nnz_cache[scan8[3 + 12]] = (h->cbp & 0x8000) >> 12;
4207  }
4208  }
4209 
4210  return 0;
4211 }
4212 
/* Run the deblocking loop filter over macroblock columns [start_x, end_x)
 * of the current row (both rows of the pair when FRAME_MBAFF).
 * Temporarily overwrites h->slice_num / h->list_count / h->mb_x / h->mb_y /
 * chroma QP to match each filtered MB, then restores slice_type, mb_x/mb_y
 * and the slice-level chroma QP on exit. */
4213 static void loop_filter(H264Context *h, int start_x, int end_x)
4214 {
4215  uint8_t *dest_y, *dest_cb, *dest_cr;
4216  int linesize, uvlinesize, mb_x, mb_y;
4217  const int end_mb_y = h->mb_y + FRAME_MBAFF(h);
4218  const int old_slice_type = h->slice_type;
4219  const int pixel_shift = h->pixel_shift;
4220  const int block_h = 16 >> h->chroma_y_shift;
4221 
4222  if (h->deblocking_filter) {
4223  for (mb_x = start_x; mb_x < end_x; mb_x++)
4224  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
4225  int mb_xy, mb_type;
/* Load the per-MB state the filter depends on (slice, type, lists). */
4226  mb_xy = h->mb_xy = mb_x + mb_y * h->mb_stride;
4227  h->slice_num = h->slice_table[mb_xy];
4228  mb_type = h->cur_pic.mb_type[mb_xy];
4229  h->list_count = h->list_counts[mb_xy];
4230 
4231  if (FRAME_MBAFF(h))
4232  h->mb_mbaff =
4233  h->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
4234 
/* Compute the luma/chroma destination pointers for this MB. */
4235  h->mb_x = mb_x;
4236  h->mb_y = mb_y;
4237  dest_y = h->cur_pic.f.data[0] +
4238  ((mb_x << pixel_shift) + mb_y * h->linesize) * 16;
4239  dest_cb = h->cur_pic.f.data[1] +
4240  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
4241  mb_y * h->uvlinesize * block_h;
4242  dest_cr = h->cur_pic.f.data[2] +
4243  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
4244  mb_y * h->uvlinesize * block_h;
4245  // FIXME simplify above
4246 
/* Field MBs use doubled strides; the bottom field starts one line up. */
4247  if (MB_FIELD(h)) {
4248  linesize = h->mb_linesize = h->linesize * 2;
4249  uvlinesize = h->mb_uvlinesize = h->uvlinesize * 2;
4250  if (mb_y & 1) { // FIXME move out of this function?
4251  dest_y -= h->linesize * 15;
4252  dest_cb -= h->uvlinesize * (block_h - 1);
4253  dest_cr -= h->uvlinesize * (block_h - 1);
4254  }
4255  } else {
4256  linesize = h->mb_linesize = h->linesize;
4257  uvlinesize = h->mb_uvlinesize = h->uvlinesize;
4258  }
4259  backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
4260  uvlinesize, 0);
/* fill_filter_caches() returning 1 means this MB needs no filtering. */
4261  if (fill_filter_caches(h, mb_type))
4262  continue;
4263  h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.qscale_table[mb_xy]);
4264  h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.qscale_table[mb_xy]);
4265 
4266  if (FRAME_MBAFF(h)) {
4267  ff_h264_filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr,
4268  linesize, uvlinesize);
4269  } else {
4270  ff_h264_filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb,
4271  dest_cr, linesize, uvlinesize);
4272  }
4273  }
4274  }
/* Restore the slice-level state clobbered above. */
4275  h->slice_type = old_slice_type;
4276  h->mb_x = end_x;
4277  h->mb_y = end_mb_y - FRAME_MBAFF(h);
4278  h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale);
4279  h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale);
4280 }
4281 
4283 {
4284  const int mb_xy = h->mb_x + h->mb_y * h->mb_stride;
4285  int mb_type = (h->slice_table[mb_xy - 1] == h->slice_num) ?
4286  h->cur_pic.mb_type[mb_xy - 1] :
4287  (h->slice_table[mb_xy - h->mb_stride] == h->slice_num) ?
4288  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
4289  h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
4290 }
4291 
4296 {
4297  int top = 16 * (h->mb_y >> FIELD_PICTURE(h));
4298  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
4299  int height = 16 << FRAME_MBAFF(h);
4300  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
4301 
4302  if (h->deblocking_filter) {
4303  if ((top + height) >= pic_height)
4304  height += deblock_border;
4305  top -= deblock_border;
4306  }
4307 
4308  if (top >= pic_height || (top + height) < 0)
4309  return;
4310 
4311  height = FFMIN(height, pic_height - top);
4312  if (top < 0) {
4313  height = top + height;
4314  top = 0;
4315  }
4316 
4317  ff_h264_draw_horiz_band(h, top, height);
4318 
4319  if (h->droppable)
4320  return;
4321 
4322  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
4324 }
4325 
4326 static void er_add_slice(H264Context *h, int startx, int starty,
4327  int endx, int endy, int status)
4328 {
4329 #if CONFIG_ERROR_RESILIENCE
4330  ERContext *er = &h->er;
4331 
4332  er->ref_count = h->ref_count[0];
4333  ff_er_add_slice(er, startx, starty, endx, endy, status);
4334 #endif
4335 }
4336 
4337 static int decode_slice(struct AVCodecContext *avctx, void *arg)
4338 {
4339  H264Context *h = *(void **)arg;
4340  int lf_x_start = h->mb_x;
4341 
4342  h->mb_skip_run = -1;
4343 
4345  avctx->codec_id != AV_CODEC_ID_H264 ||
4346  (CONFIG_GRAY && (h->flags & CODEC_FLAG_GRAY));
4347 
4348  if (h->pps.cabac) {
4349  /* realign */
4350  align_get_bits(&h->gb);
4351 
4352  /* init cabac */
4354  h->gb.buffer + get_bits_count(&h->gb) / 8,
4355  (get_bits_left(&h->gb) + 7) / 8);
4356 
4358 
4359  for (;;) {
4360  // START_TIMER
4361  int ret = ff_h264_decode_mb_cabac(h);
4362  int eos;
4363  // STOP_TIMER("decode_mb_cabac")
4364 
4365  if (ret >= 0)
4367 
4368  // FIXME optimal? or let mb_decode decode 16x32 ?
4369  if (ret >= 0 && FRAME_MBAFF(h)) {
4370  h->mb_y++;
4371 
4372  ret = ff_h264_decode_mb_cabac(h);
4373 
4374  if (ret >= 0)
4376  h->mb_y--;
4377  }
4378  eos = get_cabac_terminate(&h->cabac);
4379 
4380  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
4381  h->cabac.bytestream > h->cabac.bytestream_end + 2) {
4382  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1,
4383  h->mb_y, ER_MB_END);
4384  if (h->mb_x >= lf_x_start)
4385  loop_filter(h, lf_x_start, h->mb_x + 1);
4386  return 0;
4387  }
4388  if (ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 2) {
4390  "error while decoding MB %d %d, bytestream %td\n",
4391  h->mb_x, h->mb_y,
4393  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
4394  h->mb_y, ER_MB_ERROR);
4395  return AVERROR_INVALIDDATA;
4396  }
4397 
4398  if (++h->mb_x >= h->mb_width) {
4399  loop_filter(h, lf_x_start, h->mb_x);
4400  h->mb_x = lf_x_start = 0;
4401  decode_finish_row(h);
4402  ++h->mb_y;
4403  if (FIELD_OR_MBAFF_PICTURE(h)) {
4404  ++h->mb_y;
4405  if (FRAME_MBAFF(h) && h->mb_y < h->mb_height)
4407  }
4408  }
4409 
4410  if (eos || h->mb_y >= h->mb_height) {
4411  tprintf(h->avctx, "slice end %d %d\n",
4412  get_bits_count(&h->gb), h->gb.size_in_bits);
4413  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1,
4414  h->mb_y, ER_MB_END);
4415  if (h->mb_x > lf_x_start)
4416  loop_filter(h, lf_x_start, h->mb_x);
4417  return 0;
4418  }
4419  }
4420  } else {
4421  for (;;) {
4422  int ret = ff_h264_decode_mb_cavlc(h);
4423 
4424  if (ret >= 0)
4426 
4427  // FIXME optimal? or let mb_decode decode 16x32 ?
4428  if (ret >= 0 && FRAME_MBAFF(h)) {
4429  h->mb_y++;
4430  ret = ff_h264_decode_mb_cavlc(h);
4431 
4432  if (ret >= 0)
4434  h->mb_y--;
4435  }
4436 
4437  if (ret < 0) {
4439  "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
4440  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
4441  h->mb_y, ER_MB_ERROR);
4442  return ret;
4443  }
4444 
4445  if (++h->mb_x >= h->mb_width) {
4446  loop_filter(h, lf_x_start, h->mb_x);
4447  h->mb_x = lf_x_start = 0;
4448  decode_finish_row(h);
4449  ++h->mb_y;
4450  if (FIELD_OR_MBAFF_PICTURE(h)) {
4451  ++h->mb_y;
4452  if (FRAME_MBAFF(h) && h->mb_y < h->mb_height)
4454  }
4455  if (h->mb_y >= h->mb_height) {
4456  tprintf(h->avctx, "slice end %d %d\n",
4457  get_bits_count(&h->gb), h->gb.size_in_bits);
4458 
4459  if (get_bits_left(&h->gb) == 0) {
4461  h->mb_x - 1, h->mb_y,
4462  ER_MB_END);
4463 
4464  return 0;
4465  } else {
4467  h->mb_x - 1, h->mb_y,
4468  ER_MB_END);
4469 
4470  return AVERROR_INVALIDDATA;
4471  }
4472  }
4473  }
4474 
4475  if (get_bits_left(&h->gb) <= 0 && h->mb_skip_run <= 0) {
4476  tprintf(h->avctx, "slice end %d %d\n",
4477  get_bits_count(&h->gb), h->gb.size_in_bits);
4478 
4479  if (get_bits_left(&h->gb) == 0) {
4481  h->mb_x - 1, h->mb_y,
4482  ER_MB_END);
4483  if (h->mb_x > lf_x_start)
4484  loop_filter(h, lf_x_start, h->mb_x);
4485 
4486  return 0;
4487  } else {
4488  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
4489  h->mb_y, ER_MB_ERROR);
4490 
4491  return AVERROR_INVALIDDATA;
4492  }
4493  }
4494  }
4495  }
4496 }
4497 
4504 static int execute_decode_slices(H264Context *h, unsigned context_count)
4505 {
4506  AVCodecContext *const avctx = h->avctx;
4507  H264Context *hx;
4508  int i;
4509 
4510  if (h->mb_y >= h->mb_height) {
4512  "Input contains more MB rows than the frame height.\n");
4513  return AVERROR_INVALIDDATA;
4514  }
4515 
4516  if (h->avctx->hwaccel)
4517  return 0;
4518  if (context_count == 1) {
4519  return decode_slice(avctx, &h);
4520  } else {
4521  for (i = 1; i < context_count; i++) {
4522  hx = h->thread_context[i];
4523  hx->er.error_count = 0;
4524  }
4525 
4526  avctx->execute(avctx, decode_slice, h->thread_context,
4527  NULL, context_count, sizeof(void *));
4528 
4529  /* pull back stuff from slices to master context */
4530  hx = h->thread_context[context_count - 1];
4531  h->mb_x = hx->mb_x;
4532  h->mb_y = hx->mb_y;
4533  h->droppable = hx->droppable;
4535  for (i = 1; i < context_count; i++)
4537  }
4538 
4539  return 0;
4540 }
4541 
4542 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
4543  int parse_extradata)
4544 {
4545  AVCodecContext *const avctx = h->avctx;
4546  H264Context *hx;
4547  int buf_index;
4548  unsigned context_count;
4549  int next_avc;
4550  int pass = !(avctx->active_thread_type & FF_THREAD_FRAME);
4551  int nals_needed = 0;
4552  int nal_index;
4553  int ret = 0;
4554 
4556  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) {
4557  h->current_slice = 0;
4558  if (!h->first_field)
4559  h->cur_pic_ptr = NULL;
4560  ff_h264_reset_sei(h);
4561  }
4562 
4563  for (; pass <= 1; pass++) {
4564  buf_index = 0;
4565  context_count = 0;
4566  next_avc = h->is_avc ? 0 : buf_size;
4567  nal_index = 0;
4568  for (;;) {
4569  int consumed;
4570  int dst_length;
4571  int bit_length;
4572  const uint8_t *ptr;
4573  int i, nalsize = 0;
4574  int err;
4575 
4576  if (buf_index >= next_avc) {
4577  if (buf_index >= buf_size - h->nal_length_size)
4578  break;
4579  nalsize = 0;
4580  for (i = 0; i < h->nal_length_size; i++)
4581  nalsize = (nalsize << 8) | buf[buf_index++];
4582  if (nalsize <= 0 || nalsize > buf_size - buf_index) {
4584  "AVC: nal size %d\n", nalsize);
4585  break;
4586  }
4587  next_avc = buf_index + nalsize;
4588  } else {
4589  // start code prefix search
4590  for (; buf_index + 3 < next_avc; buf_index++)
4591  // This should always succeed in the first iteration.
4592  if (buf[buf_index] == 0 &&
4593  buf[buf_index + 1] == 0 &&
4594  buf[buf_index + 2] == 1)
4595  break;
4596 
4597  if (buf_index + 3 >= buf_size) {
4598  buf_index = buf_size;
4599  break;
4600  }
4601 
4602  buf_index += 3;
4603  if (buf_index >= next_avc)
4604  continue;
4605  }
4606 
4607  hx = h->thread_context[context_count];
4608 
4609  ptr = ff_h264_decode_nal(hx, buf + buf_index, &dst_length,
4610  &consumed, next_avc - buf_index);
4611  if (ptr == NULL || dst_length < 0) {
4612  ret = -1;
4613  goto end;
4614  }
4615  i = buf_index + consumed;
4616  if ((h->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
4617  buf[i] == 0x00 && buf[i + 1] == 0x00 &&
4618  buf[i + 2] == 0x01 && buf[i + 3] == 0xE0)
4620 
4621  if (!(h->workaround_bugs & FF_BUG_TRUNCATED))
4622  while (dst_length > 0 && ptr[dst_length - 1] == 0)
4623  dst_length--;
4624  bit_length = !dst_length ? 0
4625  : (8 * dst_length -
4626  decode_rbsp_trailing(h, ptr + dst_length - 1));
4627 
4628  if (h->avctx->debug & FF_DEBUG_STARTCODE)
4630  "NAL %d at %d/%d length %d\n",
4631  hx->nal_unit_type, buf_index, buf_size, dst_length);
4632 
4633  if (h->is_avc && (nalsize != consumed) && nalsize)
4635  "AVC: Consumed only %d bytes instead of %d\n",
4636  consumed, nalsize);
4637 
4638  buf_index += consumed;
4639  nal_index++;
4640 
4641  if (pass == 0) {
4642  /* packets can sometimes contain multiple PPS/SPS,
4643  * e.g. two PAFF field pictures in one packet, or a demuxer
4644  * which splits NALs strangely if so, when frame threading we
4645  * can't start the next thread until we've read all of them */
4646  switch (hx->nal_unit_type) {
4647  case NAL_SPS:
4648  case NAL_PPS:
4649  nals_needed = nal_index;
4650  break;
4651  case NAL_DPA:
4652  case NAL_IDR_SLICE:
4653  case NAL_SLICE:
4654  init_get_bits(&hx->gb, ptr, bit_length);
4655  if (!get_ue_golomb(&hx->gb))
4656  nals_needed = nal_index;
4657  }
4658  continue;
4659  }
4660 
4661  if (avctx->skip_frame >= AVDISCARD_NONREF &&
4662  h->nal_ref_idc == 0 &&
4663  h->nal_unit_type != NAL_SEI)
4664  continue;
4665 
4666 again:
4667  /* Ignore every NAL unit type except PPS and SPS during extradata
4668  * parsing. Decoding slices is not possible in codec init
4669  * with frame-mt */
4670  if (parse_extradata && HAVE_THREADS &&
4672  (hx->nal_unit_type != NAL_PPS &&
4673  hx->nal_unit_type != NAL_SPS)) {
4674  if (hx->nal_unit_type < NAL_AUD ||
4676  av_log(avctx, AV_LOG_INFO,
4677  "Ignoring NAL unit %d during extradata parsing\n",
4678  hx->nal_unit_type);
4680  }
4681  err = 0;
4682  switch (hx->nal_unit_type) {
4683  case NAL_IDR_SLICE:
4684  if (h->nal_unit_type != NAL_IDR_SLICE) {
4686  "Invalid mix of idr and non-idr slices\n");
4687  ret = -1;
4688  goto end;
4689  }
4690  idr(h); // FIXME ensure we don't lose some frames if there is reordering
4691  case NAL_SLICE:
4692  init_get_bits(&hx->gb, ptr, bit_length);
4693  hx->intra_gb_ptr =
4694  hx->inter_gb_ptr = &hx->gb;
4695  hx->data_partitioning = 0;
4696 
4697  if ((err = decode_slice_header(hx, h)))
4698  break;
4699 
4700  if (h->sei_recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
4702  ((1 << h->sps.log2_max_frame_num) - 1);
4703  }
4704 
4705  h->cur_pic_ptr->f.key_frame |=
4706  (hx->nal_unit_type == NAL_IDR_SLICE) ||
4707  (h->sei_recovery_frame_cnt >= 0);
4708 
4709  if (hx->nal_unit_type == NAL_IDR_SLICE ||
4710  h->recovery_frame == h->frame_num) {
4711  h->recovery_frame = -1;
4712  h->cur_pic_ptr->recovered = 1;
4713  }
4714  // If we have an IDR, all frames after it in decoded order are
4715  // "recovered".
4716  if (hx->nal_unit_type == NAL_IDR_SLICE)
4719 
4720  if (h->current_slice == 1) {
4721  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS))
4722  decode_postinit(h, nal_index >= nals_needed);
4723 
4724  if (h->avctx->hwaccel &&
4725  (ret = h->avctx->hwaccel->start_frame(h->avctx, NULL, 0)) < 0)
4726  return ret;
4727  }
4728 
4729  if (hx->redundant_pic_count == 0 &&
4730  (avctx->skip_frame < AVDISCARD_NONREF ||
4731  hx->nal_ref_idc) &&
4732  (avctx->skip_frame < AVDISCARD_BIDIR ||
4734  (avctx->skip_frame < AVDISCARD_NONKEY ||
4736  avctx->skip_frame < AVDISCARD_ALL) {
4737  if (avctx->hwaccel) {
4738  ret = avctx->hwaccel->decode_slice(avctx,
4739  &buf[buf_index - consumed],
4740  consumed);
4741  if (ret < 0)
4742  return ret;
4743  } else
4744  context_count++;
4745  }
4746  break;
4747  case NAL_DPA:
4748  if (h->avctx->flags & CODEC_FLAG2_CHUNKS) {
4750  "Decoding in chunks is not supported for "
4751  "partitioned slices.\n");
4752  return AVERROR(ENOSYS);
4753  }
4754 
4755  init_get_bits(&hx->gb, ptr, bit_length);
4756  hx->intra_gb_ptr =
4757  hx->inter_gb_ptr = NULL;
4758 
4759  if ((err = decode_slice_header(hx, h)) < 0) {
4760  /* make sure data_partitioning is cleared if it was set
4761  * before, so we don't try decoding a slice without a valid
4762  * slice header later */
4763  h->data_partitioning = 0;
4764  break;
4765  }
4766 
4767  hx->data_partitioning = 1;
4768  break;
4769  case NAL_DPB:
4770  init_get_bits(&hx->intra_gb, ptr, bit_length);
4771  hx->intra_gb_ptr = &hx->intra_gb;
4772  break;
4773  case NAL_DPC:
4774  init_get_bits(&hx->inter_gb, ptr, bit_length);
4775  hx->inter_gb_ptr = &hx->inter_gb;
4776 
4777  if (hx->redundant_pic_count == 0 &&
4778  hx->intra_gb_ptr &&
4779  hx->data_partitioning &&
4780  h->cur_pic_ptr && h->context_initialized &&
4781  (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) &&
4782  (avctx->skip_frame < AVDISCARD_BIDIR ||
4784  (avctx->skip_frame < AVDISCARD_NONKEY ||
4786  avctx->skip_frame < AVDISCARD_ALL)
4787  context_count++;
4788  break;
4789  case NAL_SEI:
4790  init_get_bits(&h->gb, ptr, bit_length);
4791  ff_h264_decode_sei(h);
4792  break;
4793  case NAL_SPS:
4794  init_get_bits(&h->gb, ptr, bit_length);
4796  if (ret < 0 && h->is_avc && (nalsize != consumed) && nalsize) {
4798  "SPS decoding failure, trying again with the complete NAL\n");
4799  init_get_bits(&h->gb, buf + buf_index + 1 - consumed,
4800  8 * (nalsize - 1));
4802  }
4803 
4804  ret = h264_set_parameter_from_sps(h);
4805  if (ret < 0)
4806  goto end;
4807 
4808  break;
4809  case NAL_PPS:
4810  init_get_bits(&h->gb, ptr, bit_length);
4811  ff_h264_decode_picture_parameter_set(h, bit_length);
4812  break;
4813  case NAL_AUD:
4814  case NAL_END_SEQUENCE:
4815  case NAL_END_STREAM:
4816  case NAL_FILLER_DATA:
4817  case NAL_SPS_EXT:
4818  case NAL_AUXILIARY_SLICE:
4819  break;
4820  case NAL_FF_IGNORE:
4821  break;
4822  default:
4823  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
4824  hx->nal_unit_type, bit_length);
4825  }
4826 
4827  if (context_count == h->max_contexts) {
4828  execute_decode_slices(h, context_count);
4829  context_count = 0;
4830  }
4831 
4832  if (err < 0) {
4833  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
4834  h->ref_count[0] = h->ref_count[1] = h->list_count = 0;
4835  } else if (err == 1) {
4836  /* Slice could not be decoded in parallel mode, copy down
4837  * NAL unit stuff to context 0 and restart. Note that
4838  * rbsp_buffer is not transferred, but since we no longer
4839  * run in parallel mode this should not be an issue. */
4840  h->nal_unit_type = hx->nal_unit_type;
4841  h->nal_ref_idc = hx->nal_ref_idc;
4842  hx = h;
4843  goto again;
4844  }
4845  }
4846  }
4847  if (context_count)
4848  execute_decode_slices(h, context_count);
4849 
4850 end:
4851  /* clean up */
4852  if (h->cur_pic_ptr && !h->droppable) {
4855  }
4856 
4857  return (ret < 0) ? ret : buf_index;
4858 }
4859 
4863 static int get_consumed_bytes(int pos, int buf_size)
4864 {
4865  if (pos == 0)
4866  pos = 1; // avoid infinite loops (i doubt that is needed but ...)
4867  if (pos + 10 > buf_size)
4868  pos = buf_size; // oops ;)
4869 
4870  return pos;
4871 }
4872 
4873 static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
4874 {
4875  int i;
4876  int ret = av_frame_ref(dst, src);
4877  if (ret < 0)
4878  return ret;
4879 
4880  if (!h->sps.crop)
4881  return 0;
4882 
4883  for (i = 0; i < 3; i++) {
4884  int hshift = (i > 0) ? h->chroma_x_shift : 0;
4885  int vshift = (i > 0) ? h->chroma_y_shift : 0;
4886  int off = ((h->sps.crop_left >> hshift) << h->pixel_shift) +
4887  (h->sps.crop_top >> vshift) * dst->linesize[i];
4888  dst->data[i] += off;
4889  }
4890  return 0;
4891 }
4892 
4893 static int h264_decode_frame(AVCodecContext *avctx, void *data,
4894  int *got_frame, AVPacket *avpkt)
4895 {
4896  const uint8_t *buf = avpkt->data;
4897  int buf_size = avpkt->size;
4898  H264Context *h = avctx->priv_data;
4899  AVFrame *pict = data;
4900  int buf_index = 0;
4901  int ret;
4902 
4903  h->flags = avctx->flags;
4904  /* reset data partitioning here, to ensure GetBitContexts from previous
4905  * packets do not get used. */
4906  h->data_partitioning = 0;
4907 
4908  /* end of stream, output what is still in the buffers */
4909 out:
4910  if (buf_size == 0) {
4911  Picture *out;
4912  int i, out_idx;
4913 
4914  h->cur_pic_ptr = NULL;
4915 
4916  // FIXME factorize this with the output code below
4917  out = h->delayed_pic[0];
4918  out_idx = 0;
4919  for (i = 1;
4920  h->delayed_pic[i] &&
4921  !h->delayed_pic[i]->f.key_frame &&
4922  !h->delayed_pic[i]->mmco_reset;
4923  i++)
4924  if (h->delayed_pic[i]->poc < out->poc) {
4925  out = h->delayed_pic[i];
4926  out_idx = i;
4927  }
4928 
4929  for (i = out_idx; h->delayed_pic[i]; i++)
4930  h->delayed_pic[i] = h->delayed_pic[i + 1];
4931 
4932  if (out) {
4933  ret = output_frame(h, pict, &out->f);
4934  if (ret < 0)
4935  return ret;
4936  *got_frame = 1;
4937  }
4938 
4939  return buf_index;
4940  }
4941 
4942  buf_index = decode_nal_units(h, buf, buf_size, 0);
4943  if (buf_index < 0)
4944  return AVERROR_INVALIDDATA;
4945 
4946  if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
4947  buf_size = 0;
4948  goto out;
4949  }
4950 
4951  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
4952  if (avctx->skip_frame >= AVDISCARD_NONREF)
4953  return 0;
4954  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
4955  return AVERROR_INVALIDDATA;
4956  }
4957 
4958  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) ||
4959  (h->mb_y >= h->mb_height && h->mb_height)) {
4960  if (avctx->flags2 & CODEC_FLAG2_CHUNKS)
4961  decode_postinit(h, 1);
4962 
4963  field_end(h, 0);
4964 
4965  *got_frame = 0;
4966  if (h->next_output_pic && ((avctx->flags & CODEC_FLAG_OUTPUT_CORRUPT) ||
4967  h->next_output_pic->recovered)) {
4968  if (!h->next_output_pic->recovered)
4970 
4971  ret = output_frame(h, pict, &h->next_output_pic->f);
4972  if (ret < 0)
4973  return ret;
4974  *got_frame = 1;
4975  }
4976  }
4977 
4978  assert(pict->buf[0] || !*got_frame);
4979 
4980  return get_consumed_bytes(buf_index, buf_size);
4981 }
4982 
4984 {
4985  int i;
4986 
4987  free_tables(h, 1); // FIXME cleanup init stuff perhaps
4988 
4989  for (i = 0; i < MAX_SPS_COUNT; i++)
4990  av_freep(h->sps_buffers + i);
4991 
4992  for (i = 0; i < MAX_PPS_COUNT; i++)
4993  av_freep(h->pps_buffers + i);
4994 }
4995 
4997 {
4998  H264Context *h = avctx->priv_data;
4999 
5001 
5002  unref_picture(h, &h->cur_pic);
5003 
5004  return 0;
5005 }
5006 
5007 static const AVProfile profiles[] = {
5008  { FF_PROFILE_H264_BASELINE, "Baseline" },
5009  { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline" },
5010  { FF_PROFILE_H264_MAIN, "Main" },
5011  { FF_PROFILE_H264_EXTENDED, "Extended" },
5012  { FF_PROFILE_H264_HIGH, "High" },
5013  { FF_PROFILE_H264_HIGH_10, "High 10" },
5014  { FF_PROFILE_H264_HIGH_10_INTRA, "High 10 Intra" },
5015  { FF_PROFILE_H264_HIGH_422, "High 4:2:2" },
5016  { FF_PROFILE_H264_HIGH_422_INTRA, "High 4:2:2 Intra" },
5017  { FF_PROFILE_H264_HIGH_444, "High 4:4:4" },
5018  { FF_PROFILE_H264_HIGH_444_PREDICTIVE, "High 4:4:4 Predictive" },
5019  { FF_PROFILE_H264_HIGH_444_INTRA, "High 4:4:4 Intra" },
5020  { FF_PROFILE_H264_CAVLC_444, "CAVLC 4:4:4" },
5021  { FF_PROFILE_UNKNOWN },
5022 };
5023 
5025  .name = "h264",
5026  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
5027  .type = AVMEDIA_TYPE_VIDEO,
5028  .id = AV_CODEC_ID_H264,
5029  .priv_data_size = sizeof(H264Context),
5033  .capabilities = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 |
5036  .flush = flush_dpb,
5037  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
5038  .update_thread_context = ONLY_IF_THREADS_ENABLED(decode_update_thread_context),
5039  .profiles = NULL_IF_CONFIG_SMALL(profiles),
5040 };
int chroma_format_idc
Definition: h264.h:152
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free
Definition: mpegvideo.h:213
Picture default_ref_list[2][32]
base reference list for all slices of a coded picture
Definition: h264.h:519
#define PICT_BOTTOM_FIELD
Definition: mpegvideo.h:645
#define FF_PROFILE_H264_MAIN
Definition: avcodec.h:2629
void ff_h264_direct_dist_scale_factor(H264Context *const h)
Definition: h264_direct.c:50
GetBitContext inter_gb
Definition: h264.h:411
#define XCHG(a, b, xchg)
int video_signal_type_present_flag
Definition: h264.h:177
void(* h264_idct_add)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:80
#define VERT_PRED8x8
Definition: h264pred.h:70
int last_slice_type
Definition: h264.h:569
int ff_h264_decode_mb_cabac(H264Context *h)
Decode a CABAC coded macroblock.
Definition: h264_cabac.c:1881
static void clone_tables(H264Context *dst, H264Context *src, int i)
Mimic alloc_tables(), but for every context thread.
Definition: h264.c:1359
const struct AVCodec * codec
Definition: avcodec.h:1063
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:62
int ff_h264_decode_seq_parameter_set(H264Context *h)
Decode SPS.
Definition: h264_ps.c:297
#define PICT_TOP_FIELD
Definition: mpegvideo.h:644
discard all frames except keyframes
Definition: avcodec.h:545
uint8_t * edge_emu_buffer
Definition: h264.h:649
int8_t * ref_index[2]
Definition: mpegvideo.h:140
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2903
int workaround_bugs
Definition: h264.h:292
av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
Definition: dsputil.c:2440
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:65
#define MAX_PICTURE_COUNT
Definition: mpegvideo.h:66
unsigned int top_samples_available
Definition: h264.h:318
#define FF_PROFILE_H264_CAVLC_444
Definition: avcodec.h:2639
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:54
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, int parse_extradata)
Definition: h264.c:4542
unsigned int topleft_samples_available
Definition: h264.h:317
#define DC_128_PRED8x8
Definition: h264pred.h:76
int single_decode_warning
1 if the single thread fallback warning has already been displayed, 0 otherwise.
Definition: h264.h:565
GetBitContext gb
Definition: h264.h:267
#define CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:691
Views are packed per line, as if interlaced.
Definition: stereo3d.h:97
#define AV_NUM_DATA_POINTERS
Definition: frame.h:108
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG2 field pics)
Definition: avcodec.h:1553
5: top field, bottom field, top field repeated, in that order
Definition: h264.h:139
static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
Definition: h264.c:2410
#define HAVE_THREADS
Definition: config.h:250
int low_delay
Definition: h264.h:288
int mb_num
Definition: h264.h:460
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:105
int size
GetBitContext * intra_gb_ptr
Definition: h264.h:412
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1507
This structure describes decoded (raw) audio or video data.
Definition: frame.h:107
int mb_aff_frame
Definition: h264.h:372
void(* pred8x8l_add[2])(uint8_t *pix, int16_t *block, ptrdiff_t stride)
Definition: h264pred.h:102
int delta_poc[2]
Definition: h264.h:499
#define IS_SUB_4X4(a)
Definition: mpegvideo.h:175
Views are alternated temporally.
Definition: stereo3d.h:66
ptrdiff_t uvlinesize
Definition: h264.h:281
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:179
int last_qscale_diff
Definition: h264.h:433
#define CHROMA444(h)
Definition: h264.h:89
#define LEFT_MBS
Definition: h264.h:66
mpeg2/4, h264 default
Definition: avcodec.h:607
int quincunx_subsampling
Definition: h264.h:591
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1227
int cbp
Definition: h264.h:428
3: top field, bottom field, in that order
Definition: h264.h:137
const uint8_t * ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length)
Decode a network abstraction layer unit.
Definition: h264.c:577
int first_field
Definition: h264.h:376
static const uint8_t field_scan8x8_cavlc[64]
Definition: h264.c:91
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:70
misc image utilities
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:240
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:87
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:129
#define ER_MB_END
AVFrame * f
Definition: thread.h:36
int weighted_bipred_idc
Definition: h264.h:219
int chroma_qp_index_offset[2]
Definition: h264.h:222
const uint8_t * bytestream_end
Definition: cabac.h:47
int left_type[LEFT_MBS]
Definition: h264.h:309
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:365
H264ChromaContext h264chroma
Definition: h264.h:263
uint16_t * cbp_table
Definition: h264.h:427
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:1517
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264.h:640
MMCO mmco[MAX_MMCO_COUNT]
memory management control operations buffer.
Definition: h264.h:531
7: frame doubling
Definition: h264.h:141
void ff_er_frame_end(ERContext *s)
static av_always_inline void mc_part_weighted(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, h264_weight_func luma_weight_op, h264_weight_func chroma_weight_op, h264_biweight_func luma_weight_avg, h264_biweight_func chroma_weight_avg, int list0, int list1, int pixel_shift, int chroma_idc)
Definition: h264.c:1030
#define MAX_PPS_COUNT
Definition: h264.h:43
Sequence parameter set.
Definition: h264.h:148
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1754
static void init_dequant_tables(H264Context *h)
Definition: h264.c:1278
int coded_picture_number
Definition: h264.h:287
int mb_y
Definition: h264.h:454
int bitstream_restriction_flag
Definition: h264.h:188
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:160
#define FF_PROFILE_H264_INTRA
Definition: avcodec.h:2625
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:217
#define FMO
Definition: h264.h:53
int num
numerator
Definition: rational.h:44
static enum AVPixelFormat get_pixel_format(H264Context *h)
Definition: h264.c:3084
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:287
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
Definition: mpegvideo.h:198
int size
Definition: avcodec.h:974
static void unref_picture(H264Context *h, Picture *pic)
Definition: h264.c:256
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:232
AVBufferPool * mb_type_pool
Definition: h264.h:653
int outputed_poc
Definition: h264.h:525
int chroma_x_shift
Definition: h264.h:282
HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the b...
Definition: pixfmt.h:127
#define CONFIG_SVQ3_DECODER
Definition: config.h:522
const uint8_t * buffer
Definition: get_bits.h:54
Picture parameter set.
Definition: h264.h:211
int field_picture
whether or not the picture was encoded in separate fields
Definition: mpegvideo.h:192
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1422
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264.c:277
int flags
Definition: h264.h:291
const uint8_t * field_scan8x8_q0
Definition: h264.h:449
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1247
int frame_mbs_only_flag
Definition: h264.h:165
int mb_height
Definition: h264.h:458
int16_t * dc_val_base
Definition: h264.h:650
int is_avc
Used to parse AVC variant of h264.
Definition: h264.h:481
static av_always_inline void fill_filter_caches_inter(H264Context *h, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264.c:3984
int mmco_index
Definition: h264.h:532
AVBufferPool * ref_index_pool
Definition: h264.h:655
static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264.c:1658
uint8_t zigzag_scan8x8_cavlc[64]
Definition: h264.h:441
DSPContext dsp
Definition: h264.h:260
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:39
mpegvideo header.
int ff_h264_get_profile(SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264.c:3024
uint32_t dequant8_buffer[6][QP_MAX_NUM+1][64]
Definition: h264.h:361
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264.h:334
void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:325
H264Context.
Definition: h264.h:258
discard all
Definition: avcodec.h:546
static int context_init(H264Context *h)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264.c:1386
int mmco_reset
h264 MMCO_RESET set this 1. Reordering code must not mix pictures before and after MMCO_RESET...
Definition: mpegvideo.h:185
#define IS_INTRA4x4(a)
Definition: mpegvideo.h:158
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
Definition: h264.h:501
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2488
uint32_t num_units_in_tick
Definition: h264.h:184
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
4: bottom field, top field, in that order
Definition: h264.h:138
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264.h:631
static int get_lowest_part_list_y(H264Context *h, Picture *pic, int n, int height, int y_offset, int list)
Definition: h264.c:704
int profile
profile
Definition: avcodec.h:2596
#define HOR_PRED8x8
Definition: h264pred.h:69
int stride
Definition: mace.c:144
AVCodec.
Definition: avcodec.h:2755
int frame_start_found
Definition: parser.h:34
int picture_structure
Definition: h264.h:375
#define AV_WN32A(p, v)
Definition: intreadwrite.h:458
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264.h:368
#define AV_COPY32(d, s)
Definition: intreadwrite.h:506
static const uint8_t rem6[QP_MAX_NUM+1]
Definition: h264.c:53
#define IS_INTRA_PCM(a)
Definition: mpegvideo.h:164
int profile_idc
Definition: h264.h:150
static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int height, int delta, int list, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int src_x_offset, int src_y_offset, qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op, int pixel_shift, int chroma_idc)
Definition: h264.c:868
#define CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:658
static av_always_inline uint32_t pack16to32(int a, int b)
Definition: h264.h:827
void(* h264_add_pixels4_clear)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:107
static const uint8_t zigzag_scan[16]
Definition: h264data.h:55
int recovered
Picture at IDR or recovery point + recovery count.
Definition: mpegvideo.h:202
int mb_skip_run
Definition: h264.h:457
void ff_h264_init_cabac_states(H264Context *h)
Definition: h264_cabac.c:1262
#define FFALIGN(x, a)
Definition: common.h:62
#define USES_LIST(a, list)
does this mb use listX, note does not work if subMBs
Definition: mpegvideo.h:179
#define FF_PROFILE_H264_HIGH_444_PREDICTIVE
Definition: avcodec.h:2637
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1173
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
#define CONFIG_GRAY
Definition: config.h:330
Switching Intra.
Definition: avutil.h:257
uint8_t * chroma_pred_mode_table
Definition: h264.h:432
#define IS_DIR(a, part, list)
Definition: mpegvideo.h:178
static const uint8_t div6[QP_MAX_NUM+1]
Definition: h264.c:59
void(* pred16x16_add[3])(uint8_t *pix, const int *block_offset, int16_t *block, ptrdiff_t stride)
Definition: h264pred.h:107
enum AVDiscard skip_frame
Definition: avcodec.h:2701
Definition: vf_drawbox.c:37
int ff_h264_decode_ref_pic_list_reordering(H264Context *h)
Definition: h264_refs.c:212
#define MAX_THREADS
Definition: mpegvideo.h:64
static const uint8_t golomb_to_pict_type[5]
Definition: h264data.h:38
#define AV_RN32A(p)
Definition: intreadwrite.h:446
int ref_poc[2][2][32]
h264 POCs of the frames used as reference (FIXME need per slice)
Definition: mpegvideo.h:189
void(* h264_idct_add16)(uint8_t *dst, const int *blockoffset, int16_t *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:89
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2417
unsigned int crop_top
frame_cropping_rect_top_offset
Definition: h264.h:173
int long_ref
1->long term reference 0->short term reference
Definition: mpegvideo.h:188
static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
Definition: h264.c:2526
static int decode_init_thread_copy(AVCodecContext *avctx)
Definition: h264.c:1636
int resync_mb_y
Definition: h264.h:456
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
#define IS_8x8DCT(a)
Definition: h264.h:96
uint8_t scaling_matrix4[6][16]
Definition: h264.h:227
const uint8_t * bytestream
Definition: cabac.h:46
int ref2frm[MAX_SLICES][2][64]
reference to frame number lists, used in the loop filter, the first 2 are for -2,-1 ...
Definition: h264.h:407
int deblocking_filter_parameters_present
deblocking_filter_parameters_present_flag
Definition: h264.h:223
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
Definition: mimic.c:269
#define FF_PROFILE_H264_BASELINE
Definition: avcodec.h:2627
#define IS_INTER(a)
Definition: mpegvideo.h:162
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
uint32_t(*[6] dequant4_coeff)[16]
Definition: h264.h:362
#define CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries...
Definition: avcodec.h:696
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:37
uint8_t
#define av_cold
Definition: attributes.h:66
int ref_count[2][2]
number of entries in ref_poc (FIXME need per slice)
Definition: mpegvideo.h:190
int prev_frame_num_offset
for POC type 2
Definition: h264.h:504
int use_weight
Definition: h264.h:381
int full_range
Definition: h264.h:178
unsigned int crop_left
frame_cropping_rect_left_offset
Definition: h264.h:171
#define IS_8X16(a)
Definition: mpegvideo.h:170
int offset_for_non_ref_pic
Definition: h264.h:158
float delta
#define PICT_FRAME
Definition: mpegvideo.h:646
void ff_h264_reset_sei(H264Context *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:37
Definition: h264.h:110
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:123
int data_partitioning
Definition: h264.h:286
int luma_weight[48][2][2]
Definition: h264.h:386
int bit_depth_chroma
bit_depth_chroma_minus8 + 8
Definition: h264.h:202
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2349
enum AVColorPrimaries color_primaries
Definition: h264.h:180
static int find_unused_picture(H264Context *h)
Definition: h264.c:466
AVCodec ff_h264_decoder
Definition: h264.c:5024
Multithreading support functions.
Picture ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264.h:404
static const uint8_t field_scan8x8[64]
Definition: h264.c:72
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
#define ER_MB_ERROR
int cabac
entropy_coding_mode_flag
Definition: h264.h:213
int mb_xy
Definition: h264.h:461
Definition: h264.h:108
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:251
#define LUMA_DC_BLOCK_INDEX
Definition: h264.h:807
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:174
#define emms_c()
Definition: internal.h:46
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:401
unsigned int crop_right
frame_cropping_rect_right_offset
Definition: h264.h:172
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1162
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=av_sample_fmt_is_planar(in_fmt);out_planar=av_sample_fmt_is_planar(out_fmt);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_dlog(ac->avr,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> out
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:231
void ff_h264_fill_mbaff_ref_list(H264Context *h)
Definition: h264_refs.c:336
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:711
int frame_recovered
Initial frame has been completely recovered.
Definition: h264.h:638
const char data[16]
Definition: mxf.c:66
int height
Definition: h264.h:280
int mb_x
Definition: h264.h:454
int transform_bypass
qpprime_y_zero_transform_bypass_flag
Definition: h264.h:153
uint8_t * data
Definition: avcodec.h:973
void(* h264_add_pixels8_clear)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:106
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:194
static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
Identify the exact end of the bitstream.
Definition: h264.c:689
int left_mb_xy[LEFT_MBS]
Definition: h264.h:304
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:2237
int top_mb_xy
Definition: h264.h:302
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:43
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_...
Definition: pixfmt.h:78
int ff_h264_get_slice_type(const H264Context *h)
Reconstruct bitstream slice_type.
Definition: h264.c:3966
int redundant_pic_cnt_present
redundant_pic_cnt_present_flag
Definition: h264.h:225
#define FF_PROFILE_H264_EXTENDED
Definition: avcodec.h:2630
int chroma_y_shift
Definition: h264.h:282
AVBufferRef * mb_type_buf
Definition: mpegvideo.h:109
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:292
#define MAX_DELAYED_PIC_COUNT
Definition: h264.h:47
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
Picture * next_output_pic
Definition: h264.h:524
high precision timer, useful to profile code
#define AV_COPY64(d, s)
Definition: intreadwrite.h:510
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1761
int luma_log2_weight_denom
Definition: h264.h:383
#define IS_INTERLACED(a)
Definition: mpegvideo.h:165
qpel_mc_func avg_h264_qpel_pixels_tab[4][16]
Definition: h264qpel.h:29
static int h264_set_parameter_from_sps(H264Context *h)
Definition: h264.c:3044
int chroma_weight[48][2][2][2]
Definition: h264.h:387
int last_pocs[MAX_DELAYED_PIC_COUNT]
Definition: h264.h:523
static void init_dequant4_coeff_table(H264Context *h)
Definition: h264.c:1252
#define r
Definition: input.c:51
static int pic_is_unused(H264Context *h, Picture *pic)
Definition: h264.c:457
int width
Definition: h264.h:280
const uint8_t * zigzag_scan8x8_cavlc_q0
Definition: h264.h:447
H.264 / AVC / MPEG4 part10 codec.
ThreadFrame tf
Definition: mpegvideo.h:101
int frame_num
Definition: h264.h:500
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:555
int mb_aff
mb_adaptive_frame_field_flag
Definition: h264.h:166
enum AVColorTransferCharacteristic color_trc
Definition: h264.h:181
H264PredContext hpc
Definition: h264.h:316
static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
Definition: h264.c:4873
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:536
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:123
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1332
int flags
Additional information about the frame packing.
Definition: stereo3d.h:132
int16_t mb_luma_dc[3][16 *2]
Definition: h264.h:417
static enum AVPixelFormat h264_hwaccel_pixfmt_list_jpeg_420[]
Definition: h264.c:169
static int get_ue_golomb(GetBitContext *gb)
read unsigned exp golomb code.
Definition: golomb.h:53
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:77
static int decode_slice_header(H264Context *h, H264Context *h0)
Decode a slice header.
Definition: h264.c:3336
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264.c:4863
const uint8_t * zigzag_scan_q0
Definition: h264.h:445
MotionEstContext me
Definition: h264.h:265
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
int poc_type
pic_order_cnt_type
Definition: h264.h:155
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
int context_initialized
Definition: h264.h:290
static const uint8_t dequant8_coeff_init_scan[16]
Definition: h264.c:139
void(* h264_biweight_func)(uint8_t *dst, uint8_t *src, int stride, int height, int log2_denom, int weightd, int weights, int offset)
Definition: h264dsp.h:34
static const uint16_t mask[17]
Definition: lzw.c:38
void ff_h264_hl_decode_mb(H264Context *h)
Definition: h264.c:2608
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:740
ParseContext parse_context
Definition: h264.h:266
int nal_unit_type
Definition: h264.h:474
int use_weight_chroma
Definition: h264.h:382
int num_reorder_frames
Definition: h264.h:189
#define AV_RB16
Definition: intreadwrite.h:53
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:64
discard all bidirectional frames
Definition: avcodec.h:544
#define AVERROR(e)
Definition: error.h:43
void ff_h264_direct_ref_list_init(H264Context *const h)
Definition: h264_direct.c:102
static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale)
Get the chroma qp.
Definition: h264.h:848
GetBitContext * inter_gb_ptr
Definition: h264.h:413
#define ALZHEIMER_DC_L0T_PRED8x8
Definition: h264pred.h:79
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:142
Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
Definition: h264.h:522
#define IS_SUB_8X4(a)
Definition: mpegvideo.h:173
#define MB_FIELD(h)
Definition: h264.h:63
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2533
int mb_field_decoding_flag
Definition: h264.h:373
int reference
Definition: mpegvideo.h:200
int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
Definition: h264.c:2822
static void flush_dpb(AVCodecContext *avctx)
Definition: h264.c:2790
static int h264_slice_header_init(H264Context *, int)
Definition: h264.c:3170
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:144
void(* h264_idct8_add)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:82
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
PPS pps
current pps
Definition: h264.h:358
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:434
#define FF_PROFILE_H264_HIGH_422
Definition: avcodec.h:2634
int prev_interlaced_frame
Complement sei_pic_struct SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
Definition: h264.h:583
#define FF_PROFILE_H264_HIGH
Definition: avcodec.h:2631
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1142
struct Picture * next_pic
int direct_spatial_mv_pred
Definition: h264.h:390
static int h264_frame_start(H264Context *h)
Definition: h264.c:1855
void(* h264_luma_dc_dequant_idct)(int16_t *output, int16_t *input, int qmul)
Definition: h264dsp.h:101
0: frame
Definition: h264.h:134
simple assert() macros that are a bit more flexible than ISO C assert().
int weighted_pred
weighted_pred_flag
Definition: h264.h:218
int overread_index
the index into ParseContext.buffer of the overread bytes
Definition: parser.h:36
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:148
const char * name
Name of the codec implementation.
Definition: avcodec.h:2762
#define T(x)
H264QpelContext h264qpel
Definition: h264.h:264
ERContext er
Definition: h264.h:268
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:226
static void predict_field_decoding_flag(H264Context *h)
Definition: h264.c:4282
#define CABAC(h)
Definition: h264.h:85
void ff_init_cabac_states(void)
Definition: cabac.c:124
int ff_h264_decode_mb_cavlc(H264Context *h)
Decode a macroblock.
Definition: h264_cavlc.c:692
static int square(int x)
Definition: roqvideoenc.c:112
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:35
uint8_t * list_counts
Array of list_count per MB specifying the slice type.
Definition: h264.h:403
static void er_add_slice(H264Context *h, int startx, int starty, int endx, int endy, int status)
Definition: h264.c:4326
#define FFMAX(a, b)
Definition: common.h:55
qpel_mc_func put_h264_qpel_pixels_tab[4][16]
Definition: h264qpel.h:28
int delta_pic_order_always_zero_flag
Definition: h264.h:157
#define CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:683
uint8_t * mbintra_table
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:244
int new
flag to keep track if the decoder context needs re-init due to changed SPS
Definition: h264.h:205
int * mb_index2xy
int offset_for_top_to_bottom_field
Definition: h264.h:159
#define IN_RANGE(a, b, size)
Definition: h264.c:1594
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264.h:82
int priv_data_size
Size of HW accelerator private data.
Definition: avcodec.h:2936
int off
Definition: dsputil_bfin.c:29
uint8_t zigzag_scan8x8[64]
Definition: h264.h:440
static int execute_decode_slices(H264Context *h, unsigned context_count)
Call decode_slice() for each context.
Definition: h264.c:4504
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:69
#define pass
Definition: fft_template.c:334
static const uint8_t scan8[16 *3+3]
Definition: h264.h:811
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:182
the normal 2^n-1 "JPEG" YUV ranges
Definition: avcodec.h:596
int crop
frame_cropping_flag
Definition: h264.h:168
uint8_t * error_status_table
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
uint8_t * direct_table
Definition: h264.h:436
AVBufferRef * hwaccel_priv_buf
Definition: mpegvideo.h:151
int ff_pred_weight_table(H264Context *h)
Definition: h264.c:2628
static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg, int chroma444, int simple, int pixel_shift)
Definition: h264.c:2302
uint8_t scaling_matrix8[6][64]
Definition: h264.h:228
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:509
static void copy_picture_range(Picture **to, Picture **from, int count, H264Context *new_base, H264Context *old_base)
Definition: h264.c:1601
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264.h:482
void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:193
useful rectangle filling function
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: avcodec.h:595
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:222
int refs
number of reference frames
Definition: avcodec.h:1697
CABACContext cabac
Cabac.
Definition: h264.h:423
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:37
unsigned int left_samples_available
Definition: h264.h:320
#define IS_8X8(a)
Definition: mpegvideo.h:171
AVBufferRef * motion_val_buf[2]
Definition: mpegvideo.h:106
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: h264.c:186
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:1281
int ref_frame_count
num_ref_frames
Definition: h264.h:161
Picture * long_ref[32]
Definition: h264.h:521
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:2776
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:168
void ff_h264_filter_mb(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
struct Picture * last_pic
int frame_num_offset
for POC type 2
Definition: h264.h:503
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:406
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2390
int x264_build
Definition: h264.h:452
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2525
uint32_t * mb2br_xy
Definition: h264.h:351
uint8_t * er_temp_buffer
ptrdiff_t linesize
Definition: h264.h:281
int overread
the number of bytes which where irreversibly read from the next frame
Definition: parser.h:35
#define FFMIN(a, b)
Definition: common.h:57
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
Definition: h264.h:366
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:84
int last_index
Definition: parser.h:31
uint8_t field_scan8x8_cavlc[64]
Definition: h264.h:444
int poc_cycle_length
num_ref_frames_in_pic_order_cnt_cycle
Definition: h264.h:160
int colour_description_present_flag
Definition: h264.h:179
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
Definition: pixfmt.h:77
int poc
h264 frame POC
Definition: mpegvideo.h:183
AVRational sar
Definition: h264.h:176
int redundant_pic_count
Definition: h264.h:517
#define FIELD_PICTURE(h)
Definition: h264.h:65
int sei_frame_packing_present
frame_packing_arrangment SEI message
Definition: h264.h:588
int width
picture width / height.
Definition: avcodec.h:1217
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:107
#define FF_PROFILE_UNKNOWN
Definition: avcodec.h:2597
void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:158
int long_ref_count
number of actual long term references
Definition: h264.h:535
Picture.
Definition: mpegvideo.h:99
void(* h264_idct_dc_add)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:84
int cabac_init_idc
Definition: h264.h:538
static void implicit_weight_table(H264Context *h, int field)
Initialize implicit_weight table.
Definition: h264.c:2695
void * hwaccel_picture_private
hardware accelerator private data
Definition: mpegvideo.h:155
static av_always_inline int dctcoef_get(int16_t *mb, int high_bit_depth, int index)
Definition: h264.c:2392
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:398
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
int size_in_bits
Definition: get_bits.h:56
SPS sps
current sps
Definition: h264.h:357
int32_t
PPS * pps_buffers[MAX_PPS_COUNT]
Definition: h264.h:489
static av_always_inline void prefetch_motion(H264Context *h, int list, int pixel_shift, int chroma_idc)
Definition: h264.c:1137
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:114
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1733
DSPContext * dsp
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color)
Definition: avplay.c:396
#define MAX_SPS_COUNT
Definition: h264.h:42
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
Decode PPS.
Definition: h264_ps.c:524
#define FFABS(a)
Definition: common.h:52
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:2526
Context Adaptive Binary Arithmetic Coder inline functions.
int level
level
Definition: avcodec.h:2679
int init_qp
pic_init_qp_minus26 + 26
Definition: h264.h:220
int frame_num
h264 frame_num (raw frame_num from slice header)
Definition: mpegvideo.h:184
int mmco_reset
Definition: h264.h:533
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:314
uint8_t * bipred_scratchpad
Definition: h264.h:648
#define AV_EF_EXPLODE
Definition: avcodec.h:2401
int poc_lsb
Definition: h264.h:496
int max_pic_num
max_frame_num or 2 * max_frame_num for field pics.
Definition: h264.h:515
void(* pred4x4_add[2])(uint8_t *pix, int16_t *block, ptrdiff_t stride)
Definition: h264pred.h:100
static int h264_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: h264.c:4893
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1182
int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
Definition: h264_refs.c:534
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264.c:4337
int ff_set_ref_count(H264Context *h)
Definition: h264.c:3272
unsigned int topright_samples_available
Definition: h264.h:319
#define AV_WN16A(p, v)
Definition: intreadwrite.h:454
const uint8_t * zigzag_scan8x8_q0
Definition: h264.h:446
int curr_pic_num
frame_num for frames or 2 * frame_num + 1 for field pics.
Definition: h264.h:510
int slice_type
Definition: h264.h:367
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264.c:2903
Definition: h264.h:105
static int av_unused get_cabac_terminate(CABACContext *c)
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
Definition: mpegvideo.h:92
int top_type
Definition: h264.h:307
#define MB_MBAFF(h)
Definition: h264.h:62
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:223
static void loop_filter(H264Context *h, int start_x, int end_x)
Definition: h264.c:4213
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:401
uint32_t dequant4_buffer[6][QP_MAX_NUM+1][16]
Definition: h264.h:360
ptrdiff_t mb_uvlinesize
Definition: h264.h:355
#define PART_NOT_AVAILABLE
Definition: h264.h:337
unsigned int sps_id
Definition: h264.h:149
unsigned int list_count
Definition: h264.h:402
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2514
#define IS_16X8(a)
Definition: mpegvideo.h:169
static const uint8_t dequant8_coeff_init[6][6]
Definition: h264.c:143
if(ac->has_optimized_func)
GetBitContext intra_gb
Definition: h264.h:410
int dequant_coeff_pps
reinit tables when pps changes
Definition: h264.h:491
#define AVERROR_PATCHWELCOME
Not yet implemented in Libav, patches welcome.
Definition: error.h:57
int pic_order_present
pic_order_present_flag
Definition: h264.h:214
Picture cur_pic
Definition: h264.h:272
SPS * sps_buffers[MAX_SPS_COUNT]
Definition: h264.h:488
static const int8_t mv[256][2]
Definition: 4xm.c:72
struct H264Context * thread_context[MAX_THREADS]
Definition: h264.h:544
AVBufferRef * progress
Definition: thread.h:40
int chroma_log2_weight_denom
Definition: h264.h:384
int bit_depth_luma
luma bit depth from sps to detect changes
Definition: h264.h:485
static void flush_change(H264Context *h)
Definition: h264.c:2769
short offset_for_ref_frame[256]
Definition: h264.h:187
int chroma_format_idc
chroma format from sps to detect changes
Definition: h264.h:486
VideoDSPContext vdsp
Definition: h264.h:261
int timing_info_present_flag
Definition: h264.h:183
NULL
Definition: eval.c:55
AVBufferRef * qscale_table_buf
Definition: mpegvideo.h:103
static void decode_finish_row(H264Context *h)
Draw edges and report progress for the last MB row.
Definition: h264.c:4295
struct Picture * cur_pic
int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:568
int mbaff
h264 1 -> MBAFF frame 0-> not MBAFF
Definition: mpegvideo.h:191
static int width
Definition: utils.c:156
int coded_picture_number
picture number in bitstream order
Definition: frame.h:198
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:127
int mb_stride
Definition: h264.h:459
#define AV_LOG_INFO
Standard information.
Definition: log.h:134
AVCodecContext * avctx
Definition: h264.h:259
Libavcodec external API header.
H264 / AVC / MPEG4 part10 codec data table
Definition: h264.h:109
#define FF_PROFILE_H264_HIGH_422_INTRA
Definition: avcodec.h:2635
int slice_alpha_c0_offset
Definition: h264.h:467
1: top field
Definition: h264.h:135
enum AVCodecID codec_id
Definition: avcodec.h:1065
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:96
AVHWAccel.
Definition: avcodec.h:2852
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:472
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:505
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:125
static void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int height, int y_offset, int list0, int list1, int *nrefs)
Definition: h264.c:717
int next_outputed_poc
Definition: h264.h:526
#define LTOP
Definition: h264.h:67
int ff_h264_decode_sei(H264Context *h)
Decode SEI.
Definition: h264_sei.c:202
int poc_msb
Definition: h264.h:497
int debug
debug
Definition: avcodec.h:2348
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:57
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
int implicit_weight[48][48][2]
Definition: h264.h:388
int max_contexts
Max number of threads / contexts.
Definition: h264.h:557
int recovery_frame
recovery_frame is the frame_num at which the next frame should be fully constructed.
Definition: h264.h:625
main external API structure.
Definition: avcodec.h:1054
static void(WINAPI *cond_broadcast)(pthread_cond_t *cond)
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:489
int ff_h264_check_intra4x4_pred_mode(H264Context *h)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:489
uint8_t * data
The data buffer.
Definition: buffer.h:89
void(* qpel_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
Definition: dsputil.h:81
static void decode_postinit(H264Context *h, int setup_finished)
Run setup operations that must be run after slice header decoding.
Definition: h264.c:1947
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:1295
2: bottom field
Definition: h264.h:136
#define QP_MAX_NUM
Definition: h264.h:98
int resync_mb_x
Definition: h264.h:455
int16_t mb[16 *48 *2]
as a dct coeffecient is int32_t in high depth, we need to reserve twice the space.
Definition: h264.h:416
static void init_dequant8_coeff_table(H264Context *h)
Definition: h264.c:1225
int frame_packing_arrangement_type
Definition: h264.h:589
uint32_t state
contains the last few bytes in MSB order
Definition: parser.h:33
int extradata_size
Definition: avcodec.h:1163
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:81
AVBuffer * buffer
Definition: buffer.h:82
int constraint_set_flags
constraint_set[0-3]_flag
Definition: h264.h:204
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:271
SEI_PicStructType sei_pic_struct
pic_struct in picture timing SEI message
Definition: h264.h:575
Picture * short_ref[32]
Definition: h264.h:520
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:224
#define FF_PROFILE_H264_HIGH_10_INTRA
Definition: avcodec.h:2633
int slice_flags
slice flags
Definition: avcodec.h:1551
int coded_height
Definition: avcodec.h:1227
Switching Predicted.
Definition: avutil.h:258
void(* h264_idct8_add4)(uint8_t *dst, const int *blockoffset, int16_t *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:92
static const uint8_t zigzag_scan8x8_cavlc[64]
Definition: h264.c:111
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1294
int slice_beta_offset
Definition: h264.h:468
const uint8_t * field_scan8x8_cavlc_q0
Definition: h264.h:450
static void idct_add(uint8_t *dest, int line_size, int16_t *block)
Definition: dsputil_sh4.c:74
#define CHROMA422(h)
Definition: h264.h:88
int index
Definition: gxfenc.c:72
uint32_t(*[6] dequant8_coeff)[64]
Definition: h264.h:363
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264.c:4996
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:226
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264.h:277
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1747
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1740
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
A reference counted buffer type.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:375
int pixel_shift
0 for 8-bit H264, 1 for high-bit-depth H264
Definition: h264.h:274
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb, int first_slice)
Definition: h264_refs.c:732
static void await_references(H264Context *h)
Wait until all reference frames are available for MC operations.
Definition: h264.c:760
void(* h264_weight_func)(uint8_t *block, int stride, int height, int log2_denom, int weight, int offset)
Definition: h264dsp.h:32
int16_t mb_padding[256 *2]
as mb is addressed by scantable[i] and scantable is uint8_t we can either check that i is not too lar...
Definition: h264.h:418
#define FF_PROFILE_H264_HIGH_444
Definition: avcodec.h:2636
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:221
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: avcodec.h:2329
unsigned int sps_id
Definition: h264.h:212
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:79
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
Definition: h264.h:156
6: bottom field, top field, bottom field repeated, in that order
Definition: h264.h:140
static int init_dimensions(H264Context *h)
Definition: h264.c:3135
AVCodecContext * avctx
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
Definition: pixfmt.h:138
void ff_h264_draw_horiz_band(H264Context *h, int y, int height)
Definition: h264.c:212
void(* h264_idct8_dc_add)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:86
Views are on top of each other.
Definition: stereo3d.h:55
int block_offset[2 *(16 *3)]
block_offset[ 0..23] for frame macroblocks block_offset[24..47] for field macroblocks ...
Definition: h264.h:348
uint32_t time_scale
Definition: h264.h:185
int field_poc[2]
h264 top/bottom POC
Definition: mpegvideo.h:182
#define FF_BUG_AUTODETECT
autodetection
Definition: avcodec.h:2294
#define CONFIG_ERROR_RESILIENCE
Definition: config.h:327
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:273
int transform_8x8_mode
transform_8x8_mode_flag
Definition: h264.h:226
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:225
int pic_struct_present_flag
Definition: h264.h:195
Definition: h264.h:103
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:31
uint8_t zigzag_scan[16]
Definition: h264.h:439
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:4983
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:113
#define IS_INTRA16x16(a)
Definition: mpegvideo.h:159
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:252
void ff_h264_filter_mb_fast(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
#define LBOT
Definition: h264.h:68
#define AV_ZERO128(d)
Definition: intreadwrite.h:542
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
Definition: h264chroma.h:24
static const uint8_t dequant4_coeff_init[6][3]
Definition: h264.c:130
int height
Definition: gxfenc.c:72
int8_t * qscale_table
Definition: mpegvideo.h:104
Views are next to each other.
Definition: stereo3d.h:45
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264.c:2759
static int field_end(H264Context *h, int in_setup)
Definition: h264.c:2937
hardware decoding through VDA
Definition: pixfmt.h:159
discard all non reference
Definition: avcodec.h:543
int is_complex
Definition: h264.h:463
AVBufferPool * qscale_table_pool
Definition: h264.h:652
static enum AVPixelFormat h264_hwaccel_pixfmt_list_420[]
Definition: h264.c:152
int slice_context_count
Definition: h264.h:559
int mb_height
pic_height_in_map_units_minus1 + 1
Definition: h264.h:164
AVBufferPool * motion_val_pool
Definition: h264.h:654
Picture * DPB
Definition: h264.h:270
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:368
uint8_t * rbsp_buffer[2]
Definition: h264.h:475
int qscale
Definition: h264.h:284
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:65
#define tprintf(p,...)
Definition: get_bits.h:626
#define CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:786
common internal api header.
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:113
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:196
#define CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:782
#define AV_COPY128(d, s)
Definition: intreadwrite.h:514
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:670
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264.h:354
#define MAX_SLICES
Definition: dxva2_mpeg2.c:25
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264.h:636
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:109
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
Definition: alsdec.c:1766
uint16_t * slice_table_base
Definition: h264.h:493
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264.h:154
int16_t * dc_val[3]
H.264 / AVC / MPEG4 part10 motion vector predicion.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:91
Bi-dir predicted.
Definition: avutil.h:255
AVProfile.
Definition: avcodec.h:2743
int index
Definition: parser.h:30
FF_ENABLE_DEPRECATION_WARNINGS int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:1533
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_...
Definition: pixfmt.h:79
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:2293
AVHWAccel * ff_find_hwaccel(AVCodecContext *avctx)
Return the hardware accelerated codec for codec codec_id and pixel format pix_fmt.
Definition: utils.c:2177
int cur_chroma_format_idc
Definition: h264.h:647
enum AVDiscard skip_loop_filter
Definition: avcodec.h:2687
int den
denominator
Definition: rational.h:45
#define CONFIG_SMALL
Definition: config.h:345
int chroma_qp[2]
Definition: h264.h:275
int sei_ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
Definition: h264.h:598
static av_cold int init(AVCodecParserContext *s)
Definition: h264_parser.c:498
uint16_t sub_mb_type[4]
Definition: h264.h:378
static av_always_inline void dctcoef_set(int16_t *mb, int high_bit_depth, int index, int value)
Definition: h264.c:2401
int bit_depth_luma
bit_depth_luma_minus8 + 8
Definition: h264.h:201
DSP utils.
int intra16x16_pred_mode
Definition: h264.h:299
#define IS_INTRA(x, y)
void * priv_data
Definition: avcodec.h:1090
static const uint8_t field_scan[16]
Definition: h264.c:65
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
Definition: h264.h:502
#define IS_SUB_4X8(a)
Definition: mpegvideo.h:174
Definition: h264.h:104
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2362
void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:109
static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264.c:2206
Picture * cur_pic_ptr
Definition: h264.h:271
Definition: h264.h:107
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:2554
void(* h264_idct_add16intra)(uint8_t *dst, const int *blockoffset, int16_t *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:98
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:297
#define FRAME_MBAFF(h)
Definition: h264.h:64
#define IS_SUB_8X8(a)
Definition: mpegvideo.h:172
const uint16_t ff_h264_mb_sizes[4]
Definition: h264.c:51
#define IS_DIRECT(a)
Definition: mpegvideo.h:166
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264.h:327
int ff_h264_decode_extradata(H264Context *h)
Definition: h264.c:1458
uint8_t(*[2] top_borders)[(16 *3)*2]
Definition: h264.h:321
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1098
#define FF_BUG_TRUNCATED
Definition: avcodec.h:2313
static int fill_filter_caches(H264Context *h, int mb_type)
Definition: h264.c:4068
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:76
int pic_id
h264 pic_num (short -> no wrap version of pic_num, pic_num & max_pic_num; long -> long_pic_num) ...
Definition: mpegvideo.h:186
int content_interpretation_type
Definition: h264.h:590
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:163
#define AV_ZERO32(d)
Definition: intreadwrite.h:534
Views are packed per column.
Definition: stereo3d.h:107
int mb_width
Definition: h264.h:458
enum AVPictureType pict_type
Definition: h264.h:567
int current_slice
current slice number, used to initalize slice_num of each thread/context
Definition: h264.h:549
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:416
int mb_width
pic_width_in_mbs_minus1 + 1
Definition: h264.h:163
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1149
#define IS_16X16(a)
Definition: mpegvideo.h:168
#define AV_RN16A(p)
Definition: intreadwrite.h:442
uint32_t * mb2b_xy
Definition: h264.h:350
#define FF_PROFILE_H264_HIGH_444_INTRA
Definition: avcodec.h:2638
int slice_type_fixed
Definition: h264.h:369
struct AVFrame f
Definition: mpegvideo.h:100
static int copy_parameter_set(void **to, void **from, int count, int size)
Definition: h264.c:1616
int delta_poc_bottom
Definition: h264.h:498
#define SLICE_FLAG_CODED_ORDER
draw_horiz_band() is called in coded order instead of display
Definition: avcodec.h:1552
const uint8_t * field_scan_q0
Definition: h264.h:448
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:205
static void free_tables(H264Context *h, int free_rbsp)
Definition: h264.c:1163
int ff_h264_fill_default_ref_list(H264Context *h)
Fill the default_ref_list.
Definition: h264_refs.c:116
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264.c:1596
H264DSPContext h264dsp
Definition: h264.h:262
void ff_er_frame_start(ERContext *s)
uint8_t field_scan8x8[64]
Definition: h264.h:443
uint32_t * mb_type
Definition: mpegvideo.h:110
#define copy_fields(to, from, start_field, end_field)
Definition: h264.c:1650
#define av_always_inline
Definition: attributes.h:40
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264.h:641
uint8_t * temp
Definition: mpegvideo.h:216
static av_always_inline void mc_part_std(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg, int list0, int list1, int pixel_shift, int chroma_idc)
Definition: h264.c:983
int8_t * intra4x4_pred_mode
Definition: h264.h:315
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:314
static int init_table_pools(H264Context *h)
Definition: h264.c:374
#define FF_PROFILE_H264_CONSTRAINED
Definition: avcodec.h:2624
static int ref_picture(H264Context *h, Picture *dst, Picture *src)
Definition: h264.c:290
#define FF_PROFILE_H264_CONSTRAINED_BASELINE
Definition: avcodec.h:2628
static int clone_slice(H264Context *dst, H264Context *src)
Replicate H264 "master" context to thread contexts.
Definition: h264.c:2992
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2916
8: frame tripling
Definition: h264.h:142
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264.h:466
static int alloc_scratch_buffers(H264Context *h, int linesize)
Definition: h264.c:349
#define AV_RN64A(p)
Definition: intreadwrite.h:450
#define LIST_NOT_USED
Definition: h264.h:336
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2927
static int alloc_picture(H264Context *h, Picture *pic)
Definition: h264.c:401
uint8_t(* non_zero_count)[48]
Definition: h264.h:329
#define FF_PROFILE_H264_HIGH_10
Definition: avcodec.h:2632
unsigned int crop_bottom
frame_cropping_rect_bottom_offset
Definition: h264.h:174
exp golomb vlc stuff
uint8_t * mbskip_table
int slice_num
Definition: h264.h:365
AVPixelFormat
Pixel format.
Definition: pixfmt.h:63
This structure stores compressed data.
Definition: avcodec.h:950
int sei_recovery_frame_cnt
recovery_frame_cnt from SEI message
Definition: h264.h:617
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:877
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t buf_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:52
int droppable
Definition: h264.h:285
int level_idc
Definition: h264.h:151
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
Definition: avcodec.h:2327
void(* pred8x8l[9+3])(uint8_t *src, int topleft, int topright, ptrdiff_t stride)
Definition: h264pred.h:95
#define STARTCODE_TEST
int nal_ref_idc
Definition: h264.h:473
uint8_t field_scan[16]
Definition: h264.h:442
for(j=16;j >0;--j)
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:118
#define FFMAX3(a, b, c)
Definition: common.h:56
int b_stride
Definition: h264.h:352
Predicted.
Definition: avutil.h:254
unsigned int rbsp_buffer_size[2]
Definition: h264.h:476
Context Adaptive Binary Arithmetic Coder.
int8_t ref_cache[2][5 *8]
Definition: h264.h:335
AVBufferRef * ref_index_buf[2]
Definition: mpegvideo.h:139
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264.h:374
int short_ref_count
number of actual short term references
Definition: h264.h:536
static const AVProfile profiles[]
Definition: h264.c:5007
enum AVColorSpace colorspace
Definition: h264.h:182
static int16_t block[64]
Definition: dct-test.c:170