vp3.c
1 /*
2  * Copyright (C) 2003-2004 the ffmpeg project
3  *
4  * This file is part of Libav.
5  *
6  * Libav is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * Libav is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with Libav; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35 
36 #include "libavutil/imgutils.h"
37 #include "avcodec.h"
38 #include "internal.h"
39 #include "dsputil.h"
40 #include "get_bits.h"
41 #include "videodsp.h"
42 #include "vp3data.h"
43 #include "vp3dsp.h"
44 #include "xiph.h"
45 #include "thread.h"
46 
47 #define FRAGMENT_PIXELS 8
48 
49 //FIXME split things out into their own arrays
50 typedef struct Vp3Fragment {
51  int16_t dc;
52  uint8_t coding_method;
53  uint8_t qpi;
54 } Vp3Fragment;
55 
56 #define SB_NOT_CODED 0
57 #define SB_PARTIALLY_CODED 1
58 #define SB_FULLY_CODED 2
59 
60 // This is the maximum length of a single long bit run that can be encoded
61 // for superblock coding or block qps. Theora special-cases this to read a
62 // bit instead of flipping the current bit to allow for runs longer than 4129.
63 #define MAXIMUM_LONG_BIT_RUN 4129
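/* Illustrative sketch (not part of the decoder): the alternating-run scheme
 * that MAXIMUM_LONG_BIT_RUN refers to. Runs of identical bits are sent as run
 * lengths; the bit value normally just toggles between runs, but after a
 * maximal run of 4129 an explicit bit is read instead, which is what allows
 * longer runs to be represented. read_bit() and read_run() are hypothetical
 * stand-ins for the real bitstream reads (get_bits1() and the run-length VLC). */
static void expand_bit_runs(uint8_t *flags, int count,
                            int (*read_bit)(void *ctx),
                            int (*read_run)(void *ctx), void *ctx)
{
    int i = 0, j;
    int run = 0;
    int bit = read_bit(ctx) ^ 1;     /* pre-inverted: the first pass toggles it back */

    while (i < count) {
        if (run == MAXIMUM_LONG_BIT_RUN)
            bit = read_bit(ctx);     /* after a maximal run, read the bit explicitly */
        else
            bit ^= 1;                /* otherwise the bit value simply alternates */

        run = read_run(ctx);
        for (j = 0; j < run && i < count; j++)
            flags[i++] = bit;
    }
}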
64 
65 #define MODE_INTER_NO_MV 0
66 #define MODE_INTRA 1
67 #define MODE_INTER_PLUS_MV 2
68 #define MODE_INTER_LAST_MV 3
69 #define MODE_INTER_PRIOR_LAST 4
70 #define MODE_USING_GOLDEN 5
71 #define MODE_GOLDEN_MV 6
72 #define MODE_INTER_FOURMV 7
73 #define CODING_MODE_COUNT 8
74 
75 /* special internal mode */
76 #define MODE_COPY 8
77 
78 /* There are 6 preset schemes, plus a free-form scheme */
79 static const int ModeAlphabet[6][CODING_MODE_COUNT] =
80 {
81  /* scheme 1: Last motion vector dominates */
86 
87  /* scheme 2 */
92 
93  /* scheme 3 */
98 
99  /* scheme 4 */
104 
105  /* scheme 5: No motion vector dominates */
110 
111  /* scheme 6 */
116 
117 };
118 
119 static const uint8_t hilbert_offset[16][2] = {
120  {0,0}, {1,0}, {1,1}, {0,1},
121  {0,2}, {0,3}, {1,3}, {1,2},
122  {2,2}, {2,3}, {3,3}, {3,2},
123  {3,1}, {2,1}, {2,0}, {3,0}
124 };
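/* Illustrative sketch (standalone, unused by the decoder): hilbert_offset[]
 * visits the 4x4 grid of fragments inside a superblock along a Hilbert-style
 * curve. Printing which visit index lands on each grid cell: */
static void print_hilbert_order(void)
{
    int order[4][4];
    int i, y;

    for (i = 0; i < 16; i++)
        order[hilbert_offset[i][1]][hilbert_offset[i][0]] = i;

    for (y = 0; y < 4; y++)
        printf("%2d %2d %2d %2d\n",
               order[y][0], order[y][1], order[y][2], order[y][3]);
    /* prints:
     *   0  1 14 15
     *   3  2 13 12
     *   4  7  8 11
     *   5  6  9 10
     */
}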
125 
126 #define MIN_DEQUANT_VAL 2
127 
128 typedef struct Vp3DecodeContext {
131  int version;
132  int width, height;
137  int keyframe;
144 
145  int qps[3];
146  int nqps;
147  int last_qps[3];
148 
158  unsigned char *superblock_coding;
159 
163 
167 
170  int data_offset[3];
171 
172  int8_t (*motion_val[2])[2];
173 
175 
176  /* tables */
177  uint16_t coded_dc_scale_factor[64];
178  uint32_t coded_ac_scale_factor[64];
181  uint8_t qr_size [2][3][64];
182  uint16_t qr_base[2][3][64];
183 
201  int16_t *dct_tokens[3][64];
202  int16_t *dct_tokens_base;
203 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
204 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) << 9) + ((zero_run) << 2) + 1)
205 #define TOKEN_COEFF(coeff) (((coeff) << 2) + 2)
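/* Illustrative sketch (not used by the decoder): taking a packed token from
 * the macros above apart again, the same way vp3_dequant() does further down.
 * The low two bits select the token type; the payload sits in the upper bits. */
static void describe_token(int16_t packed)
{
    int token = packed;                     /* sign-extend, as vp3_dequant() does */

    switch (token & 3) {
    case 0:                                 /* TOKEN_EOB */
        printf("end-of-block run covering %d blocks\n", token >> 2);
        break;
    case 1:                                 /* TOKEN_ZERO_RUN */
        printf("skip %d coeffs, then coefficient %d\n",
               (token >> 2) & 0x7f, token >> 9);
        break;
    case 2:                                 /* TOKEN_COEFF */
        printf("coefficient %d\n", token >> 2);
        break;
    }
}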
206 
210  int num_coded_frags[3][64];
212 
213  /* this is a list of indexes into the all_fragments array indicating
214  * which of the fragments are coded */
216 
217  VLC dc_vlc[16];
222 
227 
228  /* these arrays need to be on 16-byte boundaries since SSE2 operations
229  * index into them */
230  DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64];
231 
232  /* This table contains superblock_count * 16 entries. Each set of 16
233  * numbers corresponds to the fragment indexes 0..15 of the superblock.
234  * An entry will be -1 to indicate that no entry corresponds to that
235  * index. */
237 
238  /* This is an array that indicates how a particular macroblock
239  * is coded. */
240  unsigned char *macroblock_coding;
241 
243 
244  /* Huffman decode */
245  int hti;
246  unsigned int hbits;
247  int entries;
249  uint32_t huffman_table[80][32][2];
250 
254 
255 /************************************************************************
256  * VP3 specific functions
257  ************************************************************************/
258 
259 static void vp3_decode_flush(AVCodecContext *avctx)
260 {
261  Vp3DecodeContext *s = avctx->priv_data;
262 
263  if (s->golden_frame.data[0]) {
264  if (s->golden_frame.data[0] == s->last_frame.data[0])
265  memset(&s->last_frame, 0, sizeof(AVFrame));
266  if (s->current_frame.data[0] == s->golden_frame.data[0])
267  memset(&s->current_frame, 0, sizeof(AVFrame));
269  }
270  if (s->last_frame.data[0]) {
271  if (s->current_frame.data[0] == s->last_frame.data[0])
272  memset(&s->current_frame, 0, sizeof(AVFrame));
274  }
275  if (s->current_frame.data[0])
277 }
278 
279 static av_cold int vp3_decode_end(AVCodecContext *avctx)
280 {
281  Vp3DecodeContext *s = avctx->priv_data;
282  int i;
283 
285  av_freep(&s->all_fragments);
290  av_freep(&s->motion_val[0]);
291  av_freep(&s->motion_val[1]);
293 
294  if (avctx->internal->is_copy)
295  return 0;
296 
297  for (i = 0; i < 16; i++) {
298  ff_free_vlc(&s->dc_vlc[i]);
299  ff_free_vlc(&s->ac_vlc_1[i]);
300  ff_free_vlc(&s->ac_vlc_2[i]);
301  ff_free_vlc(&s->ac_vlc_3[i]);
302  ff_free_vlc(&s->ac_vlc_4[i]);
303  }
304 
309 
310  /* release all frames */
311  vp3_decode_flush(avctx);
312 
313  return 0;
314 }
315 
316 /*
317  * This function sets up all of the various block mappings:
318  * superblocks <-> fragments, macroblocks <-> fragments,
319  * superblocks <-> macroblocks
320  *
321  * @return 0 if successful; 1 if *anything* went wrong.
322  */
323 static int init_block_mapping(Vp3DecodeContext *s)
324 {
325  int sb_x, sb_y, plane;
326  int x, y, i, j = 0;
327 
328  for (plane = 0; plane < 3; plane++) {
329  int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
330  int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
331  int frag_width = s->fragment_width[!!plane];
332  int frag_height = s->fragment_height[!!plane];
333 
334  for (sb_y = 0; sb_y < sb_height; sb_y++)
335  for (sb_x = 0; sb_x < sb_width; sb_x++)
336  for (i = 0; i < 16; i++) {
337  x = 4*sb_x + hilbert_offset[i][0];
338  y = 4*sb_y + hilbert_offset[i][1];
339 
340  if (x < frag_width && y < frag_height)
341  s->superblock_fragments[j++] = s->fragment_start[plane] + y*frag_width + x;
342  else
343  s->superblock_fragments[j++] = -1;
344  }
345  }
346 
347  return 0; /* successful path out */
348 }
349 
350 /*
351  * This function sets up the dequantization tables used for a particular
352  * frame.
353  */
354 static void init_dequantizer(Vp3DecodeContext *s, int qpi)
355 {
356  int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
357  int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]];
358  int i, plane, inter, qri, bmi, bmj, qistart;
359 
360  for(inter=0; inter<2; inter++){
361  for(plane=0; plane<3; plane++){
362  int sum=0;
363  for(qri=0; qri<s->qr_count[inter][plane]; qri++){
364  sum+= s->qr_size[inter][plane][qri];
365  if(s->qps[qpi] <= sum)
366  break;
367  }
368  qistart= sum - s->qr_size[inter][plane][qri];
369  bmi= s->qr_base[inter][plane][qri ];
370  bmj= s->qr_base[inter][plane][qri+1];
371  for(i=0; i<64; i++){
372  int coeff= ( 2*(sum -s->qps[qpi])*s->base_matrix[bmi][i]
373  - 2*(qistart-s->qps[qpi])*s->base_matrix[bmj][i]
374  + s->qr_size[inter][plane][qri])
375  / (2*s->qr_size[inter][plane][qri]);
376 
377  int qmin= 8<<(inter + !i);
378  int qscale= i ? ac_scale_factor : dc_scale_factor;
379 
380  s->qmat[qpi][inter][plane][s->dsp.idct_permutation[i]]= av_clip((qscale * coeff)/100 * 4, qmin, 4096);
381  }
382  // all DC coefficients use the same quant so as not to interfere with DC prediction
383  s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
384  }
385  }
386 }
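/* Worked note on the interpolation above (an observation, not extra decoder
 * logic): within one qi range the coefficient is a linear blend of the base
 * matrices selected by bmi and bmj. When a range uses the same base matrix at
 * both ends (bmi == bmj == b), sum - qistart equals qr_size[inter][plane][qri]
 * and the expression collapses:
 *
 *   coeff = (2*qr_size*base_matrix[b][i] + qr_size) / (2*qr_size)
 *         = base_matrix[b][i]
 *
 * which is exactly the VP3.1 default-table case set up in the init code below. */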
387 
388 /*
389  * This function initializes the loop filter boundary limits if the frame's
390  * quality index is different from the previous frame's.
391  *
392  * The filter_limit_values may not be larger than 127.
393  */
394 static void init_loop_filter(Vp3DecodeContext *s)
395 {
396  int *bounding_values= s->bounding_values_array+127;
397  int filter_limit;
398  int x;
399  int value;
400 
401  filter_limit = s->filter_limit_values[s->qps[0]];
402  assert(filter_limit < 128);
403 
404  /* set up the bounding values */
405  memset(s->bounding_values_array, 0, 256 * sizeof(int));
406  for (x = 0; x < filter_limit; x++) {
407  bounding_values[-x] = -x;
408  bounding_values[x] = x;
409  }
410  for (x = value = filter_limit; x < 128 && value; x++, value--) {
411  bounding_values[ x] = value;
412  bounding_values[-x] = -value;
413  }
414  if (value)
415  bounding_values[128] = value;
416  bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202;
417 }
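/* Illustrative sketch (standalone, unused): the table built above implements
 * the loop-filter response as a function of a pixel difference d -- passed
 * through unchanged for |d| < filter_limit, ramped back toward zero between
 * filter_limit and 2*filter_limit, and zeroed beyond that. */
static int loop_filter_response(int d, int filter_limit)
{
    int ad = FFABS(d);

    if (ad < filter_limit)
        return d;                                   /* small difference: keep as-is */
    if (ad < 2 * filter_limit)                      /* medium: ramp back toward zero */
        return d > 0 ?  (2 * filter_limit - ad)
                     : -(2 * filter_limit - ad);
    return 0;                                       /* large difference: no filtering */
}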
418 
419 /*
420  * This function unpacks all of the superblock/macroblock/fragment coding
421  * information from the bitstream.
422  */
423 static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
424 {
425  int superblock_starts[3] = { 0, s->u_superblock_start, s->v_superblock_start };
426  int bit = 0;
427  int current_superblock = 0;
428  int current_run = 0;
429  int num_partial_superblocks = 0;
430 
431  int i, j;
432  int current_fragment;
433  int plane;
434 
435  if (s->keyframe) {
436  memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
437 
438  } else {
439 
440  /* unpack the list of partially-coded superblocks */
441  bit = get_bits1(gb) ^ 1;
442  current_run = 0;
443 
444  while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
445  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
446  bit = get_bits1(gb);
447  else
448  bit ^= 1;
449 
450  current_run = get_vlc2(gb,
451  s->superblock_run_length_vlc.table, 6, 2) + 1;
452  if (current_run == 34)
453  current_run += get_bits(gb, 12);
454 
455  if (current_superblock + current_run > s->superblock_count) {
456  av_log(s->avctx, AV_LOG_ERROR, "Invalid partially coded superblock run length\n");
457  return -1;
458  }
459 
460  memset(s->superblock_coding + current_superblock, bit, current_run);
461 
462  current_superblock += current_run;
463  if (bit)
464  num_partial_superblocks += current_run;
465  }
466 
467  /* unpack the list of fully coded superblocks if any of the blocks were
468  * not marked as partially coded in the previous step */
469  if (num_partial_superblocks < s->superblock_count) {
470  int superblocks_decoded = 0;
471 
472  current_superblock = 0;
473  bit = get_bits1(gb) ^ 1;
474  current_run = 0;
475 
476  while (superblocks_decoded < s->superblock_count - num_partial_superblocks
477  && get_bits_left(gb) > 0) {
478 
479  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
480  bit = get_bits1(gb);
481  else
482  bit ^= 1;
483 
484  current_run = get_vlc2(gb,
485  s->superblock_run_length_vlc.table, 6, 2) + 1;
486  if (current_run == 34)
487  current_run += get_bits(gb, 12);
488 
489  for (j = 0; j < current_run; current_superblock++) {
490  if (current_superblock >= s->superblock_count) {
491  av_log(s->avctx, AV_LOG_ERROR, "Invalid fully coded superblock run length\n");
492  return -1;
493  }
494 
495  /* skip any superblocks already marked as partially coded */
496  if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
497  s->superblock_coding[current_superblock] = 2*bit;
498  j++;
499  }
500  }
501  superblocks_decoded += current_run;
502  }
503  }
504 
505  /* if there were partial blocks, initialize bitstream for
506  * unpacking fragment codings */
507  if (num_partial_superblocks) {
508 
509  current_run = 0;
510  bit = get_bits1(gb);
511  /* toggle the bit because as soon as the first run length is
512  * fetched the bit will be toggled again */
513  bit ^= 1;
514  }
515  }
516 
517  /* figure out which fragments are coded; iterate through each
518  * superblock (all planes) */
519  s->total_num_coded_frags = 0;
521 
522  for (plane = 0; plane < 3; plane++) {
523  int sb_start = superblock_starts[plane];
524  int sb_end = sb_start + (plane ? s->c_superblock_count : s->y_superblock_count);
525  int num_coded_frags = 0;
526 
527  for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
528 
529  /* iterate through all 16 fragments in a superblock */
530  for (j = 0; j < 16; j++) {
531 
532  /* if the fragment is in bounds, check its coding status */
533  current_fragment = s->superblock_fragments[i * 16 + j];
534  if (current_fragment != -1) {
535  int coded = s->superblock_coding[i];
536 
537  if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {
538 
539  /* fragment may or may not be coded; this is the case
540  * that cares about the fragment coding runs */
541  if (current_run-- == 0) {
542  bit ^= 1;
543  current_run = get_vlc2(gb,
544  s->fragment_run_length_vlc.table, 5, 2);
545  }
546  coded = bit;
547  }
548 
549  if (coded) {
550  /* default mode; actual mode will be decoded in
551  * the next phase */
552  s->all_fragments[current_fragment].coding_method =
553  MODE_INTER_NO_MV;
554  s->coded_fragment_list[plane][num_coded_frags++] =
555  current_fragment;
556  } else {
557  /* not coded; copy this fragment from the prior frame */
558  s->all_fragments[current_fragment].coding_method =
559  MODE_COPY;
560  }
561  }
562  }
563  }
564  s->total_num_coded_frags += num_coded_frags;
565  for (i = 0; i < 64; i++)
566  s->num_coded_frags[plane][i] = num_coded_frags;
567  if (plane < 2)
568  s->coded_fragment_list[plane+1] = s->coded_fragment_list[plane] + num_coded_frags;
569  }
570  return 0;
571 }
572 
573 /*
574  * This function unpacks all the coding mode data for individual macroblocks
575  * from the bitstream.
576  */
577 static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
578 {
579  int i, j, k, sb_x, sb_y;
580  int scheme;
581  int current_macroblock;
582  int current_fragment;
583  int coding_mode;
584  int custom_mode_alphabet[CODING_MODE_COUNT];
585  const int *alphabet;
586  Vp3Fragment *frag;
587 
588  if (s->keyframe) {
589  for (i = 0; i < s->fragment_count; i++)
590  s->all_fragments[i].coding_method = MODE_INTRA;
591 
592  } else {
593 
594  /* fetch the mode coding scheme for this frame */
595  scheme = get_bits(gb, 3);
596 
597  /* is it a custom coding scheme? */
598  if (scheme == 0) {
599  for (i = 0; i < 8; i++)
600  custom_mode_alphabet[i] = MODE_INTER_NO_MV;
601  for (i = 0; i < 8; i++)
602  custom_mode_alphabet[get_bits(gb, 3)] = i;
603  alphabet = custom_mode_alphabet;
604  } else
605  alphabet = ModeAlphabet[scheme-1];
606 
607  /* iterate through all of the macroblocks that contain 1 or more
608  * coded fragments */
609  for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
610  for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
611  if (get_bits_left(gb) <= 0)
612  return -1;
613 
614  for (j = 0; j < 4; j++) {
615  int mb_x = 2*sb_x + (j>>1);
616  int mb_y = 2*sb_y + (((j>>1)+j)&1);
617  current_macroblock = mb_y * s->macroblock_width + mb_x;
618 
619  if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height)
620  continue;
621 
622 #define BLOCK_X (2*mb_x + (k&1))
623 #define BLOCK_Y (2*mb_y + (k>>1))
624  /* coding modes are only stored if the macroblock has at least one
625  * luma block coded, otherwise it must be INTER_NO_MV */
626  for (k = 0; k < 4; k++) {
627  current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
628  if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
629  break;
630  }
631  if (k == 4) {
632  s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
633  continue;
634  }
635 
636  /* mode 7 means get 3 bits for each coding mode */
637  if (scheme == 7)
638  coding_mode = get_bits(gb, 3);
639  else
640  coding_mode = alphabet
641  [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
642 
643  s->macroblock_coding[current_macroblock] = coding_mode;
644  for (k = 0; k < 4; k++) {
645  frag = s->all_fragments + BLOCK_Y*s->fragment_width[0] + BLOCK_X;
646  if (frag->coding_method != MODE_COPY)
647  frag->coding_method = coding_mode;
648  }
649 
650 #define SET_CHROMA_MODES \
651  if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
652  frag[s->fragment_start[1]].coding_method = coding_mode;\
653  if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
654  frag[s->fragment_start[2]].coding_method = coding_mode;
655 
656  if (s->chroma_y_shift) {
657  frag = s->all_fragments + mb_y*s->fragment_width[1] + mb_x;
659  } else if (s->chroma_x_shift) {
660  frag = s->all_fragments + 2*mb_y*s->fragment_width[1] + mb_x;
661  for (k = 0; k < 2; k++) {
663  frag += s->fragment_width[1];
664  }
665  } else {
666  for (k = 0; k < 4; k++) {
667  frag = s->all_fragments + BLOCK_Y*s->fragment_width[1] + BLOCK_X;
669  }
670  }
671  }
672  }
673  }
674  }
675 
676  return 0;
677 }
678 
679 /*
680  * This function unpacks all the motion vectors for the individual
681  * macroblocks from the bitstream.
682  */
683 static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
684 {
685  int j, k, sb_x, sb_y;
686  int coding_mode;
687  int motion_x[4];
688  int motion_y[4];
689  int last_motion_x = 0;
690  int last_motion_y = 0;
691  int prior_last_motion_x = 0;
692  int prior_last_motion_y = 0;
693  int current_macroblock;
694  int current_fragment;
695  int frag;
696 
697  if (s->keyframe)
698  return 0;
699 
700  /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
701  coding_mode = get_bits1(gb);
702 
703  /* iterate through all of the macroblocks that contain 1 or more
704  * coded fragments */
705  for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
706  for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
707  if (get_bits_left(gb) <= 0)
708  return -1;
709 
710  for (j = 0; j < 4; j++) {
711  int mb_x = 2*sb_x + (j>>1);
712  int mb_y = 2*sb_y + (((j>>1)+j)&1);
713  current_macroblock = mb_y * s->macroblock_width + mb_x;
714 
715  if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height ||
716  (s->macroblock_coding[current_macroblock] == MODE_COPY))
717  continue;
718 
719  switch (s->macroblock_coding[current_macroblock]) {
720 
721  case MODE_INTER_PLUS_MV:
722  case MODE_GOLDEN_MV:
723  /* all 6 fragments use the same motion vector */
724  if (coding_mode == 0) {
725  motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
726  motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
727  } else {
728  motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
729  motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
730  }
731 
732  /* vector maintenance, only on MODE_INTER_PLUS_MV */
733  if (s->macroblock_coding[current_macroblock] ==
734  MODE_INTER_PLUS_MV) {
735  prior_last_motion_x = last_motion_x;
736  prior_last_motion_y = last_motion_y;
737  last_motion_x = motion_x[0];
738  last_motion_y = motion_y[0];
739  }
740  break;
741 
742  case MODE_INTER_FOURMV:
743  /* vector maintenance */
744  prior_last_motion_x = last_motion_x;
745  prior_last_motion_y = last_motion_y;
746 
747  /* fetch 4 vectors from the bitstream, one for each
748  * Y fragment, then average for the C fragment vectors */
749  for (k = 0; k < 4; k++) {
750  current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
751  if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
752  if (coding_mode == 0) {
753  motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
754  motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
755  } else {
756  motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
757  motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
758  }
759  last_motion_x = motion_x[k];
760  last_motion_y = motion_y[k];
761  } else {
762  motion_x[k] = 0;
763  motion_y[k] = 0;
764  }
765  }
766  break;
767 
768  case MODE_INTER_LAST_MV:
769  /* all 6 fragments use the last motion vector */
770  motion_x[0] = last_motion_x;
771  motion_y[0] = last_motion_y;
772 
773  /* no vector maintenance (last vector remains the
774  * last vector) */
775  break;
776 
777  case MODE_INTER_PRIOR_LAST:
778  /* all 6 fragments use the motion vector prior to the
779  * last motion vector */
780  motion_x[0] = prior_last_motion_x;
781  motion_y[0] = prior_last_motion_y;
782 
783  /* vector maintenance */
784  prior_last_motion_x = last_motion_x;
785  prior_last_motion_y = last_motion_y;
786  last_motion_x = motion_x[0];
787  last_motion_y = motion_y[0];
788  break;
789 
790  default:
791  /* covers intra, inter without MV, golden without MV */
792  motion_x[0] = 0;
793  motion_y[0] = 0;
794 
795  /* no vector maintenance */
796  break;
797  }
798 
799  /* assign the motion vectors to the correct fragments */
800  for (k = 0; k < 4; k++) {
801  current_fragment =
802  BLOCK_Y*s->fragment_width[0] + BLOCK_X;
803  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
804  s->motion_val[0][current_fragment][0] = motion_x[k];
805  s->motion_val[0][current_fragment][1] = motion_y[k];
806  } else {
807  s->motion_val[0][current_fragment][0] = motion_x[0];
808  s->motion_val[0][current_fragment][1] = motion_y[0];
809  }
810  }
811 
812  if (s->chroma_y_shift) {
813  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
814  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2);
815  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2);
816  }
817  motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
818  motion_y[0] = (motion_y[0]>>1) | (motion_y[0]&1);
819  frag = mb_y*s->fragment_width[1] + mb_x;
820  s->motion_val[1][frag][0] = motion_x[0];
821  s->motion_val[1][frag][1] = motion_y[0];
822  } else if (s->chroma_x_shift) {
823  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
824  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
825  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
826  motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
827  motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
828  } else {
829  motion_x[1] = motion_x[0];
830  motion_y[1] = motion_y[0];
831  }
832  motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
833  motion_x[1] = (motion_x[1]>>1) | (motion_x[1]&1);
834 
835  frag = 2*mb_y*s->fragment_width[1] + mb_x;
836  for (k = 0; k < 2; k++) {
837  s->motion_val[1][frag][0] = motion_x[k];
838  s->motion_val[1][frag][1] = motion_y[k];
839  frag += s->fragment_width[1];
840  }
841  } else {
842  for (k = 0; k < 4; k++) {
843  frag = BLOCK_Y*s->fragment_width[1] + BLOCK_X;
844  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
845  s->motion_val[1][frag][0] = motion_x[k];
846  s->motion_val[1][frag][1] = motion_y[k];
847  } else {
848  s->motion_val[1][frag][0] = motion_x[0];
849  s->motion_val[1][frag][1] = motion_y[0];
850  }
851  }
852  }
853  }
854  }
855  }
856 
857  return 0;
858 }
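/* Illustrative sketch (standalone, not used by the decoder): deriving the
 * 4:2:0 chroma vector for a FOURMV macroblock as done above. The four luma
 * vectors are averaged with rounding (RSHIFT), then halved with
 * (v>>1)|(v&1), which keeps the low (half-pel) bit set whenever the vector
 * being halved was odd. */
static void chroma_mv_420(const int mv_x[4], const int mv_y[4],
                          int *chroma_x, int *chroma_y)
{
    int x = RSHIFT(mv_x[0] + mv_x[1] + mv_x[2] + mv_x[3], 2);
    int y = RSHIFT(mv_y[0] + mv_y[1] + mv_y[2] + mv_y[3], 2);

    *chroma_x = (x >> 1) | (x & 1);
    *chroma_y = (y >> 1) | (y & 1);
}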
859 
860 static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
861 {
862  int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
863  int num_blocks = s->total_num_coded_frags;
864 
865  for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) {
866  i = blocks_decoded = num_blocks_at_qpi = 0;
867 
868  bit = get_bits1(gb) ^ 1;
869  run_length = 0;
870 
871  do {
872  if (run_length == MAXIMUM_LONG_BIT_RUN)
873  bit = get_bits1(gb);
874  else
875  bit ^= 1;
876 
877  run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
878  if (run_length == 34)
879  run_length += get_bits(gb, 12);
880  blocks_decoded += run_length;
881 
882  if (!bit)
883  num_blocks_at_qpi += run_length;
884 
885  for (j = 0; j < run_length; i++) {
886  if (i >= s->total_num_coded_frags)
887  return -1;
888 
889  if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
890  s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
891  j++;
892  }
893  }
894  } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);
895 
896  num_blocks -= num_blocks_at_qpi;
897  }
898 
899  return 0;
900 }
901 
902 /*
903  * This function is called by unpack_dct_coeffs() to extract the VLCs from
904  * the bitstream. The VLCs encode tokens which are used to unpack DCT
905  * data. This function unpacks all the VLCs for either the Y plane or both
906  * C planes, and is called for DC coefficients or different AC coefficient
907  * levels (since different coefficient types require different VLC tables).
908  *
909  * This function returns a residual eob run. E.g., if a particular token gave
910  * instructions to EOB the next 5 fragments and there were only 2 fragments
911  * left in the current fragment range, 3 would be returned so that it could
912  * be passed into the next call to this same function.
913  */
914 static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
915  VLC *table, int coeff_index,
916  int plane,
917  int eob_run)
918 {
919  int i, j = 0;
920  int token;
921  int zero_run = 0;
922  DCTELEM coeff = 0;
923  int bits_to_get;
924  int blocks_ended;
925  int coeff_i = 0;
926  int num_coeffs = s->num_coded_frags[plane][coeff_index];
927  int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];
928 
929  /* local references to structure members to avoid repeated dereferences */
930  int *coded_fragment_list = s->coded_fragment_list[plane];
931  Vp3Fragment *all_fragments = s->all_fragments;
932  VLC_TYPE (*vlc_table)[2] = table->table;
933 
934  if (num_coeffs < 0)
935  av_log(s->avctx, AV_LOG_ERROR, "Invalid number of coefficients at level %d\n", coeff_index);
936 
937  if (eob_run > num_coeffs) {
938  coeff_i = blocks_ended = num_coeffs;
939  eob_run -= num_coeffs;
940  } else {
941  coeff_i = blocks_ended = eob_run;
942  eob_run = 0;
943  }
944 
945  // insert fake EOB token to cover the split between planes or zzi
946  if (blocks_ended)
947  dct_tokens[j++] = blocks_ended << 2;
948 
949  while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
950  /* decode a VLC into a token */
951  token = get_vlc2(gb, vlc_table, 11, 3);
952  /* use the token to get a zero run, a coefficient, and an eob run */
953  if ((unsigned) token <= 6U) {
954  eob_run = eob_run_base[token];
955  if (eob_run_get_bits[token])
956  eob_run += get_bits(gb, eob_run_get_bits[token]);
957 
958  // record only the number of blocks ended in this plane,
959  // any spill will be recorded in the next plane.
960  if (eob_run > num_coeffs - coeff_i) {
961  dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
962  blocks_ended += num_coeffs - coeff_i;
963  eob_run -= num_coeffs - coeff_i;
964  coeff_i = num_coeffs;
965  } else {
966  dct_tokens[j++] = TOKEN_EOB(eob_run);
967  blocks_ended += eob_run;
968  coeff_i += eob_run;
969  eob_run = 0;
970  }
971  } else if (token >= 0) {
972  bits_to_get = coeff_get_bits[token];
973  if (bits_to_get)
974  bits_to_get = get_bits(gb, bits_to_get);
975  coeff = coeff_tables[token][bits_to_get];
976 
977  zero_run = zero_run_base[token];
978  if (zero_run_get_bits[token])
979  zero_run += get_bits(gb, zero_run_get_bits[token]);
980 
981  if (zero_run) {
982  dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
983  } else {
984  // Save DC into the fragment structure. DC prediction is
985  // done in raster order, so the actual DC can't be in with
986  // other tokens. We still need the token in dct_tokens[]
987  // however, or else the structure collapses on itself.
988  if (!coeff_index)
989  all_fragments[coded_fragment_list[coeff_i]].dc = coeff;
990 
991  dct_tokens[j++] = TOKEN_COEFF(coeff);
992  }
993 
994  if (coeff_index + zero_run > 64) {
995  av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with"
996  " %d coeffs left\n", zero_run, 64-coeff_index);
997  zero_run = 64 - coeff_index;
998  }
999 
1000  // zero runs code multiple coefficients,
1001  // so don't try to decode coeffs for those higher levels
1002  for (i = coeff_index+1; i <= coeff_index+zero_run; i++)
1003  s->num_coded_frags[plane][i]--;
1004  coeff_i++;
1005  } else {
1007  "Invalid token %d\n", token);
1008  return -1;
1009  }
1010  }
1011 
1012  if (blocks_ended > s->num_coded_frags[plane][coeff_index])
1013  av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");
1014 
1015  // decrement the number of blocks that have higher coefficients for each
1016  // EOB run at this level
1017  if (blocks_ended)
1018  for (i = coeff_index+1; i < 64; i++)
1019  s->num_coded_frags[plane][i] -= blocks_ended;
1020 
1021  // setup the next buffer
1022  if (plane < 2)
1023  s->dct_tokens[plane+1][coeff_index] = dct_tokens + j;
1024  else if (coeff_index < 63)
1025  s->dct_tokens[0][coeff_index+1] = dct_tokens + j;
1026 
1027  return eob_run;
1028 }
1029 
1030 static void reverse_dc_prediction(Vp3DecodeContext *s,
1031  int first_fragment,
1032  int fragment_width,
1033  int fragment_height);
1034 /*
1035  * This function unpacks all of the DCT coefficient data from the
1036  * bitstream.
1037  */
1038 static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1039 {
1040  int i;
1041  int dc_y_table;
1042  int dc_c_table;
1043  int ac_y_table;
1044  int ac_c_table;
1045  int residual_eob_run = 0;
1046  VLC *y_tables[64];
1047  VLC *c_tables[64];
1048 
1049  s->dct_tokens[0][0] = s->dct_tokens_base;
1050 
1051  /* fetch the DC table indexes */
1052  dc_y_table = get_bits(gb, 4);
1053  dc_c_table = get_bits(gb, 4);
1054 
1055  /* unpack the Y plane DC coefficients */
1056  residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
1057  0, residual_eob_run);
1058  if (residual_eob_run < 0)
1059  return residual_eob_run;
1060 
1061  /* reverse prediction of the Y-plane DC coefficients */
1062  reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);
1063 
1064  /* unpack the C plane DC coefficients */
1065  residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
1066  1, residual_eob_run);
1067  if (residual_eob_run < 0)
1068  return residual_eob_run;
1069  residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
1070  2, residual_eob_run);
1071  if (residual_eob_run < 0)
1072  return residual_eob_run;
1073 
1074  /* reverse prediction of the C-plane DC coefficients */
1075  if (!(s->avctx->flags & CODEC_FLAG_GRAY))
1076  {
1077  reverse_dc_prediction(s, s->fragment_start[1],
1078  s->fragment_width[1], s->fragment_height[1]);
1079  reverse_dc_prediction(s, s->fragment_start[2],
1080  s->fragment_width[1], s->fragment_height[1]);
1081  }
1082 
1083  /* fetch the AC table indexes */
1084  ac_y_table = get_bits(gb, 4);
1085  ac_c_table = get_bits(gb, 4);
1086 
1087  /* build tables of AC VLC tables */
1088  for (i = 1; i <= 5; i++) {
1089  y_tables[i] = &s->ac_vlc_1[ac_y_table];
1090  c_tables[i] = &s->ac_vlc_1[ac_c_table];
1091  }
1092  for (i = 6; i <= 14; i++) {
1093  y_tables[i] = &s->ac_vlc_2[ac_y_table];
1094  c_tables[i] = &s->ac_vlc_2[ac_c_table];
1095  }
1096  for (i = 15; i <= 27; i++) {
1097  y_tables[i] = &s->ac_vlc_3[ac_y_table];
1098  c_tables[i] = &s->ac_vlc_3[ac_c_table];
1099  }
1100  for (i = 28; i <= 63; i++) {
1101  y_tables[i] = &s->ac_vlc_4[ac_y_table];
1102  c_tables[i] = &s->ac_vlc_4[ac_c_table];
1103  }
1104 
1105  /* decode all AC coefficients */
1106  for (i = 1; i <= 63; i++) {
1107  residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
1108  0, residual_eob_run);
1109  if (residual_eob_run < 0)
1110  return residual_eob_run;
1111 
1112  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1113  1, residual_eob_run);
1114  if (residual_eob_run < 0)
1115  return residual_eob_run;
1116  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1117  2, residual_eob_run);
1118  if (residual_eob_run < 0)
1119  return residual_eob_run;
1120  }
1121 
1122  return 0;
1123 }
1124 
1125 /*
1126  * This function reverses the DC prediction for each coded fragment in
1127  * the frame. Much of this function is adapted directly from the original
1128  * VP3 source code.
1129  */
1130 #define COMPATIBLE_FRAME(x) \
1131  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1132 #define DC_COEFF(u) s->all_fragments[u].dc
1133 
1134 static void reverse_dc_prediction(Vp3DecodeContext *s,
1135  int first_fragment,
1136  int fragment_width,
1137  int fragment_height)
1138 {
1139 
1140 #define PUL 8
1141 #define PU 4
1142 #define PUR 2
1143 #define PL 1
1144 
1145  int x, y;
1146  int i = first_fragment;
1147 
1148  int predicted_dc;
1149 
1150  /* DC values for the left, up-left, up, and up-right fragments */
1151  int vl, vul, vu, vur;
1152 
1153  /* indexes for the left, up-left, up, and up-right fragments */
1154  int l, ul, u, ur;
1155 
1156  /*
1157  * The 4 fields mean:
1158  * 0: up-left multiplier
1159  * 1: up multiplier
1160  * 2: up-right multiplier
1161  * 3: left multiplier
1162  */
1163  static const int predictor_transform[16][4] = {
1164  { 0, 0, 0, 0},
1165  { 0, 0, 0,128}, // PL
1166  { 0, 0,128, 0}, // PUR
1167  { 0, 0, 53, 75}, // PUR|PL
1168  { 0,128, 0, 0}, // PU
1169  { 0, 64, 0, 64}, // PU|PL
1170  { 0,128, 0, 0}, // PU|PUR
1171  { 0, 0, 53, 75}, // PU|PUR|PL
1172  {128, 0, 0, 0}, // PUL
1173  { 0, 0, 0,128}, // PUL|PL
1174  { 64, 0, 64, 0}, // PUL|PUR
1175  { 0, 0, 53, 75}, // PUL|PUR|PL
1176  { 0,128, 0, 0}, // PUL|PU
1177  {-104,116, 0,116}, // PUL|PU|PL
1178  { 24, 80, 24, 0}, // PUL|PU|PUR
1179  {-104,116, 0,116} // PUL|PU|PUR|PL
1180  };
1181 
1182  /* This table shows which types of blocks can use other blocks for
1183  * prediction. For example, INTRA is the only mode in this table to
1184  * have a frame number of 0. That means INTRA blocks can only predict
1185  * from other INTRA blocks. There are 2 golden frame coding types;
1186  * blocks encoded in these modes can only predict from other blocks
1187  * that were encoded with one of these 2 modes. */
1188  static const unsigned char compatible_frame[9] = {
1189  1, /* MODE_INTER_NO_MV */
1190  0, /* MODE_INTRA */
1191  1, /* MODE_INTER_PLUS_MV */
1192  1, /* MODE_INTER_LAST_MV */
1193  1, /* MODE_INTER_PRIOR_LAST */
1194  2, /* MODE_USING_GOLDEN */
1195  2, /* MODE_GOLDEN_MV */
1196  1, /* MODE_INTER_FOURMV */
1197  3 /* MODE_COPY */
1198  };
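/* For example: an INTER_PLUS_MV fragment (group 1) may predict its DC from an
 * INTER_LAST_MV neighbour (also group 1), but not from an INTRA neighbour
 * (group 0) or a GOLDEN_MV neighbour (group 2). */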
1199  int current_frame_type;
1200 
1201  /* there is a last DC predictor for each of the 3 frame types */
1202  short last_dc[3];
1203 
1204  int transform = 0;
1205 
1206  vul = vu = vur = vl = 0;
1207  last_dc[0] = last_dc[1] = last_dc[2] = 0;
1208 
1209  /* for each fragment row... */
1210  for (y = 0; y < fragment_height; y++) {
1211 
1212  /* for each fragment in a row... */
1213  for (x = 0; x < fragment_width; x++, i++) {
1214 
1215  /* reverse prediction if this block was coded */
1216  if (s->all_fragments[i].coding_method != MODE_COPY) {
1217 
1218  current_frame_type =
1219  compatible_frame[s->all_fragments[i].coding_method];
1220 
1221  transform= 0;
1222  if(x){
1223  l= i-1;
1224  vl = DC_COEFF(l);
1225  if(COMPATIBLE_FRAME(l))
1226  transform |= PL;
1227  }
1228  if(y){
1229  u= i-fragment_width;
1230  vu = DC_COEFF(u);
1231  if(COMPATIBLE_FRAME(u))
1232  transform |= PU;
1233  if(x){
1234  ul= i-fragment_width-1;
1235  vul = DC_COEFF(ul);
1236  if(COMPATIBLE_FRAME(ul))
1237  transform |= PUL;
1238  }
1239  if(x + 1 < fragment_width){
1240  ur= i-fragment_width+1;
1241  vur = DC_COEFF(ur);
1242  if(COMPATIBLE_FRAME(ur))
1243  transform |= PUR;
1244  }
1245  }
1246 
1247  if (transform == 0) {
1248 
1249  /* if there were no fragments to predict from, use last
1250  * DC saved */
1251  predicted_dc = last_dc[current_frame_type];
1252  } else {
1253 
1254  /* apply the appropriate predictor transform */
1255  predicted_dc =
1256  (predictor_transform[transform][0] * vul) +
1257  (predictor_transform[transform][1] * vu) +
1258  (predictor_transform[transform][2] * vur) +
1259  (predictor_transform[transform][3] * vl);
1260 
1261  predicted_dc /= 128;
1262 
1263  /* check for outranging on the [ul u l] and
1264  * [ul u ur l] predictors */
1265  if ((transform == 15) || (transform == 13)) {
1266  if (FFABS(predicted_dc - vu) > 128)
1267  predicted_dc = vu;
1268  else if (FFABS(predicted_dc - vl) > 128)
1269  predicted_dc = vl;
1270  else if (FFABS(predicted_dc - vul) > 128)
1271  predicted_dc = vul;
1272  }
1273  }
1274 
1275  /* at long last, apply the predictor */
1276  DC_COEFF(i) += predicted_dc;
1277  /* save the DC */
1278  last_dc[current_frame_type] = DC_COEFF(i);
1279  }
1280  }
1281  }
1282 }
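/* Worked example of the prediction above (illustration only): with the left,
 * up and up-left neighbours present and compatible, transform == PUL|PU|PL ==
 * 13, so the weights are {-104, 116, 0, 116}. With vul = 10, vu = 12, vl = 11:
 *
 *   predicted_dc = (-104*10 + 116*12 + 116*11) / 128 = 1628 / 128 = 12
 *
 * The prediction is within 128 of vu, so the outranging check leaves it
 * untouched, and 12 is added to the fragment's decoded DC. */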
1283 
1284 static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
1285 {
1286  int x, y;
1287  int *bounding_values= s->bounding_values_array+127;
1288 
1289  int width = s->fragment_width[!!plane];
1290  int height = s->fragment_height[!!plane];
1291  int fragment = s->fragment_start [plane] + ystart * width;
1292  int stride = s->current_frame.linesize[plane];
1293  uint8_t *plane_data = s->current_frame.data [plane];
1294  if (!s->flipped_image) stride = -stride;
1295  plane_data += s->data_offset[plane] + 8*ystart*stride;
1296 
1297  for (y = ystart; y < yend; y++) {
1298 
1299  for (x = 0; x < width; x++) {
1300  /* This code basically just deblocks on the edges of coded blocks.
1301  * However, it has to be much more complicated because of the
1302  * braindamaged deblock ordering used in VP3/Theora. Order matters
1303  * because some pixels get filtered twice. */
1304  if( s->all_fragments[fragment].coding_method != MODE_COPY )
1305  {
1306  /* do not perform left edge filter for left column frags */
1307  if (x > 0) {
1308  s->vp3dsp.h_loop_filter(
1309  plane_data + 8*x,
1310  stride, bounding_values);
1311  }
1312 
1313  /* do not perform top edge filter for top row fragments */
1314  if (y > 0) {
1315  s->vp3dsp.v_loop_filter(
1316  plane_data + 8*x,
1317  stride, bounding_values);
1318  }
1319 
1320  /* do not perform right edge filter for right column
1321  * fragments or if right fragment neighbor is also coded
1322  * in this frame (it will be filtered in next iteration) */
1323  if ((x < width - 1) &&
1324  (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
1325  s->vp3dsp.h_loop_filter(
1326  plane_data + 8*x + 8,
1327  stride, bounding_values);
1328  }
1329 
1330  /* do not perform bottom edge filter for bottom row
1331  * fragments or if bottom fragment neighbor is also coded
1332  * in this frame (it will be filtered in the next row) */
1333  if ((y < height - 1) &&
1334  (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
1335  s->vp3dsp.v_loop_filter(
1336  plane_data + 8*x + 8*stride,
1337  stride, bounding_values);
1338  }
1339  }
1340 
1341  fragment++;
1342  }
1343  plane_data += 8*stride;
1344  }
1345 }
1346 
1351 static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
1352  int plane, int inter, DCTELEM block[64])
1353 {
1354  int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
1355  uint8_t *perm = s->scantable.permutated;
1356  int i = 0;
1357 
1358  do {
1359  int token = *s->dct_tokens[plane][i];
1360  switch (token & 3) {
1361  case 0: // EOB
1362  if (--token < 4) // 0-3 are token types, so the EOB run must now be 0
1363  s->dct_tokens[plane][i]++;
1364  else
1365  *s->dct_tokens[plane][i] = token & ~3;
1366  goto end;
1367  case 1: // zero run
1368  s->dct_tokens[plane][i]++;
1369  i += (token >> 2) & 0x7f;
1370  if (i > 63) {
1371  av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
1372  return i;
1373  }
1374  block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
1375  i++;
1376  break;
1377  case 2: // coeff
1378  block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
1379  s->dct_tokens[plane][i++]++;
1380  break;
1381  default: // shouldn't happen
1382  return i;
1383  }
1384  } while (i < 64);
1385  // return value is expected to be a valid level
1386  i--;
1387 end:
1388  // the actual DC+prediction is in the fragment structure
1389  block[0] = frag->dc * s->qmat[0][inter][plane][0];
1390  return i;
1391 }
1392 
1396 static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
1397 {
1398  int h, cy, i;
1399  int offset[AV_NUM_DATA_POINTERS];
1400 
1401  if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
1402  int y_flipped = s->flipped_image ? s->avctx->height-y : y;
1403 
1404  // At the end of the frame, report INT_MAX instead of the height of the frame.
1405  // This makes the other threads' ff_thread_await_progress() calls cheaper, because
1406  // they don't have to clip their values.
1407  ff_thread_report_progress(&s->current_frame, y_flipped==s->avctx->height ? INT_MAX : y_flipped-1, 0);
1408  }
1409 
1410  if(s->avctx->draw_horiz_band==NULL)
1411  return;
1412 
1413  h= y - s->last_slice_end;
1414  s->last_slice_end= y;
1415  y -= h;
1416 
1417  if (!s->flipped_image) {
1418  y = s->avctx->height - y - h;
1419  }
1420 
1421  cy = y >> s->chroma_y_shift;
1422  offset[0] = s->current_frame.linesize[0]*y;
1423  offset[1] = s->current_frame.linesize[1]*cy;
1424  offset[2] = s->current_frame.linesize[2]*cy;
1425  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
1426  offset[i] = 0;
1427 
1428  emms_c();
1429  s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h);
1430 }
1431 
1436 static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y)
1437 {
1438  AVFrame *ref_frame;
1439  int ref_row;
1440  int border = motion_y&1;
1441 
1442  if (fragment->coding_method == MODE_USING_GOLDEN ||
1443  fragment->coding_method == MODE_GOLDEN_MV)
1444  ref_frame = &s->golden_frame;
1445  else
1446  ref_frame = &s->last_frame;
1447 
1448  ref_row = y + (motion_y>>1);
1449  ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
1450 
1451  ff_thread_await_progress(ref_frame, ref_row, 0);
1452 }
1453 
1454 /*
1455  * Perform the final rendering for a particular slice of data.
1456  * The slice number ranges from 0..(c_superblock_height - 1).
1457  */
1458 static void render_slice(Vp3DecodeContext *s, int slice)
1459 {
1460  int x, y, i, j, fragment;
1461  LOCAL_ALIGNED_16(DCTELEM, block, [64]);
1462  int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
1463  int motion_halfpel_index;
1464  uint8_t *motion_source;
1465  int plane, first_pixel;
1466 
1467  if (slice >= s->c_superblock_height)
1468  return;
1469 
1470  for (plane = 0; plane < 3; plane++) {
1471  uint8_t *output_plane = s->current_frame.data [plane] + s->data_offset[plane];
1472  uint8_t * last_plane = s-> last_frame.data [plane] + s->data_offset[plane];
1473  uint8_t *golden_plane = s-> golden_frame.data [plane] + s->data_offset[plane];
1474  int stride = s->current_frame.linesize[plane];
1475  int plane_width = s->width >> (plane && s->chroma_x_shift);
1476  int plane_height = s->height >> (plane && s->chroma_y_shift);
1477  int8_t (*motion_val)[2] = s->motion_val[!!plane];
1478 
1479  int sb_x, sb_y = slice << (!plane && s->chroma_y_shift);
1480  int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
1481  int slice_width = plane ? s->c_superblock_width : s->y_superblock_width;
1482 
1483  int fragment_width = s->fragment_width[!!plane];
1484  int fragment_height = s->fragment_height[!!plane];
1485  int fragment_start = s->fragment_start[plane];
1486  int do_await = !plane && HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME);
1487 
1488  if (!s->flipped_image) stride = -stride;
1489  if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY))
1490  continue;
1491 
1492  /* for each superblock row in the slice (both of them)... */
1493  for (; sb_y < slice_height; sb_y++) {
1494 
1495  /* for each superblock in a row... */
1496  for (sb_x = 0; sb_x < slice_width; sb_x++) {
1497 
1498  /* for each block in a superblock... */
1499  for (j = 0; j < 16; j++) {
1500  x = 4*sb_x + hilbert_offset[j][0];
1501  y = 4*sb_y + hilbert_offset[j][1];
1502  fragment = y*fragment_width + x;
1503 
1504  i = fragment_start + fragment;
1505 
1506  // bounds check
1507  if (x >= fragment_width || y >= fragment_height)
1508  continue;
1509 
1510  first_pixel = 8*y*stride + 8*x;
1511 
1512  if (do_await && s->all_fragments[i].coding_method != MODE_INTRA)
1513  await_reference_row(s, &s->all_fragments[i], motion_val[fragment][1], (16*y) >> s->chroma_y_shift);
1514 
1515  /* transform if this block was coded */
1516  if (s->all_fragments[i].coding_method != MODE_COPY) {
1517  if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
1518  (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
1519  motion_source= golden_plane;
1520  else
1521  motion_source= last_plane;
1522 
1523  motion_source += first_pixel;
1524  motion_halfpel_index = 0;
1525 
1526  /* sort out the motion vector if this fragment is coded
1527  * using a motion vector method */
1528  if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
1529  (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
1530  int src_x, src_y;
1531  motion_x = motion_val[fragment][0];
1532  motion_y = motion_val[fragment][1];
1533 
1534  src_x= (motion_x>>1) + 8*x;
1535  src_y= (motion_y>>1) + 8*y;
1536 
1537  motion_halfpel_index = motion_x & 0x01;
1538  motion_source += (motion_x >> 1);
1539 
1540  motion_halfpel_index |= (motion_y & 0x01) << 1;
1541  motion_source += ((motion_y >> 1) * stride);
1542 
1543  if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
1544  uint8_t *temp= s->edge_emu_buffer;
1545  if(stride<0) temp -= 8*stride;
1546 
1547  s->vdsp.emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
1548  motion_source= temp;
1549  }
1550  }
1551 
1552 
1553  /* first, take care of copying a block from either the
1554  * previous or the golden frame */
1555  if (s->all_fragments[i].coding_method != MODE_INTRA) {
1556  /* Note, it is possible to implement all MC cases with
1557  put_no_rnd_pixels_l2 which would look more like the
1558  VP3 source but this would be slower as
1559  put_no_rnd_pixels_tab is better optimized */
1560  if(motion_halfpel_index != 3){
1561  s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
1562  output_plane + first_pixel,
1563  motion_source, stride, 8);
1564  }else{
1565  int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
1566  s->dsp.put_no_rnd_pixels_l2[1](
1567  output_plane + first_pixel,
1568  motion_source - d,
1569  motion_source + stride + 1 + d,
1570  stride, 8);
1571  }
1572  }
1573 
1574  s->dsp.clear_block(block);
1575 
1576  /* invert DCT and place (or add) in final output */
1577 
1578  if (s->all_fragments[i].coding_method == MODE_INTRA) {
1579  int index;
1580  index = vp3_dequant(s, s->all_fragments + i, plane, 0, block);
1581  if (index > 63)
1582  continue;
1583  s->vp3dsp.idct_put(
1584  output_plane + first_pixel,
1585  stride,
1586  block);
1587  } else {
1588  int index = vp3_dequant(s, s->all_fragments + i, plane, 1, block);
1589  if (index > 63)
1590  continue;
1591  if (index > 0) {
1592  s->vp3dsp.idct_add(
1593  output_plane + first_pixel,
1594  stride,
1595  block);
1596  } else {
1597  s->vp3dsp.idct_dc_add(output_plane + first_pixel, stride, block);
1598  }
1599  }
1600  } else {
1601 
1602  /* copy directly from the previous frame */
1603  s->dsp.put_pixels_tab[1][0](
1604  output_plane + first_pixel,
1605  last_plane + first_pixel,
1606  stride, 8);
1607 
1608  }
1609  }
1610  }
1611 
1612  // Filter up to the last row in the superblock row
1613  if (!s->skip_loop_filter)
1614  apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1));
1615  }
1616  }
1617 
1618  /* this looks like a good place for slice dispatch... */
1619  /* algorithm:
1620  * if (slice == s->macroblock_height - 1)
1621  * dispatch (both last slice & 2nd-to-last slice);
1622  * else if (slice > 0)
1623  * dispatch (slice - 1);
1624  */
1625 
1626  vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) -16, s->height-16));
1627 }
1628 
1630 static av_cold int allocate_tables(AVCodecContext *avctx)
1631 {
1632  Vp3DecodeContext *s = avctx->priv_data;
1633  int y_fragment_count, c_fragment_count;
1634 
1635  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
1636  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
1637 
1640  s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int));
1641  s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base));
1642  s->motion_val[0] = av_malloc(y_fragment_count * sizeof(*s->motion_val[0]));
1643  s->motion_val[1] = av_malloc(c_fragment_count * sizeof(*s->motion_val[1]));
1644 
1645  /* work out the block mapping tables */
1646  s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
1648 
1649  if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base ||
1651  !s->motion_val[0] || !s->motion_val[1]) {
1652  vp3_decode_end(avctx);
1653  return -1;
1654  }
1655 
1656  init_block_mapping(s);
1657 
1658  return 0;
1659 }
1660 
1661 static av_cold int vp3_decode_init(AVCodecContext *avctx)
1662 {
1663  Vp3DecodeContext *s = avctx->priv_data;
1664  int i, inter, plane;
1665  int c_width;
1666  int c_height;
1667  int y_fragment_count, c_fragment_count;
1668 
1669  if (avctx->codec_tag == MKTAG('V','P','3','0'))
1670  s->version = 0;
1671  else
1672  s->version = 1;
1673 
1674  s->avctx = avctx;
1675  s->width = FFALIGN(avctx->width, 16);
1676  s->height = FFALIGN(avctx->height, 16);
1677  if (avctx->pix_fmt == AV_PIX_FMT_NONE)
1678  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1680  ff_dsputil_init(&s->dsp, avctx);
1681  ff_videodsp_init(&s->vdsp, 8);
1682  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
1683 
1686 
1687  /* initialize to an impossible value which will force a recalculation
1688  * in the first frame decode */
1689  for (i = 0; i < 3; i++)
1690  s->qps[i] = -1;
1691 
1693  &s->chroma_y_shift);
1694 
1695  s->y_superblock_width = (s->width + 31) / 32;
1696  s->y_superblock_height = (s->height + 31) / 32;
1698 
1699  /* work out the dimensions for the C planes */
1700  c_width = s->width >> s->chroma_x_shift;
1701  c_height = s->height >> s->chroma_y_shift;
1702  s->c_superblock_width = (c_width + 31) / 32;
1703  s->c_superblock_height = (c_height + 31) / 32;
1705 
1709 
1710  s->macroblock_width = (s->width + 15) / 16;
1711  s->macroblock_height = (s->height + 15) / 16;
1713 
1714  s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
1715  s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
1716  s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift;
1717  s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
1718 
1719  /* fragment count covers all 8x8 blocks for all 3 planes */
1720  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
1721  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
1722  s->fragment_count = y_fragment_count + 2*c_fragment_count;
1723  s->fragment_start[1] = y_fragment_count;
1724  s->fragment_start[2] = y_fragment_count + c_fragment_count;
1725 
1726  if (!s->theora_tables)
1727  {
1728  for (i = 0; i < 64; i++) {
1731  s->base_matrix[0][i] = vp31_intra_y_dequant[i];
1732  s->base_matrix[1][i] = vp31_intra_c_dequant[i];
1733  s->base_matrix[2][i] = vp31_inter_dequant[i];
1735  }
1736 
1737  for(inter=0; inter<2; inter++){
1738  for(plane=0; plane<3; plane++){
1739  s->qr_count[inter][plane]= 1;
1740  s->qr_size [inter][plane][0]= 63;
1741  s->qr_base [inter][plane][0]=
1742  s->qr_base [inter][plane][1]= 2*inter + (!!plane)*!inter;
1743  }
1744  }
1745 
1746  /* init VLC tables */
1747  for (i = 0; i < 16; i++) {
1748 
1749  /* DC histograms */
1750  init_vlc(&s->dc_vlc[i], 11, 32,
1751  &dc_bias[i][0][1], 4, 2,
1752  &dc_bias[i][0][0], 4, 2, 0);
1753 
1754  /* group 1 AC histograms */
1755  init_vlc(&s->ac_vlc_1[i], 11, 32,
1756  &ac_bias_0[i][0][1], 4, 2,
1757  &ac_bias_0[i][0][0], 4, 2, 0);
1758 
1759  /* group 2 AC histograms */
1760  init_vlc(&s->ac_vlc_2[i], 11, 32,
1761  &ac_bias_1[i][0][1], 4, 2,
1762  &ac_bias_1[i][0][0], 4, 2, 0);
1763 
1764  /* group 3 AC histograms */
1765  init_vlc(&s->ac_vlc_3[i], 11, 32,
1766  &ac_bias_2[i][0][1], 4, 2,
1767  &ac_bias_2[i][0][0], 4, 2, 0);
1768 
1769  /* group 4 AC histograms */
1770  init_vlc(&s->ac_vlc_4[i], 11, 32,
1771  &ac_bias_3[i][0][1], 4, 2,
1772  &ac_bias_3[i][0][0], 4, 2, 0);
1773  }
1774  } else {
1775 
1776  for (i = 0; i < 16; i++) {
1777  /* DC histograms */
1778  if (init_vlc(&s->dc_vlc[i], 11, 32,
1779  &s->huffman_table[i][0][1], 8, 4,
1780  &s->huffman_table[i][0][0], 8, 4, 0) < 0)
1781  goto vlc_fail;
1782 
1783  /* group 1 AC histograms */
1784  if (init_vlc(&s->ac_vlc_1[i], 11, 32,
1785  &s->huffman_table[i+16][0][1], 8, 4,
1786  &s->huffman_table[i+16][0][0], 8, 4, 0) < 0)
1787  goto vlc_fail;
1788 
1789  /* group 2 AC histograms */
1790  if (init_vlc(&s->ac_vlc_2[i], 11, 32,
1791  &s->huffman_table[i+16*2][0][1], 8, 4,
1792  &s->huffman_table[i+16*2][0][0], 8, 4, 0) < 0)
1793  goto vlc_fail;
1794 
1795  /* group 3 AC histograms */
1796  if (init_vlc(&s->ac_vlc_3[i], 11, 32,
1797  &s->huffman_table[i+16*3][0][1], 8, 4,
1798  &s->huffman_table[i+16*3][0][0], 8, 4, 0) < 0)
1799  goto vlc_fail;
1800 
1801  /* group 4 AC histograms */
1802  if (init_vlc(&s->ac_vlc_4[i], 11, 32,
1803  &s->huffman_table[i+16*4][0][1], 8, 4,
1804  &s->huffman_table[i+16*4][0][0], 8, 4, 0) < 0)
1805  goto vlc_fail;
1806  }
1807  }
1808 
1809  init_vlc(&s->superblock_run_length_vlc, 6, 34,
1810  &superblock_run_length_vlc_table[0][1], 4, 2,
1811  &superblock_run_length_vlc_table[0][0], 4, 2, 0);
1812 
1813  init_vlc(&s->fragment_run_length_vlc, 5, 30,
1814  &fragment_run_length_vlc_table[0][1], 4, 2,
1815  &fragment_run_length_vlc_table[0][0], 4, 2, 0);
1816 
1817  init_vlc(&s->mode_code_vlc, 3, 8,
1818  &mode_code_vlc_table[0][1], 2, 1,
1819  &mode_code_vlc_table[0][0], 2, 1, 0);
1820 
1821  init_vlc(&s->motion_vector_vlc, 6, 63,
1822  &motion_vector_vlc_table[0][1], 2, 1,
1823  &motion_vector_vlc_table[0][0], 2, 1, 0);
1824 
1825  for (i = 0; i < 3; i++) {
1826  s->current_frame.data[i] = NULL;
1827  s->last_frame.data[i] = NULL;
1828  s->golden_frame.data[i] = NULL;
1829  }
1830 
1831  return allocate_tables(avctx);
1832 
1833 vlc_fail:
1834  av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n");
1835  return -1;
1836 }
1837 
1839 static void update_frames(AVCodecContext *avctx)
1840 {
1841  Vp3DecodeContext *s = avctx->priv_data;
1842 
1843  /* release the last frame, if it is allocated and if it is not the
1844  * golden frame */
1845  if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY)
1847 
1848  /* shuffle frames (last = current) */
1849  s->last_frame= s->current_frame;
1850 
1851  if (s->keyframe) {
1852  if (s->golden_frame.data[0])
1854  s->golden_frame = s->current_frame;
1856  }
1857 
1858  s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */
1859 }
1860 
1861 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1862 {
1863  Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data;
1864  int qps_changed = 0, i, err;
1865 
1866 #define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)
1867 
1868  if (!s1->current_frame.data[0]
1869  ||s->width != s1->width
1870  ||s->height!= s1->height) {
1871  if (s != s1)
1872  copy_fields(s, s1, golden_frame, current_frame);
1873  return -1;
1874  }
1875 
1876  if (s != s1) {
1877  // init tables if the first frame hasn't been decoded
1878  if (!s->current_frame.data[0]) {
1879  int y_fragment_count, c_fragment_count;
1880  s->avctx = dst;
1881  err = allocate_tables(dst);
1882  if (err)
1883  return err;
1884  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
1885  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
1886  memcpy(s->motion_val[0], s1->motion_val[0], y_fragment_count * sizeof(*s->motion_val[0]));
1887  memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1]));
1888  }
1889 
1890  // copy previous frame data
1891  copy_fields(s, s1, golden_frame, dsp);
1892 
1893  // copy qscale data if necessary
1894  for (i = 0; i < 3; i++) {
1895  if (s->qps[i] != s1->qps[i]) {
1896  qps_changed = 1;
1897  memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
1898  }
1899  }
1900 
1901  if (s->qps[0] != s1->qps[0])
1902  memcpy(&s->bounding_values_array, &s1->bounding_values_array, sizeof(s->bounding_values_array));
1903 
1904  if (qps_changed)
1905  copy_fields(s, s1, qps, superblock_count);
1906 #undef copy_fields
1907  }
1908 
1909  update_frames(dst);
1910 
1911  return 0;
1912 }
1913 
1914 static int vp3_decode_frame(AVCodecContext *avctx,
1915  void *data, int *got_frame,
1916  AVPacket *avpkt)
1917 {
1918  const uint8_t *buf = avpkt->data;
1919  int buf_size = avpkt->size;
1920  Vp3DecodeContext *s = avctx->priv_data;
1921  GetBitContext gb;
1922  int i;
1923 
1924  init_get_bits(&gb, buf, buf_size * 8);
1925 
1926  if (s->theora && get_bits1(&gb))
1927  {
1928  av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
1929  return -1;
1930  }
1931 
1932  s->keyframe = !get_bits1(&gb);
1933  if (!s->theora)
1934  skip_bits(&gb, 1);
1935  for (i = 0; i < 3; i++)
1936  s->last_qps[i] = s->qps[i];
1937 
1938  s->nqps=0;
1939  do{
1940  s->qps[s->nqps++]= get_bits(&gb, 6);
1941  } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb));
1942  for (i = s->nqps; i < 3; i++)
1943  s->qps[i] = -1;
1944 
1945  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1946  av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
1947  s->keyframe?"key":"", avctx->frame_number+1, s->qps[0]);
1948 
1949  s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
1950  avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL : AVDISCARD_NONKEY);
1951 
1952  if (s->qps[0] != s->last_qps[0])
1953  init_loop_filter(s);
1954 
1955  for (i = 0; i < s->nqps; i++)
1956  // reinit all dequantizers if the first one changed, because
1957  // the DC of the first quantizer must be used for all matrices
1958  if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
1959  init_dequantizer(s, i);
1960 
1961  if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
1962  return buf_size;
1963 
1964  s->current_frame.reference = 3;
1965  s->current_frame.pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1966  if (ff_thread_get_buffer(avctx, &s->current_frame) < 0) {
1967  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1968  goto error;
1969  }
1970 
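 /* Scratch buffer for motion compensation that reads past the frame edge:
  * nine rows at the luma stride cover an 8x8 block plus the extra row and
  * column needed for half-pel interpolation. */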
1971  if (!s->edge_emu_buffer)
1972  s->edge_emu_buffer = av_malloc(9*FFABS(s->current_frame.linesize[0]));
1973 
1974  if (s->keyframe) {
1975  if (!s->theora)
1976  {
1977  skip_bits(&gb, 4); /* width code */
1978  skip_bits(&gb, 4); /* height code */
1979  if (s->version)
1980  {
1981  s->version = get_bits(&gb, 5);
1982  if (avctx->frame_number == 0)
1983  av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
1984  }
1985  }
1986  if (s->version || s->theora)
1987  {
1988  if (get_bits1(&gb))
1989  av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
1990  skip_bits(&gb, 2); /* reserved? */
1991  }
1992  } else {
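 /* Inter frame but no golden frame yet (the stream did not start with a
  * keyframe): allocate a blank golden frame so prediction has a reference
  * instead of failing outright. */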
1993  if (!s->golden_frame.data[0]) {
1994  av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n");
1995 
1996  s->golden_frame.reference = 3;
1997  s->golden_frame.pict_type = AV_PICTURE_TYPE_I;
1998  if (ff_thread_get_buffer(avctx, &s->golden_frame) < 0) {
1999  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
2000  goto error;
2001  }
2002  s->last_frame = s->golden_frame;
2003  s->last_frame.type = FF_BUFFER_TYPE_COPY;
2004  ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
2005  }
2006  }
2007 
2008  memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2009  ff_thread_finish_setup(avctx);
2010 
2011  if (unpack_superblocks(s, &gb)){
2012  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2013  goto error;
2014  }
2015  if (unpack_modes(s, &gb)){
2016  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2017  goto error;
2018  }
2019  if (unpack_vectors(s, &gb)){
2020  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2021  goto error;
2022  }
2023  if (unpack_block_qpis(s, &gb)){
2024  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
2025  goto error;
2026  }
2027  if (unpack_dct_coeffs(s, &gb)){
2028  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2029  goto error;
2030  }
2031 
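 /* Set up per-plane output offsets: flipped (pre-alpha3 Theora) images are
  * written top-down from row 0, otherwise rendering starts at the last row
  * of each plane. */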
2032  for (i = 0; i < 3; i++) {
2033  int height = s->height >> (i && s->chroma_y_shift);
2034  if (s->flipped_image)
2035  s->data_offset[i] = 0;
2036  else
2037  s->data_offset[i] = (height-1) * s->current_frame.linesize[i];
2038  }
2039 
2040  s->last_slice_end = 0;
2041  for (i = 0; i < s->c_superblock_height; i++)
2042  render_slice(s, i);
2043 
2044  // filter the last row
2045  for (i = 0; i < 3; i++) {
2046  int row = (s->height >> (3+(i && s->chroma_y_shift))) - 1;
2047  apply_loop_filter(s, i, row, row+1);
2048  }
2049  vp3_draw_horiz_band(s, s->avctx->height);
2050 
2051  *got_frame = 1;
2052  *(AVFrame*)data= s->current_frame;
2053 
2054  if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
2055  update_frames(avctx);
2056 
2057  return buf_size;
2058 
2059 error:
2060  ff_thread_report_progress(&s->current_frame, INT_MAX, 0);
2061 
2062  if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
2063  avctx->release_buffer(avctx, &s->current_frame);
2064 
2065  return -1;
2066 }
2067 
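 /* Recursively walk one Huffman tree description from the setup header:
  * a 1 bit emits a leaf holding the code accumulated in hbits/huff_code_size,
  * a 0 bit descends into the two subtrees. */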
2068 static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
2069 {
2070  Vp3DecodeContext *s = avctx->priv_data;
2071 
2072  if (get_bits1(gb)) {
2073  int token;
2074  if (s->entries >= 32) { /* overflow */
2075  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2076  return -1;
2077  }
2078  token = get_bits(gb, 5);
2079  av_dlog(avctx, "hti %d hbits %x token %d entry : %d size %d\n",
2080  s->hti, s->hbits, token, s->entries, s->huff_code_size);
2081  s->huffman_table[s->hti][token][0] = s->hbits;
2082  s->huffman_table[s->hti][token][1] = s->huff_code_size;
2083  s->entries++;
2084  }
2085  else {
2086  if (s->huff_code_size >= 32) {/* overflow */
2087  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2088  return -1;
2089  }
2090  s->huff_code_size++;
2091  s->hbits <<= 1;
2092  if (read_huffman_tree(avctx, gb))
2093  return -1;
2094  s->hbits |= 1;
2095  if (read_huffman_tree(avctx, gb))
2096  return -1;
2097  s->hbits >>= 1;
2098  s->huff_code_size--;
2099  }
2100  return 0;
2101 }
2102 
2103 static int vp3_init_thread_copy(AVCodecContext *avctx)
2104 {
2105  Vp3DecodeContext *s = avctx->priv_data;
2106 
2107  s->superblock_coding = NULL;
2108  s->all_fragments = NULL;
2109  s->coded_fragment_list[0] = NULL;
2110  s->dct_tokens_base = NULL;
2111  s->superblock_fragments = NULL;
2112  s->macroblock_coding = NULL;
2113  s->motion_val[0] = NULL;
2114  s->motion_val[1] = NULL;
2115  s->edge_emu_buffer = NULL;
2116 
2117  return 0;
2118 }
2119 
2120 #if CONFIG_THEORA_DECODER
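 /* Maps the 2-bit pixel format field of the Theora info header to a pixel
  * format; per the spec, 0 is 4:2:0, 1 is reserved, 2 is 4:2:2 and 3 is 4:4:4. */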
2121 static const enum AVPixelFormat theora_pix_fmts[4] = {
2122  AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P
2123 };
2124 
2125 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
2126 {
2127  Vp3DecodeContext *s = avctx->priv_data;
2128  int visible_width, visible_height, colorspace;
2129  int offset_x = 0, offset_y = 0;
2130  AVRational fps, aspect;
2131 
2132  s->theora = get_bits_long(gb, 24);
2133  av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
2134 
2135  /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
2136  /* but previous versions have the image flipped relative to vp3 */
2137  if (s->theora < 0x030200)
2138  {
2139  s->flipped_image = 1;
2140  av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
2141  }
2142 
2143  visible_width = s->width = get_bits(gb, 16) << 4;
2144  visible_height = s->height = get_bits(gb, 16) << 4;
2145 
2146  if(av_image_check_size(s->width, s->height, 0, avctx)){
2147  av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
2148  s->width= s->height= 0;
2149  return -1;
2150  }
2151 
2152  if (s->theora >= 0x030200) {
2153  visible_width = get_bits_long(gb, 24);
2154  visible_height = get_bits_long(gb, 24);
2155 
2156  offset_x = get_bits(gb, 8); /* offset x */
2157  offset_y = get_bits(gb, 8); /* offset y, from bottom */
2158  }
2159 
2160  fps.num = get_bits_long(gb, 32);
2161  fps.den = get_bits_long(gb, 32);
2162  if (fps.num && fps.den) {
2163  if (fps.num < 0 || fps.den < 0) {
2164  av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
2165  return AVERROR_INVALIDDATA;
2166  }
2167  av_reduce(&avctx->time_base.num, &avctx->time_base.den,
2168  fps.den, fps.num, 1<<30);
2169  }
2170 
2171  aspect.num = get_bits_long(gb, 24);
2172  aspect.den = get_bits_long(gb, 24);
2173  if (aspect.num && aspect.den) {
2174  av_reduce(&avctx->sample_aspect_ratio.num,
2175  &avctx->sample_aspect_ratio.den,
2176  aspect.num, aspect.den, 1<<30);
2177  }
2178 
2179  if (s->theora < 0x030200)
2180  skip_bits(gb, 5); /* keyframe frequency force */
2181  colorspace = get_bits(gb, 8);
2182  skip_bits(gb, 24); /* bitrate */
2183 
2184  skip_bits(gb, 6); /* quality hint */
2185 
2186  if (s->theora >= 0x030200)
2187  {
2188  skip_bits(gb, 5); /* keyframe frequency force */
2189  avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
2190  skip_bits(gb, 3); /* reserved */
2191  }
2192 
2193 // align_get_bits(gb);
2194 
2195  if ( visible_width <= s->width && visible_width > s->width-16
2196  && visible_height <= s->height && visible_height > s->height-16
2197  && !offset_x && (offset_y == s->height - visible_height))
2198  avcodec_set_dimensions(avctx, visible_width, visible_height);
2199  else
2200  avcodec_set_dimensions(avctx, s->width, s->height);
2201 
2202  if (colorspace == 1) {
2203  avctx->color_primaries = AVCOL_PRI_BT470M;
2204  } else if (colorspace == 2) {
2205  avctx->color_primaries = AVCOL_PRI_BT470BG;
2206  }
2207  if (colorspace == 1 || colorspace == 2) {
2208  avctx->colorspace = AVCOL_SPC_BT470BG;
2209  avctx->color_trc = AVCOL_TRC_BT709;
2210  }
2211 
2212  return 0;
2213 }
2214 
2215 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
2216 {
2217  Vp3DecodeContext *s = avctx->priv_data;
2218  int i, n, matrices, inter, plane;
2219 
2220  if (s->theora >= 0x030200) {
2221  n = get_bits(gb, 3);
2222  /* loop filter limit values table */
2223  if (n)
2224  for (i = 0; i < 64; i++)
2225  s->filter_limit_values[i] = get_bits(gb, n);
2226  }
2227 
2228  if (s->theora >= 0x030200)
2229  n = get_bits(gb, 4) + 1;
2230  else
2231  n = 16;
2232  /* quality threshold table */
2233  for (i = 0; i < 64; i++)
2234  s->coded_ac_scale_factor[i] = get_bits(gb, n);
2235 
2236  if (s->theora >= 0x030200)
2237  n = get_bits(gb, 4) + 1;
2238  else
2239  n = 16;
2240  /* dc scale factor table */
2241  for (i = 0; i < 64; i++)
2242  s->coded_dc_scale_factor[i] = get_bits(gb, n);
2243 
2244  if (s->theora >= 0x030200)
2245  matrices = get_bits(gb, 9) + 1;
2246  else
2247  matrices = 3;
2248 
2249  if(matrices > 384){
2250  av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
2251  return -1;
2252  }
2253 
2254  for(n=0; n<matrices; n++){
2255  for (i = 0; i < 64; i++)
2256  s->base_matrix[n][i]= get_bits(gb, 8);
2257  }
2258 
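 /* Quant range setup: for each (inter, plane) pair either reuse the ranges of
  * a previously coded pair (newqr == 0), or read a list of base-matrix indices
  * plus run lengths that must cover the full qi range 0..63. */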
2259  for (inter = 0; inter <= 1; inter++) {
2260  for (plane = 0; plane <= 2; plane++) {
2261  int newqr= 1;
2262  if (inter || plane > 0)
2263  newqr = get_bits1(gb);
2264  if (!newqr) {
2265  int qtj, plj;
2266  if(inter && get_bits1(gb)){
2267  qtj = 0;
2268  plj = plane;
2269  }else{
2270  qtj= (3*inter + plane - 1) / 3;
2271  plj= (plane + 2) % 3;
2272  }
2273  s->qr_count[inter][plane]= s->qr_count[qtj][plj];
2274  memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], sizeof(s->qr_size[0][0]));
2275  memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], sizeof(s->qr_base[0][0]));
2276  } else {
2277  int qri= 0;
2278  int qi = 0;
2279 
2280  for(;;){
2281  i= get_bits(gb, av_log2(matrices-1)+1);
2282  if(i>= matrices){
2283  av_log(avctx, AV_LOG_ERROR, "invalid base matrix index\n");
2284  return -1;
2285  }
2286  s->qr_base[inter][plane][qri]= i;
2287  if(qi >= 63)
2288  break;
2289  i = get_bits(gb, av_log2(63-qi)+1) + 1;
2290  s->qr_size[inter][plane][qri++]= i;
2291  qi += i;
2292  }
2293 
2294  if (qi > 63) {
2295  av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
2296  return -1;
2297  }
2298  s->qr_count[inter][plane]= qri;
2299  }
2300  }
2301  }
2302 
2303  /* Huffman tables */
2304  for (s->hti = 0; s->hti < 80; s->hti++) {
2305  s->entries = 0;
2306  s->huff_code_size = 1;
2307  if (!get_bits1(gb)) {
2308  s->hbits = 0;
2309  if(read_huffman_tree(avctx, gb))
2310  return -1;
2311  s->hbits = 1;
2312  if(read_huffman_tree(avctx, gb))
2313  return -1;
2314  }
2315  }
2316 
2317  s->theora_tables = 1;
2318 
2319  return 0;
2320 }
2321 
2322 static av_cold int theora_decode_init(AVCodecContext *avctx)
2323 {
2324  Vp3DecodeContext *s = avctx->priv_data;
2325  GetBitContext gb;
2326  int ptype;
2327  uint8_t *header_start[3];
2328  int header_len[3];
2329  int i;
2330 
2331  s->theora = 1;
2332 
2333  if (!avctx->extradata_size)
2334  {
2335  av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
2336  return -1;
2337  }
2338 
2339  if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
2340  42, header_start, header_len) < 0) {
2341  av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
2342  return -1;
2343  }
2344 
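 /* The extradata holds the three Xiph header packets: the info header (0x80),
  * the comment header (0x81, ignored here) and the setup header (0x82) with
  * the quantizer and Huffman tables. */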
2345  for(i=0;i<3;i++) {
2346  if (header_len[i] <= 0)
2347  continue;
2348  init_get_bits(&gb, header_start[i], header_len[i] * 8);
2349 
2350  ptype = get_bits(&gb, 8);
2351 
2352  if (!(ptype & 0x80))
2353  {
2354  av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
2355 // return -1;
2356  }
2357 
2358  // FIXME: Check for this as well.
2359  skip_bits_long(&gb, 6*8); /* "theora" */
2360 
2361  switch(ptype)
2362  {
2363  case 0x80:
2364  theora_decode_header(avctx, &gb);
2365  break;
2366  case 0x81:
2367 // FIXME: is this needed? it breaks sometimes
2368 // theora_decode_comments(avctx, gb);
2369  break;
2370  case 0x82:
2371  if (theora_decode_tables(avctx, &gb))
2372  return -1;
2373  break;
2374  default:
2375  av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
2376  break;
2377  }
2378  if(ptype != 0x81 && 8*header_len[i] != get_bits_count(&gb))
2379  av_log(avctx, AV_LOG_WARNING, "%d bits left in packet %X\n", 8*header_len[i] - get_bits_count(&gb), ptype);
2380  if (s->theora < 0x030200)
2381  break;
2382  }
2383 
2384  return vp3_decode_init(avctx);
2385 }
2386 
2387 AVCodec ff_theora_decoder = {
2388  .name = "theora",
2389  .type = AVMEDIA_TYPE_VIDEO,
2390  .id = AV_CODEC_ID_THEORA,
2391  .priv_data_size = sizeof(Vp3DecodeContext),
2392  .init = theora_decode_init,
2393  .close = vp3_decode_end,
2394  .decode = vp3_decode_frame,
2395  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
2396  CODEC_CAP_FRAME_THREADS,
2397  .flush = vp3_decode_flush,
2398  .long_name = NULL_IF_CONFIG_SMALL("Theora"),
2399  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
2400  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
2401 };
2402 #endif
2403 
2405  .name = "vp3",
2406  .type = AVMEDIA_TYPE_VIDEO,
2407  .id = AV_CODEC_ID_VP3,
2408  .priv_data_size = sizeof(Vp3DecodeContext),
2409  .init = vp3_decode_init,
2410  .close = vp3_decode_end,
2411  .decode = vp3_decode_frame,
2412  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
2413  CODEC_CAP_FRAME_THREADS,
2414  .flush = vp3_decode_flush,
2415  .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
2416  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
2417  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
2418 };
Definition: vp3.c:172