Libav
vc1dec.c
Go to the documentation of this file.
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of Libav.
8  *
9  * Libav is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * Libav is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with Libav; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
29 #include "internal.h"
30 #include "avcodec.h"
31 #include "error_resilience.h"
32 #include "mpeg_er.h"
33 #include "mpegutils.h"
34 #include "mpegvideo.h"
35 #include "h263.h"
36 #include "h264chroma.h"
37 #include "qpeldsp.h"
38 #include "vc1.h"
39 #include "vc1data.h"
40 #include "vc1acdata.h"
41 #include "msmpeg4data.h"
42 #include "unary.h"
43 #include "mathops.h"
44 
45 #undef NDEBUG
46 #include <assert.h>
47 
48 #define MB_INTRA_VLC_BITS 9
49 #define DC_VLC_BITS 9
50 
51 
52 // offset tables for interlaced picture MVDATA decoding
/* offset_table1[k] = 2^(k-1) for k >= 1; offset_table2[k] = 2^k - 1.
 * Indexed by the MV offset index decoded from the bitstream. */
static const int offset_table1[9] = {  0,  1,  2,  4,  8, 16, 32,  64, 128 };
static const int offset_table2[9] = {  0,  1,  3,  7, 15, 31, 63, 127, 255 };
55 
56 /***********************************************************************/
67 enum Imode {
75 }; //imode defines
77 
79 {
80  MpegEncContext *s = &v->s;
82  if (v->field_mode && !(v->second_field ^ v->tff)) {
83  s->dest[0] += s->current_picture_ptr->f->linesize[0];
84  s->dest[1] += s->current_picture_ptr->f->linesize[1];
85  s->dest[2] += s->current_picture_ptr->f->linesize[2];
86  }
87 }
88  //Bitplane group
90 
92 {
93  MpegEncContext *s = &v->s;
94  int topleft_mb_pos, top_mb_pos;
95  int stride_y, fieldtx = 0;
96  int v_dist;
97 
98  /* The put pixels loop is always one MB row behind the decoding loop,
99  * because we can only put pixels when overlap filtering is done, and
100  * for filtering of the bottom edge of a MB, we need the next MB row
101  * present as well.
102  * Within the row, the put pixels loop is also one MB col behind the
103  * decoding loop. The reason for this is again, because for filtering
104  * of the right MB edge, we need the next MB present. */
105  if (!s->first_slice_line) {
106  if (s->mb_x) {
107  topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
108  if (v->fcm == ILACE_FRAME)
109  fieldtx = v->fieldtx_plane[topleft_mb_pos];
110  stride_y = s->linesize << fieldtx;
111  v_dist = (16 - fieldtx) >> (fieldtx == 0);
113  s->dest[0] - 16 * s->linesize - 16,
114  stride_y);
116  s->dest[0] - 16 * s->linesize - 8,
117  stride_y);
119  s->dest[0] - v_dist * s->linesize - 16,
120  stride_y);
122  s->dest[0] - v_dist * s->linesize - 8,
123  stride_y);
125  s->dest[1] - 8 * s->uvlinesize - 8,
126  s->uvlinesize);
128  s->dest[2] - 8 * s->uvlinesize - 8,
129  s->uvlinesize);
130  }
131  if (s->mb_x == s->mb_width - 1) {
132  top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
133  if (v->fcm == ILACE_FRAME)
134  fieldtx = v->fieldtx_plane[top_mb_pos];
135  stride_y = s->linesize << fieldtx;
136  v_dist = fieldtx ? 15 : 8;
138  s->dest[0] - 16 * s->linesize,
139  stride_y);
141  s->dest[0] - 16 * s->linesize + 8,
142  stride_y);
144  s->dest[0] - v_dist * s->linesize,
145  stride_y);
147  s->dest[0] - v_dist * s->linesize + 8,
148  stride_y);
150  s->dest[1] - 8 * s->uvlinesize,
151  s->uvlinesize);
153  s->dest[2] - 8 * s->uvlinesize,
154  s->uvlinesize);
155  }
156  }
157 
158 #define inc_blk_idx(idx) do { \
159  idx++; \
160  if (idx >= v->n_allocated_blks) \
161  idx = 0; \
162  } while (0)
163 
168 }
169 
170 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
171 {
172  MpegEncContext *s = &v->s;
173  int j;
174  if (!s->first_slice_line) {
175  v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
176  if (s->mb_x)
177  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
178  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
179  for (j = 0; j < 2; j++) {
180  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
181  if (s->mb_x)
182  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
183  }
184  }
185  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
186 
187  if (s->mb_y == s->end_mb_y - 1) {
188  if (s->mb_x) {
189  v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
190  v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
191  v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
192  }
193  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
194  }
195 }
196 
198 {
199  MpegEncContext *s = &v->s;
200  int j;
201 
202  /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
203  * means it runs two rows/cols behind the decoding loop. */
204  if (!s->first_slice_line) {
205  if (s->mb_x) {
206  if (s->mb_y >= s->start_mb_y + 2) {
207  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
208 
209  if (s->mb_x >= 2)
210  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
211  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
212  for (j = 0; j < 2; j++) {
213  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
214  if (s->mb_x >= 2) {
215  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
216  }
217  }
218  }
219  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
220  }
221 
222  if (s->mb_x == s->mb_width - 1) {
223  if (s->mb_y >= s->start_mb_y + 2) {
224  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
225 
226  if (s->mb_x)
227  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
228  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
229  for (j = 0; j < 2; j++) {
230  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
231  if (s->mb_x >= 2) {
232  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
233  }
234  }
235  }
236  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
237  }
238 
239  if (s->mb_y == s->end_mb_y) {
240  if (s->mb_x) {
241  if (s->mb_x >= 2)
242  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
243  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
244  if (s->mb_x >= 2) {
245  for (j = 0; j < 2; j++) {
246  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
247  }
248  }
249  }
250 
251  if (s->mb_x == s->mb_width - 1) {
252  if (s->mb_x)
253  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
254  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
255  if (s->mb_x) {
256  for (j = 0; j < 2; j++) {
257  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
258  }
259  }
260  }
261  }
262  }
263 }
264 
266 {
267  MpegEncContext *s = &v->s;
268  int mb_pos;
269 
270  if (v->condover == CONDOVER_NONE)
271  return;
272 
273  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
274 
275  /* Within a MB, the horizontal overlap always runs before the vertical.
276  * To accomplish that, we run the H on left and internal borders of the
277  * currently decoded MB. Then, we wait for the next overlap iteration
278  * to do H overlap on the right edge of this MB, before moving over and
279  * running the V overlap. Therefore, the V overlap makes us trail by one
280  * MB col and the H overlap filter makes us trail by one MB row. This
281  * is reflected in the time at which we run the put_pixels loop. */
282  if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
283  if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
284  v->over_flags_plane[mb_pos - 1])) {
286  v->block[v->cur_blk_idx][0]);
288  v->block[v->cur_blk_idx][2]);
289  if (!(s->flags & CODEC_FLAG_GRAY)) {
291  v->block[v->cur_blk_idx][4]);
293  v->block[v->cur_blk_idx][5]);
294  }
295  }
297  v->block[v->cur_blk_idx][1]);
299  v->block[v->cur_blk_idx][3]);
300 
301  if (s->mb_x == s->mb_width - 1) {
302  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
303  v->over_flags_plane[mb_pos - s->mb_stride])) {
305  v->block[v->cur_blk_idx][0]);
307  v->block[v->cur_blk_idx][1]);
308  if (!(s->flags & CODEC_FLAG_GRAY)) {
310  v->block[v->cur_blk_idx][4]);
312  v->block[v->cur_blk_idx][5]);
313  }
314  }
316  v->block[v->cur_blk_idx][2]);
318  v->block[v->cur_blk_idx][3]);
319  }
320  }
321  if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
322  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
323  v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
325  v->block[v->left_blk_idx][0]);
327  v->block[v->left_blk_idx][1]);
328  if (!(s->flags & CODEC_FLAG_GRAY)) {
330  v->block[v->left_blk_idx][4]);
332  v->block[v->left_blk_idx][5]);
333  }
334  }
336  v->block[v->left_blk_idx][2]);
338  v->block[v->left_blk_idx][3]);
339  }
340 }
341 
/**
 * Do motion compensation over one whole macroblock (1-MV mode).
 * Selects the reference plane pointers for the given prediction direction,
 * derives the chroma MV from the luma MV, clips the source position, handles
 * edge emulation, range reduction and intensity compensation, then runs the
 * luma (qpel/hpel) and chroma (qpel bilinear) interpolation.
 *
 * @param v   VC-1 decoder context
 * @param dir 0 = forward (last/current picture), 1 = backward (next picture)
 *
 * NOTE(review): this extraction lost the call head of the luma
 * s->vdsp.emulated_edge_mc(...) invocation; the orphaned argument lines
 * below belong to it — restore from the original file before compiling.
 */
static void vc1_mc_1mv(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    int i;
    uint8_t (*luty)[256], (*lutuv)[256];
    int use_ic;

    /* nothing to predict from if the needed reference frame is absent */
    if ((!v->field_mode ||
         (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f->data[0])
        return;

    mx = s->mv[dir][0][0];
    my = s->mv[dir][0][1];

    // store motion vectors for further use in B frames
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        for (i = 0; i < 4; i++) {
            s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
            s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
        }
    }

    /* chroma MV = luma MV halved, rounding 3/4-pel positions up */
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;

    /* bias MVs when predicting across fields of opposite parity */
    if (v->field_mode &&
        v->cur_field_type != v->ref_field_type[dir]) {
        my   = my   - 2 + 4 * v->cur_field_type;
        uvmy = uvmy - 2 + 4 * v->cur_field_type;
    }

    // fastuvmc shall be ignored for interlaced frame picture
    if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    }
    /* pick reference planes and intensity-compensation LUTs per direction */
    if (!dir) {
        if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
            srcY = s->current_picture.f->data[0];
            srcU = s->current_picture.f->data[1];
            srcV = s->current_picture.f->data[2];
            luty   = v->curr_luty;
            lutuv  = v->curr_lutuv;
            use_ic = v->curr_use_ic;
        } else {
            srcY = s->last_picture.f->data[0];
            srcU = s->last_picture.f->data[1];
            srcV = s->last_picture.f->data[2];
            luty   = v->last_luty;
            lutuv  = v->last_lutuv;
            use_ic = v->last_use_ic;
        }
    } else {
        srcY = s->next_picture.f->data[0];
        srcU = s->next_picture.f->data[1];
        srcV = s->next_picture.f->data[2];
        luty   = v->next_luty;
        lutuv  = v->next_lutuv;
        use_ic = v->next_use_ic;
    }

    if (!srcY || !srcU) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    src_x   = s->mb_x * 16 + (mx   >> 2);
    src_y   = s->mb_y * 16 + (my   >> 2);
    uvsrc_x = s->mb_x *  8 + (uvmx >> 2);
    uvsrc_y = s->mb_y *  8 + (uvmy >> 2);

    /* clip source position; advanced profile uses coded dimensions */
    if (v->profile != PROFILE_ADVANCED) {
        src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
        src_y   = av_clip(  src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
    } else {
        src_x   = av_clip(  src_x, -17, s->avctx->coded_width);
        src_y   = av_clip(  src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->avctx->coded_height >> 1);
    }

    srcY += src_y   * s->linesize   + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    /* referencing the bottom field: step one line into the frame */
    if (v->field_mode && v->ref_field_type[dir]) {
        srcY += s->current_picture_ptr->f->linesize[0];
        srcU += s->current_picture_ptr->f->linesize[1];
        srcV += s->current_picture_ptr->f->linesize[2];
    }

    /* for grayscale we should not try to read from unknown area */
    if (s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    /* slow path: edge emulation and/or per-pixel LUT scaling required */
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 22 || v_edge_pos < 22
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 16 - s->mspel * 3
        || (unsigned)(src_y - 1)        > v_edge_pos    - (my & 3) - 16 - 3) {
        uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;

        srcY -= s->mspel * (1 + s->linesize);
        /* NOTE(review): missing line here — s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, ...) */
                                 s->linesize, s->linesize,
                                 17 + s->mspel * 2, 17 + s->mspel * 2,
                                 src_x - s->mspel, src_y - s->mspel,
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        s->vdsp.emulated_edge_mc(uvbuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
            src  = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i]  = ((src[i]  - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src  += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                /* f selects the LUT for the field this source line belongs to */
                int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
                src += s->linesize;
            }
            src  = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i]  = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                }
                src  += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        srcY += s->mspel * (1 + s->linesize);
    }

    if (s->mspel) {
        /* quarter-pel interpolation, one call per 8x8 quadrant */
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0]    , srcY    , s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize    , srcY    , s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if (!v->rnd)
            s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
        else
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
    }

    if (s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
550 
/* Median of four integers: the average of the two middle values,
 * i.e. (sum of all four - minimum - maximum) / 2, computed branch-wise. */
static inline int median4(int a, int b, int c, int d)
{
    int hi, lo;

    if (a < b) {
        if (c < d) {
            hi = b < d ? b : d;   /* smaller of the two pair maxima */
            lo = a > c ? a : c;   /* larger of the two pair minima  */
        } else {
            hi = b < c ? b : c;
            lo = a > d ? a : d;
        }
    } else {
        if (c < d) {
            hi = a < d ? a : d;
            lo = b > c ? b : c;
        } else {
            hi = a < c ? a : c;
            lo = b > d ? b : d;
        }
    }
    return (lo + hi) / 2;
}
561 
/**
 * Do motion compensation for one 8x8 luminance block of a 4-MV macroblock.
 *
 * @param v   VC-1 decoder context
 * @param n   block index within the MB (0..3, raster order)
 * @param dir 0 = forward reference, 1 = backward reference
 * @param avg nonzero to average into the destination instead of overwriting
 *
 * NOTE(review): this extraction lost the call head of the
 * s->vdsp.emulated_edge_mc(...) invocation in the edge-emulation path;
 * the orphaned argument lines belong to it — restore before compiling.
 */
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
{
    MpegEncContext *s = &v->s;
    uint8_t *srcY;
    int dxy, mx, my, src_x, src_y;
    int off;
    /* fieldmv: this block uses field (half-height, doubled-stride) MC */
    int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    uint8_t (*luty)[256];
    int use_ic;

    if ((!v->field_mode ||
         (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f->data[0])
        return;

    mx = s->mv[dir][n][0];
    my = s->mv[dir][n][1];

    /* pick reference luma plane and intensity-compensation LUT */
    if (!dir) {
        if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
            srcY   = s->current_picture.f->data[0];
            luty   = v->curr_luty;
            use_ic = v->curr_use_ic;
        } else {
            srcY   = s->last_picture.f->data[0];
            luty   = v->last_luty;
            use_ic = v->last_use_ic;
        }
    } else {
        srcY   = s->next_picture.f->data[0];
        luty   = v->next_luty;
        use_ic = v->next_use_ic;
    }

    if (!srcY) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    /* bias MV when predicting across fields of opposite parity */
    if (v->field_mode) {
        if (v->cur_field_type != v->ref_field_type[dir])
            my = my - 2 + 4 * v->cur_field_type;
    }

    /* last block of a field P-MB: derive and store the MB-level MV from the
     * dominant field's block MVs (median/mid/average by count) */
    if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
        int same_count = 0, opp_count = 0, k;
        int chosen_mv[2][4][2], f;
        int tx, ty;
        for (k = 0; k < 4; k++) {
            f = v->mv_f[0][s->block_index[k] + v->blocks_off];
            chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
            chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
            opp_count  += f;
            same_count += 1 - f;
        }
        f = opp_count > same_count;
        switch (f ? opp_count : same_count) {
        case 4:
            tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
                         chosen_mv[f][2][0], chosen_mv[f][3][0]);
            ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
                         chosen_mv[f][2][1], chosen_mv[f][3][1]);
            break;
        case 3:
            tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
            ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
            break;
        case 2:
            tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
            ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
            break;
        }
        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
        for (k = 0; k < 4; k++)
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
    }

    if (v->fcm == ILACE_FRAME) {    // not sure if needed for other types of picture
        int qx, qy;
        int width  = s->avctx->coded_width;
        int height = s->avctx->coded_height >> 1;
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
            s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
        }
        /* pull the MV back so the source block stays inside the frame */
        qx = (s->mb_x * 16) + (mx >> 2);
        qy = (s->mb_y *  8) + (my >> 3);

        if (qx < -17)
            mx -= 4 * (qx + 17);
        else if (qx > width)
            mx -= 4 * (qx - width);
        if (qy < -18)
            my -= 8 * (qy + 18);
        else if (qy > height + 1)
            my -= 8 * (qy - height - 1);
    }

    /* destination offset of this 8x8 block inside the MB */
    if ((v->fcm == ILACE_FRAME) && fieldmv)
        off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
    else
        off = s->linesize * 4 * (n & 2) + (n & 1) * 8;

    src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
    if (!fieldmv)
        src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
    else
        src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip(src_x, -16, s->mb_width  * 16);
        src_y = av_clip(src_y, -16, s->mb_height * 16);
    } else {
        src_x = av_clip(src_x, -17, s->avctx->coded_width);
        if (v->fcm == ILACE_FRAME) {
            /* clip range depends on the parity of the source line */
            if (src_y & 1)
                src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
            else
                src_y = av_clip(src_y, -18, s->avctx->coded_height);
        } else {
            src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
        }
    }

    srcY += src_y * s->linesize + src_x;
    if (v->field_mode && v->ref_field_type[dir])
        srcY += s->current_picture_ptr->f->linesize[0];

    if (fieldmv && !(src_y & 1))
        v_edge_pos--;
    if (fieldmv && (src_y & 1) && src_y < 4)
        src_y--;
    /* slow path: edge emulation and/or per-pixel LUT scaling required */
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 13 || v_edge_pos < 23
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
        || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
        srcY -= s->mspel * (1 + (s->linesize << fieldmv));
        /* check emulate edge stride and offset */
        /* NOTE(review): missing line here — s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, ...) */
                                 s->linesize, s->linesize,
                                 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
                                 src_x - s->mspel, src_y - (s->mspel << fieldmv),
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for (j = 0; j < 9 + s->mspel * 2; j++) {
                for (i = 0; i < 9 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize << fieldmv;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for (j = 0; j < 9 + s->mspel * 2; j++) {
                /* f selects the LUT for the field this source line belongs to */
                int f = v->field_mode ? v->ref_field_type[dir] : (((j << fieldmv) + src_y - (s->mspel << fieldmv)) & 1);
                for (i = 0; i < 9 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
                src += s->linesize << fieldmv;
            }
        }
        srcY += s->mspel * (1 + (s->linesize << fieldmv));
    }

    if (s->mspel) {
        dxy = ((my & 3) << 2) | (mx & 3);
        if (avg)
            v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
        else
            v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if (!v->rnd)
            s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
        else
            s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
    }
}
752 
753 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
754 {
755  int idx, i;
756  static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
757 
758  idx = ((a[3] != flag) << 3)
759  | ((a[2] != flag) << 2)
760  | ((a[1] != flag) << 1)
761  | (a[0] != flag);
762  if (!idx) {
763  *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
764  *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
765  return 4;
766  } else if (count[idx] == 1) {
767  switch (idx) {
768  case 0x1:
769  *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
770  *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
771  return 3;
772  case 0x2:
773  *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
774  *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
775  return 3;
776  case 0x4:
777  *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
778  *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
779  return 3;
780  case 0x8:
781  *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
782  *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
783  return 3;
784  }
785  } else if (count[idx] == 2) {
786  int t1 = 0, t2 = 0;
787  for (i = 0; i < 3; i++)
788  if (!a[i]) {
789  t1 = i;
790  break;
791  }
792  for (i = t1 + 1; i < 4; i++)
793  if (!a[i]) {
794  t2 = i;
795  break;
796  }
797  *tx = (mvx[t1] + mvx[t2]) / 2;
798  *ty = (mvy[t1] + mvy[t2]) / 2;
799  return 2;
800  } else {
801  return 0;
802  }
803  return -1;
804 }
805 
/**
 * Do motion compensation for both chroma blocks of a 4-MV macroblock.
 * Derives a single chroma MV from the four luma MVs via get_chroma_mv(),
 * then runs qpel bilinear interpolation on the U and V planes.
 *
 * @param v   VC-1 decoder context
 * @param dir 0 = forward reference, 1 = backward reference
 *
 * NOTE(review): this extraction lost the call head of the first
 * s->vdsp.emulated_edge_mc(...) invocation (for srcU); the orphaned
 * argument lines belong to it — restore before compiling.
 */
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcU, *srcV;
    int uvmx, uvmy, uvsrc_x, uvsrc_y;
    int k, tx = 0, ty = 0;
    int mvx[4], mvy[4], intra[4], mv_f[4];
    int valid_count;
    int chroma_ref_type = v->cur_field_type;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    uint8_t (*lutuv)[256];
    int use_ic;

    if (!v->field_mode && !v->s.last_picture.f->data[0])
        return;
    if (s->flags & CODEC_FLAG_GRAY)
        return;

    /* gather the four luma MVs and their validity/field flags */
    for (k = 0; k < 4; k++) {
        mvx[k]   = s->mv[dir][k][0];
        mvy[k]   = s->mv[dir][k][1];
        intra[k] = v->mb_type[0][s->block_index[k]];
        if (v->field_mode)
            mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
    }

    /* calculate chroma MV vector from four luma MVs */
    if (!v->field_mode || (v->field_mode && !v->numref)) {
        valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
        chroma_ref_type = v->reffield;
        if (!valid_count) {
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            return; //no need to do MC for intra blocks
        }
    } else {
        /* two references: use the field most of the block MVs point at */
        int dominant = 0;
        if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
            dominant = 1;
        valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
        if (dominant)
            chroma_ref_type = !v->cur_field_type;
    }
    if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
        return;
    s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
    s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
    /* halve the derived MV, rounding 3/4-pel positions up */
    uvmx = (tx + ((tx & 3) == 3)) >> 1;
    uvmy = (ty + ((ty & 3) == 3)) >> 1;

    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;

    if (v->fastuvmc) {
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    }
    // Field conversion bias
    if (v->cur_field_type != chroma_ref_type)
        uvmy += 2 - 4 * chroma_ref_type;

    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width  * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
    } else {
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    }

    /* pick reference chroma planes and intensity-compensation LUT */
    if (!dir) {
        if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
            srcU   = s->current_picture.f->data[1];
            srcV   = s->current_picture.f->data[2];
            lutuv  = v->curr_lutuv;
            use_ic = v->curr_use_ic;
        } else {
            srcU   = s->last_picture.f->data[1];
            srcV   = s->last_picture.f->data[2];
            lutuv  = v->last_lutuv;
            use_ic = v->last_use_ic;
        }
    } else {
        srcU   = s->next_picture.f->data[1];
        srcV   = s->next_picture.f->data[2];
        lutuv  = v->next_lutuv;
        use_ic = v->next_use_ic;
    }

    if (!srcU) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    /* referencing the bottom field: step one line into the frame */
    if (v->field_mode) {
        if (chroma_ref_type) {
            srcU += s->current_picture_ptr->f->linesize[1];
            srcV += s->current_picture_ptr->f->linesize[2];
        }
    }

    /* slow path: edge emulation and/or per-pixel LUT scaling required */
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 18 || v_edge_pos < 18
        || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
        || (unsigned)uvsrc_y > (v_edge_pos    >> 1) - 9) {
        /* NOTE(review): missing line here — s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU, ...) */
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = s->edge_emu_buffer;
        srcV = s->edge_emu_buffer + 16;

        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src  = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i]  = ((src[i]  - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src  += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            int i, j;
            uint8_t *src, *src2;

            src  = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                /* f selects the LUT for the field this source line belongs to */
                int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i]  = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                }
                src  += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
    }

    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
977 
980 static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
981 {
982  MpegEncContext *s = &v->s;
983  H264ChromaContext *h264chroma = &v->h264chroma;
984  uint8_t *srcU, *srcV;
985  int uvsrc_x, uvsrc_y;
986  int uvmx_field[4], uvmy_field[4];
987  int i, off, tx, ty;
988  int fieldmv = v->blk_mv_type[s->block_index[0]];
989  static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
990  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
991  int v_edge_pos = s->v_edge_pos >> 1;
992  int use_ic;
993  uint8_t (*lutuv)[256];
994 
995  if (s->flags & CODEC_FLAG_GRAY)
996  return;
997 
998  for (i = 0; i < 4; i++) {
999  int d = i < 2 ? dir: dir2;
1000  tx = s->mv[d][i][0];
1001  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1002  ty = s->mv[d][i][1];
1003  if (fieldmv)
1004  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1005  else
1006  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
1007  }
1008 
1009  for (i = 0; i < 4; i++) {
1010  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1011  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1012  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1013  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1014  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1015  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1016  if (i < 2 ? dir : dir2) {
1017  srcU = s->next_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1018  srcV = s->next_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1019  lutuv = v->next_lutuv;
1020  use_ic = v->next_use_ic;
1021  } else {
1022  srcU = s->last_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1023  srcV = s->last_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1024  lutuv = v->last_lutuv;
1025  use_ic = v->last_use_ic;
1026  }
1027  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1028  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
1029 
1030  if (fieldmv && !(uvsrc_y & 1))
1031  v_edge_pos--;
1032  if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
1033  uvsrc_y--;
1034  if (use_ic
1035  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1036  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1037  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1039  s->uvlinesize, s->uvlinesize,
1040  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1041  s->h_edge_pos >> 1, v_edge_pos);
1042  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
1043  s->uvlinesize, s->uvlinesize,
1044  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1045  s->h_edge_pos >> 1, v_edge_pos);
1046  srcU = s->edge_emu_buffer;
1047  srcV = s->edge_emu_buffer + 16;
1048 
1049  /* if we deal with intensity compensation we need to scale source blocks */
1050  if (use_ic) {
1051  int i, j;
1052  uint8_t *src, *src2;
1053 
1054  src = srcU;
1055  src2 = srcV;
1056  for (j = 0; j < 5; j++) {
1057  int f = (uvsrc_y + (j << fieldmv)) & 1;
1058  for (i = 0; i < 5; i++) {
1059  src[i] = lutuv[f][src[i]];
1060  src2[i] = lutuv[f][src2[i]];
1061  }
1062  src += s->uvlinesize << fieldmv;
1063  src2 += s->uvlinesize << fieldmv;
1064  }
1065  }
1066  }
1067  if (avg) {
1068  if (!v->rnd) {
1069  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1070  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1071  } else {
1072  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1073  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1074  }
1075  } else {
1076  if (!v->rnd) {
1077  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1078  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1079  } else {
1080  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1081  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1082  }
1083  }
1084  }
1085 }
1086 
1087 /***********************************************************************/
/**
 * Get macroblock-level quantizer scale (MQUANT).
 *
 * Expects `mquant`, `mqdiff`, `gb`, `v` and `s` to be in scope at the
 * expansion site; leaves the result in `mquant`.  Only active when the
 * picture signals per-MB dquant (v->dquantfrm); otherwise `mquant` keeps
 * its prior value.  For the edge-based profiles, `edges` is a 4-bit mask
 * (left/top/right/bottom) of MB positions that are forced to v->altpq.
 * Invalid results (0 or > 31) are clamped to 1 with an error log.
 */
#define GET_MQUANT() \
    if (v->dquantfrm) { \
        int edges = 0; \
        if (v->dqprofile == DQPROFILE_ALL_MBS) { \
            if (v->dqbilevel) { \
                mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
            } else { \
                mqdiff = get_bits(gb, 3); \
                if (mqdiff != 7) \
                    mquant = v->pq + mqdiff; \
                else \
                    mquant = get_bits(gb, 5); \
            } \
        } \
        if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
            edges = 1 << v->dqsbedge; \
        else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
            edges = (3 << v->dqsbedge) % 15; \
        else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
            edges = 15; \
        if ((edges&1) && !s->mb_x) \
            mquant = v->altpq; \
        if ((edges&2) && s->first_slice_line) \
            mquant = v->altpq; \
        if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
            mquant = v->altpq; \
        if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
            mquant = v->altpq; \
        if (!mquant || mquant > 31) { \
            av_log(v->s.avctx, AV_LOG_ERROR, \
                   "Overriding invalid mquant %d\n", mquant); \
            mquant = 1; \
        } \
    }
1132 
/**
 * Get MV differentials (MVDATA) for progressive P pictures.
 *
 * @param _dmv_x horizontal differential for the decoded MV (written)
 * @param _dmv_y vertical differential for the decoded MV (written)
 *
 * Expects `index`, `index1`, `val`, `sign`, `mb_has_coeffs`, `size_table`,
 * `offset_table`, `gb`, `s` and `v` to be in scope at the expansion site.
 * Also sets `mb_has_coeffs` (VLC indices > 36 signal coded coefficients)
 * and `s->mb_intra` (index 36 is the intra escape).  Index 35 is the
 * full-size escape where both components are read verbatim.
 */
#define GET_MVDATA(_dmv_x, _dmv_y) \
    index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
                         VC1_MV_DIFF_VLC_BITS, 2); \
    if (index > 36) { \
        mb_has_coeffs = 1; \
        index -= 37; \
    } else \
        mb_has_coeffs = 0; \
    s->mb_intra = 0; \
    if (!index) { \
        _dmv_x = _dmv_y = 0; \
    } else if (index == 35) { \
        _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
        _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
    } else if (index == 36) { \
        _dmv_x = 0; \
        _dmv_y = 0; \
        s->mb_intra = 1; \
    } else { \
        index1 = index % 6; \
        if (!s->quarter_sample && index1 == 5) val = 1; \
        else val = 0; \
        if (size_table[index1] - val > 0) \
            val = get_bits(gb, size_table[index1] - val); \
        else val = 0; \
        sign = 0 - (val&1); \
        _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
        \
        index1 = index / 6; \
        if (!s->quarter_sample && index1 == 5) val = 1; \
        else val = 0; \
        if (size_table[index1] - val > 0) \
            val = get_bits(gb, size_table[index1] - val); \
        else val = 0; \
        sign = 0 - (val & 1); \
        _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
    }
1177 
1179  int *dmv_y, int *pred_flag)
1180 {
1181  int index, index1;
1182  int extend_x = 0, extend_y = 0;
1183  GetBitContext *gb = &v->s.gb;
1184  int bits, esc;
1185  int val, sign;
1186  const int* offs_tab;
1187 
1188  if (v->numref) {
1189  bits = VC1_2REF_MVDATA_VLC_BITS;
1190  esc = 125;
1191  } else {
1192  bits = VC1_1REF_MVDATA_VLC_BITS;
1193  esc = 71;
1194  }
1195  switch (v->dmvrange) {
1196  case 1:
1197  extend_x = 1;
1198  break;
1199  case 2:
1200  extend_y = 1;
1201  break;
1202  case 3:
1203  extend_x = extend_y = 1;
1204  break;
1205  }
1206  index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1207  if (index == esc) {
1208  *dmv_x = get_bits(gb, v->k_x);
1209  *dmv_y = get_bits(gb, v->k_y);
1210  if (v->numref) {
1211  if (pred_flag) {
1212  *pred_flag = *dmv_y & 1;
1213  *dmv_y = (*dmv_y + *pred_flag) >> 1;
1214  } else {
1215  *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1216  }
1217  }
1218  }
1219  else {
1220  if (extend_x)
1221  offs_tab = offset_table2;
1222  else
1223  offs_tab = offset_table1;
1224  index1 = (index + 1) % 9;
1225  if (index1 != 0) {
1226  val = get_bits(gb, index1 + extend_x);
1227  sign = 0 -(val & 1);
1228  *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1229  } else
1230  *dmv_x = 0;
1231  if (extend_y)
1232  offs_tab = offset_table2;
1233  else
1234  offs_tab = offset_table1;
1235  index1 = (index + 1) / 9;
1236  if (index1 > v->numref) {
1237  val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1238  sign = 0 - (val & 1);
1239  *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1240  } else
1241  *dmv_y = 0;
1242  if (v->numref && pred_flag)
1243  *pred_flag = index1 & 1;
1244  }
1245 }
1246 
1247 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1248 {
1249  int scaledvalue, refdist;
1250  int scalesame1, scalesame2;
1251  int scalezone1_x, zone1offset_x;
1252  int table_index = dir ^ v->second_field;
1253 
1254  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1255  refdist = v->refdist;
1256  else
1257  refdist = dir ? v->brfd : v->frfd;
1258  if (refdist > 3)
1259  refdist = 3;
1260  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1261  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1262  scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1263  zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1264 
1265  if (FFABS(n) > 255)
1266  scaledvalue = n;
1267  else {
1268  if (FFABS(n) < scalezone1_x)
1269  scaledvalue = (n * scalesame1) >> 8;
1270  else {
1271  if (n < 0)
1272  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1273  else
1274  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1275  }
1276  }
1277  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1278 }
1279 
1280 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1281 {
1282  int scaledvalue, refdist;
1283  int scalesame1, scalesame2;
1284  int scalezone1_y, zone1offset_y;
1285  int table_index = dir ^ v->second_field;
1286 
1287  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1288  refdist = v->refdist;
1289  else
1290  refdist = dir ? v->brfd : v->frfd;
1291  if (refdist > 3)
1292  refdist = 3;
1293  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1294  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1295  scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1296  zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1297 
1298  if (FFABS(n) > 63)
1299  scaledvalue = n;
1300  else {
1301  if (FFABS(n) < scalezone1_y)
1302  scaledvalue = (n * scalesame1) >> 8;
1303  else {
1304  if (n < 0)
1305  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1306  else
1307  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1308  }
1309  }
1310 
1311  if (v->cur_field_type && !v->ref_field_type[dir])
1312  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1313  else
1314  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1315 }
1316 
1317 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1318 {
1319  int scalezone1_x, zone1offset_x;
1320  int scaleopp1, scaleopp2, brfd;
1321  int scaledvalue;
1322 
1323  brfd = FFMIN(v->brfd, 3);
1324  scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1325  zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1326  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1327  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1328 
1329  if (FFABS(n) > 255)
1330  scaledvalue = n;
1331  else {
1332  if (FFABS(n) < scalezone1_x)
1333  scaledvalue = (n * scaleopp1) >> 8;
1334  else {
1335  if (n < 0)
1336  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1337  else
1338  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1339  }
1340  }
1341  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1342 }
1343 
1344 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1345 {
1346  int scalezone1_y, zone1offset_y;
1347  int scaleopp1, scaleopp2, brfd;
1348  int scaledvalue;
1349 
1350  brfd = FFMIN(v->brfd, 3);
1351  scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1352  zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1353  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1354  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1355 
1356  if (FFABS(n) > 63)
1357  scaledvalue = n;
1358  else {
1359  if (FFABS(n) < scalezone1_y)
1360  scaledvalue = (n * scaleopp1) >> 8;
1361  else {
1362  if (n < 0)
1363  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1364  else
1365  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1366  }
1367  }
1368  if (v->cur_field_type && !v->ref_field_type[dir]) {
1369  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1370  } else {
1371  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1372  }
1373 }
1374 
1375 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1376  int dim, int dir)
1377 {
1378  int brfd, scalesame;
1379  int hpel = 1 - v->s.quarter_sample;
1380 
1381  n >>= hpel;
1382  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1383  if (dim)
1384  n = scaleforsame_y(v, i, n, dir) << hpel;
1385  else
1386  n = scaleforsame_x(v, n, dir) << hpel;
1387  return n;
1388  }
1389  brfd = FFMIN(v->brfd, 3);
1390  scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1391 
1392  n = (n * scalesame >> 8) << hpel;
1393  return n;
1394 }
1395 
1396 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1397  int dim, int dir)
1398 {
1399  int refdist, scaleopp;
1400  int hpel = 1 - v->s.quarter_sample;
1401 
1402  n >>= hpel;
1403  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1404  if (dim)
1405  n = scaleforopp_y(v, n, dir) << hpel;
1406  else
1407  n = scaleforopp_x(v, n) << hpel;
1408  return n;
1409  }
1410  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1411  refdist = FFMIN(v->refdist, 3);
1412  else
1413  refdist = dir ? v->brfd : v->frfd;
1414  scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1415 
1416  n = (n * scaleopp >> 8) << hpel;
1417  return n;
1418 }
1419 
1422 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1423  int mv1, int r_x, int r_y, uint8_t* is_intra,
1424  int pred_flag, int dir)
1425 {
1426  MpegEncContext *s = &v->s;
1427  int xy, wrap, off = 0;
1428  int16_t *A, *B, *C;
1429  int px, py;
1430  int sum;
1431  int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1432  int opposite, a_f, b_f, c_f;
1433  int16_t field_predA[2];
1434  int16_t field_predB[2];
1435  int16_t field_predC[2];
1436  int a_valid, b_valid, c_valid;
1437  int hybridmv_thresh, y_bias = 0;
1438 
1439  if (v->mv_mode == MV_PMODE_MIXED_MV ||
1441  mixedmv_pic = 1;
1442  else
1443  mixedmv_pic = 0;
1444  /* scale MV difference to be quad-pel */
1445  dmv_x <<= 1 - s->quarter_sample;
1446  dmv_y <<= 1 - s->quarter_sample;
1447 
1448  wrap = s->b8_stride;
1449  xy = s->block_index[n];
1450 
1451  if (s->mb_intra) {
1452  s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1453  s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1454  s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1455  s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1456  if (mv1) { /* duplicate motion data for 1-MV block */
1457  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1458  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1459  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1460  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1461  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1462  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1463  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1464  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1465  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1466  s->current_picture.motion_val[1][xy + wrap][0] = 0;
1467  s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1468  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1469  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1470  }
1471  return;
1472  }
1473 
1474  C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1475  A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1476  if (mv1) {
1477  if (v->field_mode && mixedmv_pic)
1478  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1479  else
1480  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1481  } else {
1482  //in 4-MV mode different blocks have different B predictor position
1483  switch (n) {
1484  case 0:
1485  off = (s->mb_x > 0) ? -1 : 1;
1486  break;
1487  case 1:
1488  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1489  break;
1490  case 2:
1491  off = 1;
1492  break;
1493  case 3:
1494  off = -1;
1495  }
1496  }
1497  B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
1498 
1499  a_valid = !s->first_slice_line || (n == 2 || n == 3);
1500  b_valid = a_valid && (s->mb_width > 1);
1501  c_valid = s->mb_x || (n == 1 || n == 3);
1502  if (v->field_mode) {
1503  a_valid = a_valid && !is_intra[xy - wrap];
1504  b_valid = b_valid && !is_intra[xy - wrap + off];
1505  c_valid = c_valid && !is_intra[xy - 1];
1506  }
1507 
1508  if (a_valid) {
1509  a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1510  num_oppfield += a_f;
1511  num_samefield += 1 - a_f;
1512  field_predA[0] = A[0];
1513  field_predA[1] = A[1];
1514  } else {
1515  field_predA[0] = field_predA[1] = 0;
1516  a_f = 0;
1517  }
1518  if (b_valid) {
1519  b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1520  num_oppfield += b_f;
1521  num_samefield += 1 - b_f;
1522  field_predB[0] = B[0];
1523  field_predB[1] = B[1];
1524  } else {
1525  field_predB[0] = field_predB[1] = 0;
1526  b_f = 0;
1527  }
1528  if (c_valid) {
1529  c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1530  num_oppfield += c_f;
1531  num_samefield += 1 - c_f;
1532  field_predC[0] = C[0];
1533  field_predC[1] = C[1];
1534  } else {
1535  field_predC[0] = field_predC[1] = 0;
1536  c_f = 0;
1537  }
1538 
1539  if (v->field_mode) {
1540  if (!v->numref)
1541  // REFFIELD determines if the last field or the second-last field is
1542  // to be used as reference
1543  opposite = 1 - v->reffield;
1544  else {
1545  if (num_samefield <= num_oppfield)
1546  opposite = 1 - pred_flag;
1547  else
1548  opposite = pred_flag;
1549  }
1550  } else
1551  opposite = 0;
1552  if (opposite) {
1553  if (a_valid && !a_f) {
1554  field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1555  field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1556  }
1557  if (b_valid && !b_f) {
1558  field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1559  field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1560  }
1561  if (c_valid && !c_f) {
1562  field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1563  field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1564  }
1565  v->mv_f[dir][xy + v->blocks_off] = 1;
1566  v->ref_field_type[dir] = !v->cur_field_type;
1567  } else {
1568  if (a_valid && a_f) {
1569  field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1570  field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1571  }
1572  if (b_valid && b_f) {
1573  field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1574  field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1575  }
1576  if (c_valid && c_f) {
1577  field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1578  field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1579  }
1580  v->mv_f[dir][xy + v->blocks_off] = 0;
1581  v->ref_field_type[dir] = v->cur_field_type;
1582  }
1583 
1584  if (a_valid) {
1585  px = field_predA[0];
1586  py = field_predA[1];
1587  } else if (c_valid) {
1588  px = field_predC[0];
1589  py = field_predC[1];
1590  } else if (b_valid) {
1591  px = field_predB[0];
1592  py = field_predB[1];
1593  } else {
1594  px = 0;
1595  py = 0;
1596  }
1597 
1598  if (num_samefield + num_oppfield > 1) {
1599  px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1600  py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1601  }
1602 
1603  /* Pullback MV as specified in 8.3.5.3.4 */
1604  if (!v->field_mode) {
1605  int qx, qy, X, Y;
1606  qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1607  qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1608  X = (s->mb_width << 6) - 4;
1609  Y = (s->mb_height << 6) - 4;
1610  if (mv1) {
1611  if (qx + px < -60) px = -60 - qx;
1612  if (qy + py < -60) py = -60 - qy;
1613  } else {
1614  if (qx + px < -28) px = -28 - qx;
1615  if (qy + py < -28) py = -28 - qy;
1616  }
1617  if (qx + px > X) px = X - qx;
1618  if (qy + py > Y) py = Y - qy;
1619  }
1620 
1621  if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1622  /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1623  hybridmv_thresh = 32;
1624  if (a_valid && c_valid) {
1625  if (is_intra[xy - wrap])
1626  sum = FFABS(px) + FFABS(py);
1627  else
1628  sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1629  if (sum > hybridmv_thresh) {
1630  if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1631  px = field_predA[0];
1632  py = field_predA[1];
1633  } else {
1634  px = field_predC[0];
1635  py = field_predC[1];
1636  }
1637  } else {
1638  if (is_intra[xy - 1])
1639  sum = FFABS(px) + FFABS(py);
1640  else
1641  sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1642  if (sum > hybridmv_thresh) {
1643  if (get_bits1(&s->gb)) {
1644  px = field_predA[0];
1645  py = field_predA[1];
1646  } else {
1647  px = field_predC[0];
1648  py = field_predC[1];
1649  }
1650  }
1651  }
1652  }
1653  }
1654 
1655  if (v->field_mode && v->numref)
1656  r_y >>= 1;
1657  if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1658  y_bias = 1;
1659  /* store MV using signed modulus of MV range defined in 4.11 */
1660  s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1661  s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1662  if (mv1) { /* duplicate motion data for 1-MV block */
1663  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1664  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1665  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1666  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1667  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1668  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1669  v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1670  v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1671  }
1672 }
1673 
/**
 * Predict and set motion vector for interlaced frame picture MBs.
 *
 * @param v        VC-1 decoder context
 * @param n        block index within the macroblock (0..3)
 * @param dmv_x    horizontal MV differential
 * @param dmv_y    vertical MV differential
 * @param mvn      MB MV count/type: 1 = 1-MV (result duplicated to all four
 *                 blocks), 2 = 2-field MV (duplicated horizontally), else 4-MV
 * @param r_x      horizontal MV range for the signed-modulus wrap
 * @param r_y      vertical MV range for the signed-modulus wrap
 * @param is_intra unused here; the per-MB flags in v->is_intra are consulted
 *                 instead (kept for signature symmetry with vc1_pred_mv)
 * @param dir      prediction direction: 0 = forward, 1 = backward
 */
static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                                     int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2];
    int px, py;
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposite
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    if (s->mb_intra) {
        /* intra blocks carry zero MVs in both prediction directions */
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
        s->current_picture.motion_val[1][xy][0] = 0;
        s->current_picture.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1][0] = 0;
            s->current_picture.motion_val[0][xy + 1][1] = 0;
            s->current_picture.motion_val[0][xy + wrap][0] = 0;
            s->current_picture.motion_val[0][xy + wrap][1] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1][0] = 0;
            s->current_picture.motion_val[1][xy + 1][1] = 0;
            s->current_picture.motion_val[1][xy + wrap][0] = 0;
            s->current_picture.motion_val[1][xy + wrap][1] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
        }
        return;
    }

    /* row offset towards the other field line of the neighbouring block */
    off = ((n == 0) || (n == 1)) ? 1 : -1;
    /* predict A */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->current_picture.motion_val[dir][xy - 1][0];
            A[1] = s->current_picture.motion_val[dir][xy - 1][1];
            a_valid = 1;
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
            a_valid = 1;
        }
        /* left-column blocks cannot use A from an intra left MB */
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
            a_valid = 0;
            A[0] = A[1] = 0;
        }
    } else
        A[0] = A[1] = 0;
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                b_valid = 1;
                n_adj = n | 2;
                pos_b = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    n_adj = (n & 2) | (n & 1);
                }
                B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
                /* candidate above has field MV, current is frame MV: average
                 * both field MVs of the candidate */
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                }
            }
            if (s->mb_width > 1) {
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    c_valid = 1;
                    n_adj = 2;
                    pos_c = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        n_adj = n & 2;
                    }
                    C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                    }
                    /* rightmost MB column: take C from the top-left MB instead */
                    if (s->mb_x == s->mb_width - 1) {
                        if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                            c_valid = 1;
                            n_adj = 3;
                            pos_c = s->block_index[3] - 2 * wrap - 2;
                            if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                n_adj = n | 1;
                            }
                            C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
                            if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                                C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                                C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                            }
                        } else
                            c_valid = 0;
                    }
                }
            }
        }
    } else {
        /* bottom blocks of a 4-MV frame-MV MB predict from the MB's own
         * top-row blocks */
        pos_b = s->block_index[1];
        b_valid = 1;
        B[0] = s->current_picture.motion_val[dir][pos_b][0];
        B[1] = s->current_picture.motion_val[dir][pos_b][1];
        pos_c = s->block_index[0];
        c_valid = 1;
        C[0] = s->current_picture.motion_val[dir][pos_c][0];
        C[1] = s->current_picture.motion_val[dir][pos_c][1];
    }

    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
        A[0] = A[1] = 0;
    }
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    }
    if (!v->blk_mv_type[xy]) {
        /* frame-MV block: plain median (or single candidate) prediction */
        if (s->mb_width == 1) {
            px = B[0];
            py = B[1];
        } else {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if (a_valid) { px = A[0]; py = A[1]; }
                if (b_valid) { px = B[0]; py = B[1]; }
                if (c_valid) { px = C[0]; py = C[1]; }
            } else
                px = py = 0;
        }
    } else {
        /* field-MV block: classify each candidate by the field its vertical
         * component points to (bit 2 set = opposite field) */
        if (a_valid)
            field_a = (A[1] & 4) ? 1 : 0;
        else
            field_a = 0;
        if (b_valid)
            field_b = (B[1] & 4) ? 1 : 0;
        else
            field_b = 0;
        if (c_valid)
            field_c = (C[1] & 4) ? 1 : 0;
        else
            field_c = 0;

        num_oppfield = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from same field set depending on priority
                   the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
            } else {
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
            }
        } else if (total_valid == 2) {
            /* two candidates: prefer the majority-field one, priority A > B > C */
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (!field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                } else px = py = 0;
            } else {
                if (field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                } else
                    px = py = 0;
            }
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
        } else
            px = py = 0;
    }

    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
        s->mv[dir][n + 1][0] = s->mv[dir][n][0];
        s->mv[dir][n + 1][1] = s->mv[dir][n][1];
    }
}
1900 
1903 static void vc1_interp_mc(VC1Context *v)
1904 {
1905  MpegEncContext *s = &v->s;
1906  H264ChromaContext *h264chroma = &v->h264chroma;
1907  uint8_t *srcY, *srcU, *srcV;
1908  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1909  int off, off_uv;
1910  int v_edge_pos = s->v_edge_pos >> v->field_mode;
1911  int use_ic = v->next_use_ic;
1912 
1913  if (!v->field_mode && !v->s.next_picture.f->data[0])
1914  return;
1915 
1916  mx = s->mv[1][0][0];
1917  my = s->mv[1][0][1];
1918  uvmx = (mx + ((mx & 3) == 3)) >> 1;
1919  uvmy = (my + ((my & 3) == 3)) >> 1;
1920  if (v->field_mode) {
1921  if (v->cur_field_type != v->ref_field_type[1])
1922  my = my - 2 + 4 * v->cur_field_type;
1923  uvmy = uvmy - 2 + 4 * v->cur_field_type;
1924  }
1925  if (v->fastuvmc) {
1926  uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1927  uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1928  }
1929  srcY = s->next_picture.f->data[0];
1930  srcU = s->next_picture.f->data[1];
1931  srcV = s->next_picture.f->data[2];
1932 
1933  src_x = s->mb_x * 16 + (mx >> 2);
1934  src_y = s->mb_y * 16 + (my >> 2);
1935  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1936  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1937 
1938  if (v->profile != PROFILE_ADVANCED) {
1939  src_x = av_clip( src_x, -16, s->mb_width * 16);
1940  src_y = av_clip( src_y, -16, s->mb_height * 16);
1941  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1942  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1943  } else {
1944  src_x = av_clip( src_x, -17, s->avctx->coded_width);
1945  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1946  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1947  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1948  }
1949 
1950  srcY += src_y * s->linesize + src_x;
1951  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1952  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1953 
1954  if (v->field_mode && v->ref_field_type[1]) {
1955  srcY += s->current_picture_ptr->f->linesize[0];
1956  srcU += s->current_picture_ptr->f->linesize[1];
1957  srcV += s->current_picture_ptr->f->linesize[2];
1958  }
1959 
1960  /* for grayscale we should not try to read from unknown area */
1961  if (s->flags & CODEC_FLAG_GRAY) {
1962  srcU = s->edge_emu_buffer + 18 * s->linesize;
1963  srcV = s->edge_emu_buffer + 18 * s->linesize;
1964  }
1965 
1966  if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
1967  || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1968  || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1969  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1970 
1971  srcY -= s->mspel * (1 + s->linesize);
1973  s->linesize, s->linesize,
1974  17 + s->mspel * 2, 17 + s->mspel * 2,
1975  src_x - s->mspel, src_y - s->mspel,
1976  s->h_edge_pos, v_edge_pos);
1977  srcY = s->edge_emu_buffer;
1978  s->vdsp.emulated_edge_mc(uvbuf, srcU,
1979  s->uvlinesize, s->uvlinesize,
1980  8 + 1, 8 + 1,
1981  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1982  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
1983  s->uvlinesize, s->uvlinesize,
1984  8 + 1, 8 + 1,
1985  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1986  srcU = uvbuf;
1987  srcV = uvbuf + 16;
1988  /* if we deal with range reduction we need to scale source blocks */
1989  if (v->rangeredfrm) {
1990  int i, j;
1991  uint8_t *src, *src2;
1992 
1993  src = srcY;
1994  for (j = 0; j < 17 + s->mspel * 2; j++) {
1995  for (i = 0; i < 17 + s->mspel * 2; i++)
1996  src[i] = ((src[i] - 128) >> 1) + 128;
1997  src += s->linesize;
1998  }
1999  src = srcU;
2000  src2 = srcV;
2001  for (j = 0; j < 9; j++) {
2002  for (i = 0; i < 9; i++) {
2003  src[i] = ((src[i] - 128) >> 1) + 128;
2004  src2[i] = ((src2[i] - 128) >> 1) + 128;
2005  }
2006  src += s->uvlinesize;
2007  src2 += s->uvlinesize;
2008  }
2009  }
2010 
2011  if (use_ic) {
2012  uint8_t (*luty )[256] = v->next_luty;
2013  uint8_t (*lutuv)[256] = v->next_lutuv;
2014  int i, j;
2015  uint8_t *src, *src2;
2016 
2017  src = srcY;
2018  for (j = 0; j < 17 + s->mspel * 2; j++) {
2019  int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
2020  for (i = 0; i < 17 + s->mspel * 2; i++)
2021  src[i] = luty[f][src[i]];
2022  src += s->linesize;
2023  }
2024  src = srcU;
2025  src2 = srcV;
2026  for (j = 0; j < 9; j++) {
2027  int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
2028  for (i = 0; i < 9; i++) {
2029  src[i] = lutuv[f][src[i]];
2030  src2[i] = lutuv[f][src2[i]];
2031  }
2032  src += s->uvlinesize;
2033  src2 += s->uvlinesize;
2034  }
2035  }
2036  srcY += s->mspel * (1 + s->linesize);
2037  }
2038 
2039  off = 0;
2040  off_uv = 0;
2041 
2042  if (s->mspel) {
2043  dxy = ((my & 3) << 2) | (mx & 3);
2044  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2045  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2046  srcY += s->linesize * 8;
2047  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2048  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2049  } else { // hpel mc
2050  dxy = (my & 2) | ((mx & 2) >> 1);
2051 
2052  if (!v->rnd)
2053  s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2054  else
2055  s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
2056  }
2057 
2058  if (s->flags & CODEC_FLAG_GRAY) return;
2059  /* Chroma MC always uses qpel blilinear */
2060  uvmx = (uvmx & 3) << 1;
2061  uvmy = (uvmy & 3) << 1;
2062  if (!v->rnd) {
2063  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2064  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2065  } else {
2066  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2067  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2068  }
2069 }
2070 
2071 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2072 {
2073  int n = bfrac;
2074 
2075 #if B_FRACTION_DEN==256
2076  if (inv)
2077  n -= 256;
2078  if (!qs)
2079  return 2 * ((value * n + 255) >> 9);
2080  return (value * n + 128) >> 8;
2081 #else
2082  if (inv)
2083  n -= B_FRACTION_DEN;
2084  if (!qs)
2085  return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2086  return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2087 #endif
2088 }
2089 
2092 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2093  int direct, int mode)
2094 {
2095  if (direct) {
2096  vc1_mc_1mv(v, 0);
2097  vc1_interp_mc(v);
2098  return;
2099  }
2100  if (mode == BMV_TYPE_INTERPOLATED) {
2101  vc1_mc_1mv(v, 0);
2102  vc1_interp_mc(v);
2103  return;
2104  }
2105 
2106  vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2107 }
2108 
/**
 * Predict and store the forward and backward MVs of a B-frame macroblock:
 * scale the co-located next-picture MV (direct), or median-predict from
 * neighbours A/B/C with pullback (8.3.5.3.4) and signed-modulus wrap (4.11).
 * Consumes bits from the bitstream in the hybrid-prediction branches, so
 * statement order matters.
 */
static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                                 int direct, int mvtype)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;
    int r_x, r_y;
    const uint8_t *is_intra = v->mb_type[0];

    r_x = v->range_x;
    r_y = v->range_y;
    /* scale MV difference to be quad-pel */
    dmv_x[0] <<= 1 - s->quarter_sample;
    dmv_y[0] <<= 1 - s->quarter_sample;
    dmv_x[1] <<= 1 - s->quarter_sample;
    dmv_y[1] <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy = s->block_index[0];

    if (s->mb_intra) {
        /* intra MB: zero both direction MVs and bail out */
        s->current_picture.motion_val[0][xy + v->blocks_off][0] =
        s->current_picture.motion_val[0][xy + v->blocks_off][1] =
        s->current_picture.motion_val[1][xy + v->blocks_off][0] =
        s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
        return;
    }
    if (!v->field_mode) {
        /* derive fwd/bwd MVs by scaling the co-located next-picture MV */
        s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
        s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
        s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
        s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);

        /* Pullback predicted motion vectors as specified in 8.4.5.4 */
        s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
        s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
        s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
        s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    }
    if (direct) {
        /* direct mode: the scaled MVs are final, no residual is added */
        s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
        s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
        s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
        s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
        return;
    }

    if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* forward-direction neighbours: C = left, A = top, B = top-right
         * (top-left for the last MB column) */
        C = s->current_picture.motion_val[0][xy - 2];
        A = s->current_picture.motion_val[0][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[0][xy - wrap * 2 + off];

        if (!s->mb_x) C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE(review): intentionally disabled via "0 &&" - kept for
         * reference; would consume one bit when taken */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* same procedure for the backward direction */
        C = s->current_picture.motion_val[1][xy - 2];
        A = s->current_picture.motion_val[1][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[1][xy - wrap * 2 + off];

        if (!s->mb_x)
            C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE(review): also intentionally disabled via "0 &&" */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */

        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
    s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
    s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
    s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}
2321 
/**
 * Predict B-frame MVs for interlaced-field pictures. Direct mode scales
 * the co-located next-picture MV and chooses the reference field by
 * majority vote of the co-located blocks' field flags; other modes defer
 * to vc1_pred_mv() per direction. The vc1_pred_mv() calls consume
 * bitstream state, so their order must not change.
 */
static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
{
    int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
    MpegEncContext *s = &v->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    if (v->bmvtype == BMV_TYPE_DIRECT) {
        int total_opp, k, f;
        if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
            s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 1, s->quarter_sample);
            s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 1, s->quarter_sample);

            /* majority vote over the four co-located blocks' field flags */
            total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
            f = (total_opp > 2) ? 1 : 0;
        } else {
            /* co-located MB is intra: zero MVs, same-field reference */
            s->mv[0][0][0] = s->mv[0][0][1] = 0;
            s->mv[1][0][0] = s->mv[1][0][1] = 0;
            f = 0;
        }
        v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
        /* replicate MVs and field flag to all four 8x8 blocks */
        for (k = 0; k < 4; k++) {
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
            v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
        }
        return;
    }
    if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
        /* interpolated: predict both directions for the whole MB */
        vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        return;
    }
    if (dir) { // backward
        vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        if (n == 3 || mv1) {
            /* also refresh the opposite-direction predictor */
            vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
        }
    } else { // forward
        vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        if (n == 3 || mv1) {
            vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
        }
    }
}
2378 
2388 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2389  int16_t **dc_val_ptr, int *dir_ptr)
2390 {
2391  int a, b, c, wrap, pred, scale;
2392  int16_t *dc_val;
2393  static const uint16_t dcpred[32] = {
2394  -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2395  114, 102, 93, 85, 79, 73, 68, 64,
2396  60, 57, 54, 51, 49, 47, 45, 43,
2397  41, 39, 38, 37, 35, 34, 33
2398  };
2399 
2400  /* find prediction - wmv3_dc_scale always used here in fact */
2401  if (n < 4) scale = s->y_dc_scale;
2402  else scale = s->c_dc_scale;
2403 
2404  wrap = s->block_wrap[n];
2405  dc_val = s->dc_val[0] + s->block_index[n];
2406 
2407  /* B A
2408  * C X
2409  */
2410  c = dc_val[ - 1];
2411  b = dc_val[ - 1 - wrap];
2412  a = dc_val[ - wrap];
2413 
2414  if (pq < 9 || !overlap) {
2415  /* Set outer values */
2416  if (s->first_slice_line && (n != 2 && n != 3))
2417  b = a = dcpred[scale];
2418  if (s->mb_x == 0 && (n != 1 && n != 3))
2419  b = c = dcpred[scale];
2420  } else {
2421  /* Set outer values */
2422  if (s->first_slice_line && (n != 2 && n != 3))
2423  b = a = 0;
2424  if (s->mb_x == 0 && (n != 1 && n != 3))
2425  b = c = 0;
2426  }
2427 
2428  if (abs(a - b) <= abs(b - c)) {
2429  pred = c;
2430  *dir_ptr = 1; // left
2431  } else {
2432  pred = a;
2433  *dir_ptr = 0; // top
2434  }
2435 
2436  /* update predictor */
2437  *dc_val_ptr = &dc_val[0];
2438  return pred;
2439 }
2440 
2441 
2453 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2454  int a_avail, int c_avail,
2455  int16_t **dc_val_ptr, int *dir_ptr)
2456 {
2457  int a, b, c, wrap, pred;
2458  int16_t *dc_val;
2459  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2460  int q1, q2 = 0;
2461  int dqscale_index;
2462 
2463  wrap = s->block_wrap[n];
2464  dc_val = s->dc_val[0] + s->block_index[n];
2465 
2466  /* B A
2467  * C X
2468  */
2469  c = dc_val[ - 1];
2470  b = dc_val[ - 1 - wrap];
2471  a = dc_val[ - wrap];
2472  /* scale predictors if needed */
2473  q1 = s->current_picture.qscale_table[mb_pos];
2474  dqscale_index = s->y_dc_scale_table[q1] - 1;
2475  if (dqscale_index < 0)
2476  return 0;
2477  if (c_avail && (n != 1 && n != 3)) {
2478  q2 = s->current_picture.qscale_table[mb_pos - 1];
2479  if (q2 && q2 != q1)
2480  c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2481  }
2482  if (a_avail && (n != 2 && n != 3)) {
2483  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2484  if (q2 && q2 != q1)
2485  a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2486  }
2487  if (a_avail && c_avail && (n != 3)) {
2488  int off = mb_pos;
2489  if (n != 1)
2490  off--;
2491  if (n != 2)
2492  off -= s->mb_stride;
2493  q2 = s->current_picture.qscale_table[off];
2494  if (q2 && q2 != q1)
2495  b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2496  }
2497 
2498  if (a_avail && c_avail) {
2499  if (abs(a - b) <= abs(b - c)) {
2500  pred = c;
2501  *dir_ptr = 1; // left
2502  } else {
2503  pred = a;
2504  *dir_ptr = 0; // top
2505  }
2506  } else if (a_avail) {
2507  pred = a;
2508  *dir_ptr = 0; // top
2509  } else if (c_avail) {
2510  pred = c;
2511  *dir_ptr = 1; // left
2512  } else {
2513  pred = 0;
2514  *dir_ptr = 1; // left
2515  }
2516 
2517  /* update predictor */
2518  *dc_val_ptr = &dc_val[0];
2519  return pred;
2520 }
2521  // Block group
2523 
2530 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2531  uint8_t **coded_block_ptr)
2532 {
2533  int xy, wrap, pred, a, b, c;
2534 
2535  xy = s->block_index[n];
2536  wrap = s->b8_stride;
2537 
2538  /* B C
2539  * A X
2540  */
2541  a = s->coded_block[xy - 1 ];
2542  b = s->coded_block[xy - 1 - wrap];
2543  c = s->coded_block[xy - wrap];
2544 
2545  if (b == c) {
2546  pred = a;
2547  } else {
2548  pred = c;
2549  }
2550 
2551  /* store value */
2552  *coded_block_ptr = &s->coded_block[xy];
2553 
2554  return pred;
2555 }
2556 
/**
 * Decode one AC coefficient as a (last, run, level) triple.
 * Handles the plain VLC case plus the three escape modes (delta-level,
 * delta-run, and fixed-length mode 3). Bit-read order is significant.
 * @param last  set to 1 if this is the last coefficient of the block
 * @param skip  number of zero coefficients preceding this one
 * @param value the signed coefficient level
 * @param codingset index of the AC table set in use
 */
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
                                int *value, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    int index, escape, run = 0, level = 0, lst = 0;

    index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
    if (index != ff_vc1_ac_sizes[codingset] - 1) {
        /* regular (run, level) codeword; force "last" on bitstream overread */
        run   = vc1_index_decode_table[codingset][index][0];
        level = vc1_index_decode_table[codingset][index][1];
        lst   = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
        if (get_bits1(gb))
            level = -level;
    } else {
        /* escape: 0 = delta level, 1 = delta run, 2 = fixed-length mode */
        escape = decode210(gb);
        if (escape != 2) {
            index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
            run   = vc1_index_decode_table[codingset][index][0];
            level = vc1_index_decode_table[codingset][index][1];
            lst   = index >= vc1_last_decode_table[codingset];
            if (escape == 0) {
                if (lst)
                    level += vc1_last_delta_level_table[codingset][run];
                else
                    level += vc1_delta_level_table[codingset][run];
            } else {
                if (lst)
                    run += vc1_last_delta_run_table[codingset][level] + 1;
                else
                    run += vc1_delta_run_table[codingset][level] + 1;
            }
            if (get_bits1(gb))
                level = -level;
        } else {
            /* escape mode 3: explicit run/level field widths, read once
             * per slice (see tables 59/60 in the spec) */
            int sign;
            lst = get_bits1(gb);
            if (v->s.esc3_level_length == 0) {
                if (v->pq < 8 || v->dquantfrm) { // table 59
                    v->s.esc3_level_length = get_bits(gb, 3);
                    if (!v->s.esc3_level_length)
                        v->s.esc3_level_length = get_bits(gb, 2) + 8;
                } else { // table 60
                    v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
                }
                v->s.esc3_run_length = 3 + get_bits(gb, 2);
            }
            run   = get_bits(gb, v->s.esc3_run_length);
            sign  = get_bits1(gb);
            level = get_bits(gb, v->s.esc3_level_length);
            if (sign)
                level = -level;
        }
    }

    *last  = lst;
    *skip  = run;
    *value = level;
}
2624 
2632 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2633  int coded, int codingset)
2634 {
2635  GetBitContext *gb = &v->s.gb;
2636  MpegEncContext *s = &v->s;
2637  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2638  int i;
2639  int16_t *dc_val;
2640  int16_t *ac_val, *ac_val2;
2641  int dcdiff;
2642 
2643  /* Get DC differential */
2644  if (n < 4) {
2646  } else {
2648  }
2649  if (dcdiff < 0) {
2650  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2651  return -1;
2652  }
2653  if (dcdiff) {
2654  if (dcdiff == 119 /* ESC index value */) {
2655  /* TODO: Optimize */
2656  if (v->pq == 1) dcdiff = get_bits(gb, 10);
2657  else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2658  else dcdiff = get_bits(gb, 8);
2659  } else {
2660  if (v->pq == 1)
2661  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2662  else if (v->pq == 2)
2663  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2664  }
2665  if (get_bits1(gb))
2666  dcdiff = -dcdiff;
2667  }
2668 
2669  /* Prediction */
2670  dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2671  *dc_val = dcdiff;
2672 
2673  /* Store the quantized DC coeff, used for prediction */
2674  if (n < 4) {
2675  block[0] = dcdiff * s->y_dc_scale;
2676  } else {
2677  block[0] = dcdiff * s->c_dc_scale;
2678  }
2679  /* Skip ? */
2680  if (!coded) {
2681  goto not_coded;
2682  }
2683 
2684  // AC Decoding
2685  i = 1;
2686 
2687  {
2688  int last = 0, skip, value;
2689  const uint8_t *zz_table;
2690  int scale;
2691  int k;
2692 
2693  scale = v->pq * 2 + v->halfpq;
2694 
2695  if (v->s.ac_pred) {
2696  if (!dc_pred_dir)
2697  zz_table = v->zz_8x8[2];
2698  else
2699  zz_table = v->zz_8x8[3];
2700  } else
2701  zz_table = v->zz_8x8[1];
2702 
2703  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2704  ac_val2 = ac_val;
2705  if (dc_pred_dir) // left
2706  ac_val -= 16;
2707  else // top
2708  ac_val -= 16 * s->block_wrap[n];
2709 
2710  while (!last) {
2711  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2712  i += skip;
2713  if (i > 63)
2714  break;
2715  block[zz_table[i++]] = value;
2716  }
2717 
2718  /* apply AC prediction if needed */
2719  if (s->ac_pred) {
2720  if (dc_pred_dir) { // left
2721  for (k = 1; k < 8; k++)
2722  block[k << v->left_blk_sh] += ac_val[k];
2723  } else { // top
2724  for (k = 1; k < 8; k++)
2725  block[k << v->top_blk_sh] += ac_val[k + 8];
2726  }
2727  }
2728  /* save AC coeffs for further prediction */
2729  for (k = 1; k < 8; k++) {
2730  ac_val2[k] = block[k << v->left_blk_sh];
2731  ac_val2[k + 8] = block[k << v->top_blk_sh];
2732  }
2733 
2734  /* scale AC coeffs */
2735  for (k = 1; k < 64; k++)
2736  if (block[k]) {
2737  block[k] *= scale;
2738  if (!v->pquantizer)
2739  block[k] += (block[k] < 0) ? -v->pq : v->pq;
2740  }
2741 
2742  if (s->ac_pred) i = 63;
2743  }
2744 
2745 not_coded:
2746  if (!coded) {
2747  int k, scale;
2748  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2749  ac_val2 = ac_val;
2750 
2751  i = 0;
2752  scale = v->pq * 2 + v->halfpq;
2753  memset(ac_val2, 0, 16 * 2);
2754  if (dc_pred_dir) { // left
2755  ac_val -= 16;
2756  if (s->ac_pred)
2757  memcpy(ac_val2, ac_val, 8 * 2);
2758  } else { // top
2759  ac_val -= 16 * s->block_wrap[n];
2760  if (s->ac_pred)
2761  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2762  }
2763 
2764  /* apply AC prediction if needed */
2765  if (s->ac_pred) {
2766  if (dc_pred_dir) { //left
2767  for (k = 1; k < 8; k++) {
2768  block[k << v->left_blk_sh] = ac_val[k] * scale;
2769  if (!v->pquantizer && block[k << v->left_blk_sh])
2770  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2771  }
2772  } else { // top
2773  for (k = 1; k < 8; k++) {
2774  block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2775  if (!v->pquantizer && block[k << v->top_blk_sh])
2776  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2777  }
2778  }
2779  i = 63;
2780  }
2781  }
2782  s->block_last_index[n] = i;
2783 
2784  return 0;
2785 }
2786 
2795 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2796  int coded, int codingset, int mquant)
2797 {
2798  GetBitContext *gb = &v->s.gb;
2799  MpegEncContext *s = &v->s;
2800  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2801  int i;
2802  int16_t *dc_val;
2803  int16_t *ac_val, *ac_val2;
2804  int dcdiff;
2805  int a_avail = v->a_avail, c_avail = v->c_avail;
2806  int use_pred = s->ac_pred;
2807  int scale;
2808  int q1, q2 = 0;
2809  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2810 
2811  /* Get DC differential */
2812  if (n < 4) {
2814  } else {
2816  }
2817  if (dcdiff < 0) {
2818  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2819  return -1;
2820  }
2821  if (dcdiff) {
2822  if (dcdiff == 119 /* ESC index value */) {
2823  /* TODO: Optimize */
2824  if (mquant == 1) dcdiff = get_bits(gb, 10);
2825  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2826  else dcdiff = get_bits(gb, 8);
2827  } else {
2828  if (mquant == 1)
2829  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2830  else if (mquant == 2)
2831  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2832  }
2833  if (get_bits1(gb))
2834  dcdiff = -dcdiff;
2835  }
2836 
2837  /* Prediction */
2838  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2839  *dc_val = dcdiff;
2840 
2841  /* Store the quantized DC coeff, used for prediction */
2842  if (n < 4) {
2843  block[0] = dcdiff * s->y_dc_scale;
2844  } else {
2845  block[0] = dcdiff * s->c_dc_scale;
2846  }
2847 
2848  //AC Decoding
2849  i = 1;
2850 
2851  /* check if AC is needed at all */
2852  if (!a_avail && !c_avail)
2853  use_pred = 0;
2854  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2855  ac_val2 = ac_val;
2856 
2857  scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2858 
2859  if (dc_pred_dir) // left
2860  ac_val -= 16;
2861  else // top
2862  ac_val -= 16 * s->block_wrap[n];
2863 
2864  q1 = s->current_picture.qscale_table[mb_pos];
2865  if ( dc_pred_dir && c_avail && mb_pos)
2866  q2 = s->current_picture.qscale_table[mb_pos - 1];
2867  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2868  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2869  if ( dc_pred_dir && n == 1)
2870  q2 = q1;
2871  if (!dc_pred_dir && n == 2)
2872  q2 = q1;
2873  if (n == 3)
2874  q2 = q1;
2875 
2876  if (coded) {
2877  int last = 0, skip, value;
2878  const uint8_t *zz_table;
2879  int k;
2880 
2881  if (v->s.ac_pred) {
2882  if (!use_pred && v->fcm == ILACE_FRAME) {
2883  zz_table = v->zzi_8x8;
2884  } else {
2885  if (!dc_pred_dir) // top
2886  zz_table = v->zz_8x8[2];
2887  else // left
2888  zz_table = v->zz_8x8[3];
2889  }
2890  } else {
2891  if (v->fcm != ILACE_FRAME)
2892  zz_table = v->zz_8x8[1];
2893  else
2894  zz_table = v->zzi_8x8;
2895  }
2896 
2897  while (!last) {
2898  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2899  i += skip;
2900  if (i > 63)
2901  break;
2902  block[zz_table[i++]] = value;
2903  }
2904 
2905  /* apply AC prediction if needed */
2906  if (use_pred) {
2907  /* scale predictors if needed*/
2908  if (q2 && q1 != q2) {
2909  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2910  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2911 
2912  if (q1 < 1)
2913  return AVERROR_INVALIDDATA;
2914  if (dc_pred_dir) { // left
2915  for (k = 1; k < 8; k++)
2916  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2917  } else { // top
2918  for (k = 1; k < 8; k++)
2919  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2920  }
2921  } else {
2922  if (dc_pred_dir) { //left
2923  for (k = 1; k < 8; k++)
2924  block[k << v->left_blk_sh] += ac_val[k];
2925  } else { //top
2926  for (k = 1; k < 8; k++)
2927  block[k << v->top_blk_sh] += ac_val[k + 8];
2928  }
2929  }
2930  }
2931  /* save AC coeffs for further prediction */
2932  for (k = 1; k < 8; k++) {
2933  ac_val2[k ] = block[k << v->left_blk_sh];
2934  ac_val2[k + 8] = block[k << v->top_blk_sh];
2935  }
2936 
2937  /* scale AC coeffs */
2938  for (k = 1; k < 64; k++)
2939  if (block[k]) {
2940  block[k] *= scale;
2941  if (!v->pquantizer)
2942  block[k] += (block[k] < 0) ? -mquant : mquant;
2943  }
2944 
2945  if (use_pred) i = 63;
2946  } else { // no AC coeffs
2947  int k;
2948 
2949  memset(ac_val2, 0, 16 * 2);
2950  if (dc_pred_dir) { // left
2951  if (use_pred) {
2952  memcpy(ac_val2, ac_val, 8 * 2);
2953  if (q2 && q1 != q2) {
2954  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2955  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2956  if (q1 < 1)
2957  return AVERROR_INVALIDDATA;
2958  for (k = 1; k < 8; k++)
2959  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2960  }
2961  }
2962  } else { // top
2963  if (use_pred) {
2964  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2965  if (q2 && q1 != q2) {
2966  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2967  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2968  if (q1 < 1)
2969  return AVERROR_INVALIDDATA;
2970  for (k = 1; k < 8; k++)
2971  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2972  }
2973  }
2974  }
2975 
2976  /* apply AC prediction if needed */
2977  if (use_pred) {
2978  if (dc_pred_dir) { // left
2979  for (k = 1; k < 8; k++) {
2980  block[k << v->left_blk_sh] = ac_val2[k] * scale;
2981  if (!v->pquantizer && block[k << v->left_blk_sh])
2982  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2983  }
2984  } else { // top
2985  for (k = 1; k < 8; k++) {
2986  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2987  if (!v->pquantizer && block[k << v->top_blk_sh])
2988  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2989  }
2990  }
2991  i = 63;
2992  }
2993  }
2994  s->block_last_index[n] = i;
2995 
2996  return 0;
2997 }
2998 
/*
 * Decode one intra block inside a P/B macroblock (mixed intra/inter MBs).
 * Performs DC differential decoding + DC prediction, AC coefficient
 * decoding, AC prediction (with quantizer-mismatch rescaling of the
 * predictors), and final coefficient scaling.
 * @param block  destination for the 64 dequantized coefficients
 * @param n      block index inside the MB (0..3 luma, 4..5 chroma)
 * @param coded  nonzero if AC coefficients are present in the bitstream
 * @param mquant MB quantizer (clamped below)
 * @param codingset AC coding-set index to use
 * @return 0 on success, negative on bitstream error
 * NOTE(review): this Doxygen listing elided hyperlinked statements, e.g.
 * the DC-differential VLC reads between lines 3033 and 3037 — consult the
 * original vc1dec.c before relying on the exact body.
 */
3007 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
3008  int coded, int mquant, int codingset)
3009 {
3010  GetBitContext *gb = &v->s.gb;
3011  MpegEncContext *s = &v->s;
3012  int dc_pred_dir = 0; /* Direction of the DC prediction used */
3013  int i;
3014  int16_t *dc_val;
3015  int16_t *ac_val, *ac_val2;
3016  int dcdiff;
3017  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3018  int a_avail = v->a_avail, c_avail = v->c_avail;
3019  int use_pred = s->ac_pred;
3020  int scale;
3021  int q1, q2 = 0;
3022 
3023  s->bdsp.clear_block(block);
3024 
3025  /* XXX: Guard against dumb values of mquant */
3026  mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3027 
3028  /* Set DC scale - y and c use the same */
3029  s->y_dc_scale = s->y_dc_scale_table[mquant];
3030  s->c_dc_scale = s->c_dc_scale_table[mquant];
3031 
3032  /* Get DC differential */
     /* NOTE(review): the VLC reads for dcdiff (luma vs chroma DC tables)
      * were on the elided lines 3034/3036. */
3033  if (n < 4) {
3035  } else {
3037  }
3038  if (dcdiff < 0) {
3039  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3040  return -1;
3041  }
3042  if (dcdiff) {
3043  if (dcdiff == 119 /* ESC index value */) {
3044  /* TODO: Optimize */
3045  if (mquant == 1) dcdiff = get_bits(gb, 10);
3046  else if (mquant == 2) dcdiff = get_bits(gb, 9);
3047  else dcdiff = get_bits(gb, 8);
3048  } else {
3049  if (mquant == 1)
3050  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3051  else if (mquant == 2)
3052  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3053  }
3054  if (get_bits1(gb))
3055  dcdiff = -dcdiff;
3056  }
3057 
3058  /* Prediction */
3059  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3060  *dc_val = dcdiff;
3061 
3062  /* Store the quantized DC coeff, used for prediction */
3063 
3064  if (n < 4) {
3065  block[0] = dcdiff * s->y_dc_scale;
3066  } else {
3067  block[0] = dcdiff * s->c_dc_scale;
3068  }
3069 
3070  //AC Decoding
3071  i = 1;
3072 
3073  /* check if AC is needed at all and adjust direction if needed */
3074  if (!a_avail) dc_pred_dir = 1;
3075  if (!c_avail) dc_pred_dir = 0;
3076  if (!a_avail && !c_avail) use_pred = 0;
3077  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3078  ac_val2 = ac_val;
3079 
3080  scale = mquant * 2 + v->halfpq;
3081 
3082  if (dc_pred_dir) //left
3083  ac_val -= 16;
3084  else //top
3085  ac_val -= 16 * s->block_wrap[n];
3086 
     /* q2 = quantizer of the MB the predictor comes from; q1 = this MB's.
      * A mismatch triggers rescaling of predicted AC values below. */
3087  q1 = s->current_picture.qscale_table[mb_pos];
3088  if (dc_pred_dir && c_avail && mb_pos)
3089  q2 = s->current_picture.qscale_table[mb_pos - 1];
3090  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3091  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3092  if ( dc_pred_dir && n == 1)
3093  q2 = q1;
3094  if (!dc_pred_dir && n == 2)
3095  q2 = q1;
3096  if (n == 3) q2 = q1;
3097 
3098  if (coded) {
3099  int last = 0, skip, value;
3100  int k;
3101 
3102  while (!last) {
3103  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3104  i += skip;
3105  if (i > 63)
3106  break;
3107  if (v->fcm == PROGRESSIVE)
3108  block[v->zz_8x8[0][i++]] = value;
3109  else {
3110  if (use_pred && (v->fcm == ILACE_FRAME)) {
3111  if (!dc_pred_dir) // top
3112  block[v->zz_8x8[2][i++]] = value;
3113  else // left
3114  block[v->zz_8x8[3][i++]] = value;
3115  } else {
3116  block[v->zzi_8x8[i++]] = value;
3117  }
3118  }
3119  }
3120 
3121  /* apply AC prediction if needed */
3122  if (use_pred) {
3123  /* scale predictors if needed*/
3124  if (q2 && q1 != q2) {
3125  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3126  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3127 
3128  if (q1 < 1)
3129  return AVERROR_INVALIDDATA;
3130  if (dc_pred_dir) { // left
3131  for (k = 1; k < 8; k++)
3132  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3133  } else { //top
3134  for (k = 1; k < 8; k++)
3135  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3136  }
3137  } else {
3138  if (dc_pred_dir) { // left
3139  for (k = 1; k < 8; k++)
3140  block[k << v->left_blk_sh] += ac_val[k];
3141  } else { // top
3142  for (k = 1; k < 8; k++)
3143  block[k << v->top_blk_sh] += ac_val[k + 8];
3144  }
3145  }
3146  }
3147  /* save AC coeffs for further prediction */
3148  for (k = 1; k < 8; k++) {
3149  ac_val2[k ] = block[k << v->left_blk_sh];
3150  ac_val2[k + 8] = block[k << v->top_blk_sh];
3151  }
3152 
3153  /* scale AC coeffs */
3154  for (k = 1; k < 64; k++)
3155  if (block[k]) {
3156  block[k] *= scale;
3157  if (!v->pquantizer)
3158  block[k] += (block[k] < 0) ? -mquant : mquant;
3159  }
3160 
3161  if (use_pred) i = 63;
3162  } else { // no AC coeffs
3163  int k;
3164 
3165  memset(ac_val2, 0, 16 * 2);
3166  if (dc_pred_dir) { // left
3167  if (use_pred) {
3168  memcpy(ac_val2, ac_val, 8 * 2);
3169  if (q2 && q1 != q2) {
3170  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3171  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3172  if (q1 < 1)
3173  return AVERROR_INVALIDDATA;
3174  for (k = 1; k < 8; k++)
3175  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3176  }
3177  }
3178  } else { // top
3179  if (use_pred) {
3180  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3181  if (q2 && q1 != q2) {
3182  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3183  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3184  if (q1 < 1)
3185  return AVERROR_INVALIDDATA;
3186  for (k = 1; k < 8; k++)
3187  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3188  }
3189  }
3190  }
3191 
3192  /* apply AC prediction if needed */
3193  if (use_pred) {
3194  if (dc_pred_dir) { // left
3195  for (k = 1; k < 8; k++) {
3196  block[k << v->left_blk_sh] = ac_val2[k] * scale;
3197  if (!v->pquantizer && block[k << v->left_blk_sh])
3198  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3199  }
3200  } else { // top
3201  for (k = 1; k < 8; k++) {
3202  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3203  if (!v->pquantizer && block[k << v->top_blk_sh])
3204  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3205  }
3206  }
3207  i = 63;
3208  }
3209  }
3210  s->block_last_index[n] = i;
3211 
3212  return 0;
3213 }
3214 
/*
 * Decode one inter (residual) block of a P macroblock and add it to the
 * prediction in dst. Selects the block transform type (8x8/8x4/4x8/4x4),
 * decodes the sub-block pattern and AC run-level pairs, dequantizes, and
 * applies the matching inverse transform (DC-only fast path when i == 1).
 * @param skip_block if nonzero, coefficients are parsed but no pixels are
 *                   written (used for gray-only decoding of chroma)
 * @param ttmb_out   receives this block's transform type, packed 4 bits
 *                   per block index n
 * @return pattern of coded 4x4 sub-blocks (pat), used by the loop filter
 * NOTE(review): the Doxygen listing elided line 3233 (derivation of ttblk
 * from the TTMB VLC when ttmb == -1) and line 3299 (the progressive 4x4
 * zigzag index lookup) — see original vc1dec.c.
 */
3217 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3218  int mquant, int ttmb, int first_block,
3219  uint8_t *dst, int linesize, int skip_block,
3220  int *ttmb_out)
3221 {
3222  MpegEncContext *s = &v->s;
3223  GetBitContext *gb = &s->gb;
3224  int i, j;
3225  int subblkpat = 0;
3226  int scale, off, idx, last, skip, value;
3227  int ttblk = ttmb & 7;
3228  int pat = 0;
3229 
3230  s->bdsp.clear_block(block);
3231 
3232  if (ttmb == -1) {
3234  }
3235  if (ttblk == TT_4X4) {
3236  subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3237  }
3238  if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3239  && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3240  || (!v->res_rtm_flag && !first_block))) {
3241  subblkpat = decode012(gb);
3242  if (subblkpat)
3243  subblkpat ^= 3; // swap decoded pattern bits
3244  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3245  ttblk = TT_8X4;
3246  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3247  ttblk = TT_4X8;
3248  }
3249  scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3250 
3251  // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3252  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3253  subblkpat = 2 - (ttblk == TT_8X4_TOP);
3254  ttblk = TT_8X4;
3255  }
3256  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3257  subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3258  ttblk = TT_4X8;
3259  }
3260  switch (ttblk) {
3261  case TT_8X8:
3262  pat = 0xF;
3263  i = 0;
3264  last = 0;
3265  while (!last) {
3266  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3267  i += skip;
3268  if (i > 63)
3269  break;
3270  if (!v->fcm)
3271  idx = v->zz_8x8[0][i++];
3272  else
3273  idx = v->zzi_8x8[i++];
3274  block[idx] = value * scale;
3275  if (!v->pquantizer)
3276  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3277  }
3278  if (!skip_block) {
3279  if (i == 1)
3280  v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3281  else {
3282  v->vc1dsp.vc1_inv_trans_8x8(block);
3283  s->idsp.add_pixels_clamped(block, dst, linesize);
3284  }
3285  }
3286  break;
3287  case TT_4X4:
3288  pat = ~subblkpat & 0xF;
3289  for (j = 0; j < 4; j++) {
3290  last = subblkpat & (1 << (3 - j));
3291  i = 0;
3292  off = (j & 1) * 4 + (j & 2) * 16;
3293  while (!last) {
3294  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3295  i += skip;
3296  if (i > 15)
3297  break;
3298  if (!v->fcm)
3300  else
3301  idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3302  block[idx + off] = value * scale;
3303  if (!v->pquantizer)
3304  block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3305  }
3306  if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3307  if (i == 1)
3308  v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3309  else
3310  v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3311  }
3312  }
3313  break;
3314  case TT_8X4:
3315  pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3316  for (j = 0; j < 2; j++) {
3317  last = subblkpat & (1 << (1 - j));
3318  i = 0;
3319  off = j * 32;
3320  while (!last) {
3321  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3322  i += skip;
3323  if (i > 31)
3324  break;
3325  if (!v->fcm)
3326  idx = v->zz_8x4[i++] + off;
3327  else
3328  idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3329  block[idx] = value * scale;
3330  if (!v->pquantizer)
3331  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3332  }
3333  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3334  if (i == 1)
3335  v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3336  else
3337  v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
3338  }
3339  }
3340  break;
3341  case TT_4X8:
3342  pat = ~(subblkpat * 5) & 0xF;
3343  for (j = 0; j < 2; j++) {
3344  last = subblkpat & (1 << (1 - j));
3345  i = 0;
3346  off = j * 4;
3347  while (!last) {
3348  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3349  i += skip;
3350  if (i > 31)
3351  break;
3352  if (!v->fcm)
3353  idx = v->zz_4x8[i++] + off;
3354  else
3355  idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3356  block[idx] = value * scale;
3357  if (!v->pquantizer)
3358  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3359  }
3360  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3361  if (i == 1)
3362  v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3363  else
3364  v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
3365  }
3366  }
3367  break;
3368  }
3369  if (ttmb_out)
3370  *ttmb_out |= ttblk << (n * 4);
3371  return pat;
3372 }
3373  // Macroblock group
3375 
/* Per-index bit widths and value offsets used by the differential-MV /
 * MQUANT decoding macros below (indexed by the decoded VLC index).
 * NOTE(review): exact consumer is the GET_MQUANT/GET_MVDATA macro family
 * defined elsewhere in this file — confirm against the full source. */
3376 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3377 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3378 
/*
 * NOTE(review): the function signature (original line 3379) was elided by
 * this Doxygen listing; from the calls to vc1_v_loop_filter8/4 this is the
 * vertical in-loop deblocking filter applied to one block of the MB one
 * row above (s->mb_x - s->mb_stride), taking (VC1Context *v, int block_num).
 * First filters the MB boundary edge (full 8-wide or per-4 sub-edges based
 * on intra flags, MV equality and coded-block patterns), then the internal
 * horizontal edge 4 lines up when the block used a 4x4 or 8x4 transform.
 */
3380 {
3381  MpegEncContext *s = &v->s;
3382  int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3383  block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3384  mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3385  block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3386  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3387  uint8_t *dst;
3388 
3389  if (block_num > 3) {
3390  dst = s->dest[block_num - 3];
3391  } else {
3392  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3393  }
3394  if (s->mb_y != s->end_mb_y || block_num < 2) {
3395  int16_t (*mv)[2];
3396  int mv_stride;
3397 
3398  if (block_num > 3) {
3399  bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3400  bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3401  mv = &v->luma_mv[s->mb_x - s->mb_stride];
3402  mv_stride = s->mb_stride;
3403  } else {
3404  bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3405  : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3406  bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3407  : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3408  mv_stride = s->b8_stride;
3409  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
3410  }
3411 
     /* always filter the full 8-pixel edge if either side is intra or the
      * MVs across the edge differ; otherwise filter only coded sub-edges */
3412  if (bottom_is_intra & 1 || block_is_intra & 1 ||
3413  mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3414  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3415  } else {
3416  idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3417  if (idx == 3) {
3418  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3419  } else if (idx) {
3420  if (idx == 1)
3421  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3422  else
3423  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3424  }
3425  }
3426  }
3427 
3428  dst -= 4 * linesize;
3429  ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3430  if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3431  idx = (block_cbp | (block_cbp >> 2)) & 3;
3432  if (idx == 3) {
3433  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3434  } else if (idx) {
3435  if (idx == 1)
3436  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3437  else
3438  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3439  }
3440  }
3441 }
3442 
/*
 * NOTE(review): the function signature (original line 3443) was elided by
 * this Doxygen listing; from the calls to vc1_h_loop_filter8/4 this is the
 * horizontal (vertical-edge) in-loop deblocking filter for one block of
 * the MB up-and-left (s->mb_x - 1 - s->mb_stride), taking
 * (VC1Context *v, int block_num).
 * Mirrors the vertical filter: MB-boundary edge first, then the internal
 * vertical edge 4 pixels left when the block used a 4x4 or 4x8 transform.
 */
3444 {
3445  MpegEncContext *s = &v->s;
3446  int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3447  block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3448  mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3449  block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3450  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3451  uint8_t *dst;
3452 
3453  if (block_num > 3) {
3454  dst = s->dest[block_num - 3] - 8 * linesize;
3455  } else {
3456  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3457  }
3458 
3459  if (s->mb_x != s->mb_width || !(block_num & 5)) {
3460  int16_t (*mv)[2];
3461 
3462  if (block_num > 3) {
3463  right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3464  right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3465  mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3466  } else {
3467  right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3468  : (mb_cbp >> ((block_num + 1) * 4));
3469  right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3470  : (mb_is_intra >> ((block_num + 1) * 4));
3471  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3472  }
3473  if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3474  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3475  } else {
3476  idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3477  if (idx == 5) {
3478  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3479  } else if (idx) {
3480  if (idx == 1)
3481  v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3482  else
3483  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3484  }
3485  }
3486  }
3487 
3488  dst -= 4;
3489  ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3490  if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3491  idx = (block_cbp | (block_cbp >> 1)) & 5;
3492  if (idx == 5) {
3493  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3494  } else if (idx) {
3495  if (idx == 1)
3496  v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3497  else
3498  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3499  }
3500  }
3501 }
3502 
/*
 * NOTE(review): the signature (original line 3503) and the per-block
 * filter calls inside the loops (lines 3509/3516/3522, plus 3520) were
 * elided by this Doxygen listing. From context this is the driver that
 * applies the vertical then horizontal P-frame loop filters to all 6
 * blocks of the current MB; consult the original vc1dec.c for the elided
 * call sites.
 */
3504 {
3505  MpegEncContext *s = &v->s;
3506  int i;
3507 
3508  for (i = 0; i < 6; i++) {
3510  }
3511 
3512  /* V always precedes H, therefore we run H one MB before V;
3513  * at the end of a row, we catch up to complete the row */
3514  if (s->mb_x) {
3515  for (i = 0; i < 6; i++) {
3517  }
3518  if (s->mb_x == s->mb_width - 1) {
3519  s->mb_x++;
3521  for (i = 0; i < 6; i++) {
3523  }
3524  }
3525  }
3526 }
3527 
/*
 * NOTE(review): the signature (original line 3530) was elided by this
 * Doxygen listing; by structure this is the progressive P-frame
 * macroblock decoder (presumably vc1_decode_p_mb(VC1Context *v) — confirm
 * against the original source). Handles 1MV and 4MV modes, skipped MBs,
 * intra blocks inside P MBs, residual decoding via vc1_decode_p_block(),
 * overlap smoothing, and records cbp/ttblk/is_intra for the loop filter.
 * Returns 0.
 */
3531 {
3532  MpegEncContext *s = &v->s;
3533  GetBitContext *gb = &s->gb;
3534  int i, j;
3535  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3536  int cbp; /* cbp decoding stuff */
3537  int mqdiff, mquant; /* MB quantization */
3538  int ttmb = v->ttfrm; /* MB Transform type */
3539 
3540  int mb_has_coeffs = 1; /* last_flag */
3541  int dmv_x, dmv_y; /* Differential MV components */
3542  int index, index1; /* LUT indexes */
3543  int val, sign; /* temp values */
3544  int first_block = 1;
3545  int dst_idx, off;
3546  int skipped, fourmv;
3547  int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3548 
3549  mquant = v->pq; /* lossy initialization */
3550 
3551  if (v->mv_type_is_raw)
3552  fourmv = get_bits1(gb);
3553  else
3554  fourmv = v->mv_type_mb_plane[mb_pos];
3555  if (v->skip_is_raw)
3556  skipped = get_bits1(gb);
3557  else
3558  skipped = v->s.mbskip_table[mb_pos];
3559 
3560  if (!fourmv) { /* 1MV mode */
3561  if (!skipped) {
3562  GET_MVDATA(dmv_x, dmv_y);
3563 
3564  if (s->mb_intra) {
3565  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3566  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3567  }
3569  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3570 
3571  /* FIXME Set DC val for inter block ? */
3572  if (s->mb_intra && !mb_has_coeffs) {
3573  GET_MQUANT();
3574  s->ac_pred = get_bits1(gb);
3575  cbp = 0;
3576  } else if (mb_has_coeffs) {
3577  if (s->mb_intra)
3578  s->ac_pred = get_bits1(gb);
3579  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3580  GET_MQUANT();
3581  } else {
3582  mquant = v->pq;
3583  cbp = 0;
3584  }
3585  s->current_picture.qscale_table[mb_pos] = mquant;
3586 
3587  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3588  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3589  VC1_TTMB_VLC_BITS, 2);
3590  if (!s->mb_intra) vc1_mc_1mv(v, 0);
3591  dst_idx = 0;
3592  for (i = 0; i < 6; i++) {
3593  s->dc_val[0][s->block_index[i]] = 0;
3594  dst_idx += i >> 2;
3595  val = ((cbp >> (5 - i)) & 1);
3596  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3597  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3598  if (s->mb_intra) {
3599  /* check if prediction blocks A and C are available */
3600  v->a_avail = v->c_avail = 0;
3601  if (i == 2 || i == 3 || !s->first_slice_line)
3602  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3603  if (i == 1 || i == 3 || s->mb_x)
3604  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3605 
3606  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3607  (i & 4) ? v->codingset2 : v->codingset);
3608  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3609  continue;
3610  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3611  if (v->rangeredfrm)
3612  for (j = 0; j < 64; j++)
3613  s->block[i][j] <<= 1;
             /* NOTE(review): the put_signed_pixels_clamped call (line 3614)
              * was elided by the listing. */
3615  s->dest[dst_idx] + off,
3616  i & 4 ? s->uvlinesize
3617  : s->linesize);
3618  if (v->pq >= 9 && v->overlap) {
3619  if (v->c_avail)
3620  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3621  if (v->a_avail)
3622  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3623  }
3624  block_cbp |= 0xF << (i << 2);
3625  block_intra |= 1 << i;
3626  } else if (val) {
3627  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3628  s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3629  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3630  block_cbp |= pat << (i << 2);
3631  if (!v->ttmbf && ttmb < 8)
3632  ttmb = -1;
3633  first_block = 0;
3634  }
3635  }
3636  } else { // skipped
3637  s->mb_intra = 0;
3638  for (i = 0; i < 6; i++) {
3639  v->mb_type[0][s->block_index[i]] = 0;
3640  s->dc_val[0][s->block_index[i]] = 0;
3641  }
3642  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3643  s->current_picture.qscale_table[mb_pos] = 0;
3644  vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3645  vc1_mc_1mv(v, 0);
3646  }
3647  } else { // 4MV mode
3648  if (!skipped /* unskipped MB */) {
3649  int intra_count = 0, coded_inter = 0;
3650  int is_intra[6], is_coded[6];
3651  /* Get CBPCY */
3652  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3653  for (i = 0; i < 6; i++) {
3654  val = ((cbp >> (5 - i)) & 1);
3655  s->dc_val[0][s->block_index[i]] = 0;
3656  s->mb_intra = 0;
3657  if (i < 4) {
3658  dmv_x = dmv_y = 0;
3659  s->mb_intra = 0;
3660  mb_has_coeffs = 0;
3661  if (val) {
3662  GET_MVDATA(dmv_x, dmv_y);
3663  }
3664  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3665  if (!s->mb_intra)
3666  vc1_mc_4mv_luma(v, i, 0, 0);
3667  intra_count += s->mb_intra;
3668  is_intra[i] = s->mb_intra;
3669  is_coded[i] = mb_has_coeffs;
3670  }
3671  if (i & 4) {
3672  is_intra[i] = (intra_count >= 3);
3673  is_coded[i] = val;
3674  }
3675  if (i == 4)
3676  vc1_mc_4mv_chroma(v, 0);
3677  v->mb_type[0][s->block_index[i]] = is_intra[i];
3678  if (!coded_inter)
3679  coded_inter = !is_intra[i] & is_coded[i];
3680  }
3681  // if there are no coded blocks then don't do anything more
3682  dst_idx = 0;
3683  if (!intra_count && !coded_inter)
3684  goto end;
3685  GET_MQUANT();
3686  s->current_picture.qscale_table[mb_pos] = mquant;
3687  /* test if block is intra and has pred */
3688  {
3689  int intrapred = 0;
3690  for (i = 0; i < 6; i++)
3691  if (is_intra[i]) {
3692  if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3693  || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3694  intrapred = 1;
3695  break;
3696  }
3697  }
3698  if (intrapred)
3699  s->ac_pred = get_bits1(gb);
3700  else
3701  s->ac_pred = 0;
3702  }
3703  if (!v->ttmbf && coded_inter)
         /* NOTE(review): the ttmb VLC read (line 3704) was elided by the
          * listing. */
3705  for (i = 0; i < 6; i++) {
3706  dst_idx += i >> 2;
3707  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3708  s->mb_intra = is_intra[i];
3709  if (is_intra[i]) {
3710  /* check if prediction blocks A and C are available */
3711  v->a_avail = v->c_avail = 0;
3712  if (i == 2 || i == 3 || !s->first_slice_line)
3713  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3714  if (i == 1 || i == 3 || s->mb_x)
3715  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3716 
3717  vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3718  (i & 4) ? v->codingset2 : v->codingset);
3719  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3720  continue;
3721  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3722  if (v->rangeredfrm)
3723  for (j = 0; j < 64; j++)
3724  s->block[i][j] <<= 1;
3726  s->dest[dst_idx] + off,
3727  (i & 4) ? s->uvlinesize
3728  : s->linesize);
3729  if (v->pq >= 9 && v->overlap) {
3730  if (v->c_avail)
3731  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3732  if (v->a_avail)
3733  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3734  }
3735  block_cbp |= 0xF << (i << 2);
3736  block_intra |= 1 << i;
3737  } else if (is_coded[i]) {
3738  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3739  first_block, s->dest[dst_idx] + off,
3740  (i & 4) ? s->uvlinesize : s->linesize,
3741  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3742  &block_tt);
3743  block_cbp |= pat << (i << 2);
3744  if (!v->ttmbf && ttmb < 8)
3745  ttmb = -1;
3746  first_block = 0;
3747  }
3748  }
3749  } else { // skipped MB
3750  s->mb_intra = 0;
3751  s->current_picture.qscale_table[mb_pos] = 0;
3752  for (i = 0; i < 6; i++) {
3753  v->mb_type[0][s->block_index[i]] = 0;
3754  s->dc_val[0][s->block_index[i]] = 0;
3755  }
3756  for (i = 0; i < 4; i++) {
3757  vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3758  vc1_mc_4mv_luma(v, i, 0, 0);
3759  }
3760  vc1_mc_4mv_chroma(v, 0);
3761  s->current_picture.qscale_table[mb_pos] = 0;
3762  }
3763  }
3764 end:
3765  v->cbp[s->mb_x] = block_cbp;
3766  v->ttblk[s->mb_x] = block_tt;
3767  v->is_intra[s->mb_x] = block_intra;
3768 
3769  return 0;
3770 }
3771 
3772 /* Decode one macroblock in an interlaced frame p picture */
3773 
/*
 * NOTE(review): the signature (original line 3774) was elided by this
 * Doxygen listing; per the comment above it, this decodes one macroblock
 * of an interlaced-frame P picture (presumably
 * vc1_decode_p_mb_intfr(VC1Context *v) — confirm against the original).
 * Reads the MB mode VLC, dispatches between intra / 1MV / 2MV-field /
 * 4MV modes, performs motion compensation and residual decoding, and
 * propagates is_intra to is_intra_base at the end of each MB row.
 * Returns 0.
 */
3775 {
3776  MpegEncContext *s = &v->s;
3777  GetBitContext *gb = &s->gb;
3778  int i;
3779  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3780  int cbp = 0; /* cbp decoding stuff */
3781  int mqdiff, mquant; /* MB quantization */
3782  int ttmb = v->ttfrm; /* MB Transform type */
3783 
3784  int mb_has_coeffs = 1; /* last_flag */
3785  int dmv_x, dmv_y; /* Differential MV components */
3786  int val; /* temp value */
3787  int first_block = 1;
3788  int dst_idx, off;
3789  int skipped, fourmv = 0, twomv = 0;
3790  int block_cbp = 0, pat, block_tt = 0;
3791  int idx_mbmode = 0, mvbp;
3792  int stride_y, fieldtx;
3793 
3794  mquant = v->pq; /* Loosy initialization */
3795 
3796  if (v->skip_is_raw)
3797  skipped = get_bits1(gb);
3798  else
3799  skipped = v->s.mbskip_table[mb_pos];
3800  if (!skipped) {
3801  if (v->fourmvswitch)
3802  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3803  else
3804  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3805  switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3806  /* store the motion vector type in a flag (useful later) */
3807  case MV_PMODE_INTFR_4MV:
3808  fourmv = 1;
3809  v->blk_mv_type[s->block_index[0]] = 0;
3810  v->blk_mv_type[s->block_index[1]] = 0;
3811  v->blk_mv_type[s->block_index[2]] = 0;
3812  v->blk_mv_type[s->block_index[3]] = 0;
3813  break;
     /* NOTE(review): the case labels at elided lines 3814/3821 (field-MV
      * modes, e.g. MV_PMODE_INTFR_4MV_FIELD / MV_PMODE_INTFR_2MV_FIELD)
      * were stripped by the listing. */
3815  fourmv = 1;
3816  v->blk_mv_type[s->block_index[0]] = 1;
3817  v->blk_mv_type[s->block_index[1]] = 1;
3818  v->blk_mv_type[s->block_index[2]] = 1;
3819  v->blk_mv_type[s->block_index[3]] = 1;
3820  break;
3822  twomv = 1;
3823  v->blk_mv_type[s->block_index[0]] = 1;
3824  v->blk_mv_type[s->block_index[1]] = 1;
3825  v->blk_mv_type[s->block_index[2]] = 1;
3826  v->blk_mv_type[s->block_index[3]] = 1;
3827  break;
3828  case MV_PMODE_INTFR_1MV:
3829  v->blk_mv_type[s->block_index[0]] = 0;
3830  v->blk_mv_type[s->block_index[1]] = 0;
3831  v->blk_mv_type[s->block_index[2]] = 0;
3832  v->blk_mv_type[s->block_index[3]] = 0;
3833  break;
3834  }
3835  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3836  for (i = 0; i < 4; i++) {
3837  s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3838  s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3839  }
3840  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3841  s->mb_intra = v->is_intra[s->mb_x] = 1;
3842  for (i = 0; i < 6; i++)
3843  v->mb_type[0][s->block_index[i]] = 1;
3844  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3845  mb_has_coeffs = get_bits1(gb);
3846  if (mb_has_coeffs)
3847  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3848  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3849  GET_MQUANT();
3850  s->current_picture.qscale_table[mb_pos] = mquant;
3851  /* Set DC scale - y and c use the same (not sure if necessary here) */
3852  s->y_dc_scale = s->y_dc_scale_table[mquant];
3853  s->c_dc_scale = s->c_dc_scale_table[mquant];
3854  dst_idx = 0;
3855  for (i = 0; i < 6; i++) {
3856  s->dc_val[0][s->block_index[i]] = 0;
3857  dst_idx += i >> 2;
3858  val = ((cbp >> (5 - i)) & 1);
3859  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3860  v->a_avail = v->c_avail = 0;
3861  if (i == 2 || i == 3 || !s->first_slice_line)
3862  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3863  if (i == 1 || i == 3 || s->mb_x)
3864  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3865 
3866  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3867  (i & 4) ? v->codingset2 : v->codingset);
3868  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3869  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3870  if (i < 4) {
3871  stride_y = s->linesize << fieldtx;
3872  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3873  } else {
3874  stride_y = s->uvlinesize;
3875  off = 0;
3876  }
3878  s->dest[dst_idx] + off,
3879  stride_y);
3880  //TODO: loop filter
3881  }
3882 
3883  } else { // inter MB
3884  mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3885  if (mb_has_coeffs)
3886  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3887  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
         /* NOTE(review): the twomvbp/fourmvbp VLC reads (lines 3888 and
          * 3892) were elided by the listing. */
3889  } else {
3890  if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3891  || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3893  }
3894  }
3895  s->mb_intra = v->is_intra[s->mb_x] = 0;
3896  for (i = 0; i < 6; i++)
3897  v->mb_type[0][s->block_index[i]] = 0;
3898  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3899  /* for all motion vector read MVDATA and motion compensate each block */
3900  dst_idx = 0;
3901  if (fourmv) {
3902  mvbp = v->fourmvbp;
3903  for (i = 0; i < 6; i++) {
3904  if (i < 4) {
3905  dmv_x = dmv_y = 0;
3906  val = ((mvbp >> (3 - i)) & 1);
3907  if (val) {
3908  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3909  }
3910  vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3911  vc1_mc_4mv_luma(v, i, 0, 0);
3912  } else if (i == 4) {
3913  vc1_mc_4mv_chroma4(v, 0, 0, 0);
3914  }
3915  }
3916  } else if (twomv) {
3917  mvbp = v->twomvbp;
3918  dmv_x = dmv_y = 0;
3919  if (mvbp & 2) {
3920  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3921  }
3922  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3923  vc1_mc_4mv_luma(v, 0, 0, 0);
3924  vc1_mc_4mv_luma(v, 1, 0, 0);
3925  dmv_x = dmv_y = 0;
3926  if (mvbp & 1) {
3927  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3928  }
3929  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3930  vc1_mc_4mv_luma(v, 2, 0, 0);
3931  vc1_mc_4mv_luma(v, 3, 0, 0);
3932  vc1_mc_4mv_chroma4(v, 0, 0, 0);
3933  } else {
3934  mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3935  dmv_x = dmv_y = 0;
3936  if (mvbp) {
3937  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3938  }
3939  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3940  vc1_mc_1mv(v, 0);
3941  }
3942  if (cbp)
3943  GET_MQUANT(); // p. 227
3944  s->current_picture.qscale_table[mb_pos] = mquant;
3945  if (!v->ttmbf && cbp)
         /* NOTE(review): the ttmb VLC read (line 3946) was elided by the
          * listing. */
3947  for (i = 0; i < 6; i++) {
3948  s->dc_val[0][s->block_index[i]] = 0;
3949  dst_idx += i >> 2;
3950  val = ((cbp >> (5 - i)) & 1);
3951  if (!fieldtx)
3952  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3953  else
3954  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3955  if (val) {
3956  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3957  first_block, s->dest[dst_idx] + off,
3958  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3959  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3960  block_cbp |= pat << (i << 2);
3961  if (!v->ttmbf && ttmb < 8)
3962  ttmb = -1;
3963  first_block = 0;
3964  }
3965  }
3966  }
3967  } else { // skipped
3968  s->mb_intra = v->is_intra[s->mb_x] = 0;
3969  for (i = 0; i < 6; i++) {
3970  v->mb_type[0][s->block_index[i]] = 0;
3971  s->dc_val[0][s->block_index[i]] = 0;
3972  }
3973  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3974  s->current_picture.qscale_table[mb_pos] = 0;
3975  v->blk_mv_type[s->block_index[0]] = 0;
3976  v->blk_mv_type[s->block_index[1]] = 0;
3977  v->blk_mv_type[s->block_index[2]] = 0;
3978  v->blk_mv_type[s->block_index[3]] = 0;
3979  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3980  vc1_mc_1mv(v, 0);
3981  }
3982  if (s->mb_x == s->mb_width - 1)
3983  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3984  return 0;
3985 }
3986 
/*
 * NOTE(review): doxygen-rendered listing; the function signature (original
 * line 3987) was stripped by the extraction.  From the body and its position
 * in vc1dec.c this appears to be the interlaced-FIELD P-picture macroblock
 * decoder (vc1_decode_p_mb_intfi upstream) -- confirm against upstream.
 * Decodes one MB: mbmode VLC -> intra path (AC-pred flag, ICBPCY, per-block
 * intra decode) or inter path (1-MV / 4-MV motion, CBPCY, residual blocks).
 * Lines flagged below are missing from this extraction.
 */
3988 {
3989  MpegEncContext *s = &v->s;
3990  GetBitContext *gb = &s->gb;
3991  int i;
3992  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3993  int cbp = 0; /* cbp decoding stuff */
3994  int mqdiff, mquant; /* MB quantization */
3995  int ttmb = v->ttfrm; /* MB Transform type */
3996 
3997  int mb_has_coeffs = 1; /* last_flag */
3998  int dmv_x, dmv_y; /* Differential MV components */
3999  int val; /* temp values */
4000  int first_block = 1;
4001  int dst_idx, off;
4002  int pred_flag;
4003  int block_cbp = 0, pat, block_tt = 0;
4004  int idx_mbmode = 0;
4005 
4006  mquant = v->pq; /* Loosy initialization */
4007 
4008  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4009  if (idx_mbmode <= 1) { // intra MB
4010  s->mb_intra = v->is_intra[s->mb_x] = 1;
4011  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4012  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4013  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4014  GET_MQUANT();
4015  s->current_picture.qscale_table[mb_pos] = mquant;
4016  /* Set DC scale - y and c use the same (not sure if necessary here) */
4017  s->y_dc_scale = s->y_dc_scale_table[mquant];
4018  s->c_dc_scale = s->c_dc_scale_table[mquant];
4019  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4020  mb_has_coeffs = idx_mbmode & 1;
4021  if (mb_has_coeffs)
4022  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4023  dst_idx = 0;
4024  for (i = 0; i < 6; i++) {
4025  s->dc_val[0][s->block_index[i]] = 0;
4026  v->mb_type[0][s->block_index[i]] = 1;
4027  dst_idx += i >> 2;
4028  val = ((cbp >> (5 - i)) & 1);
      /* A (above) and C (left) prediction availability for DC/AC prediction */
4029  v->a_avail = v->c_avail = 0;
4030  if (i == 2 || i == 3 || !s->first_slice_line)
4031  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4032  if (i == 1 || i == 3 || s->mb_x)
4033  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4034 
4035  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4036  (i & 4) ? v->codingset2 : v->codingset);
4037  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4038  continue;
4039  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4040  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
      /* NOTE(review): original line 4041 missing from this extraction --
       * presumably the head of the put_signed_pixels_clamped() call whose
       * argument lines follow; verify against upstream vc1dec.c. */
4042  s->dest[dst_idx] + off,
4043  (i & 4) ? s->uvlinesize
4044  : s->linesize);
4045  // TODO: loop filter
4046  }
4047  } else {
4048  s->mb_intra = v->is_intra[s->mb_x] = 0;
4049  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4050  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4051  if (idx_mbmode <= 5) { // 1-MV
4052  dmv_x = dmv_y = pred_flag = 0;
4053  if (idx_mbmode & 1) {
4054  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4055  }
4056  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4057  vc1_mc_1mv(v, 0);
4058  mb_has_coeffs = !(idx_mbmode & 2);
4059  } else { // 4-MV
      /* NOTE(review): original line 4060 missing -- presumably the read of
       * v->fourmvbp (4MV block pattern VLC); verify upstream. */
4061  for (i = 0; i < 6; i++) {
4062  if (i < 4) {
4063  dmv_x = dmv_y = pred_flag = 0;
4064  val = ((v->fourmvbp >> (3 - i)) & 1);
4065  if (val) {
4066  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4067  }
4068  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4069  vc1_mc_4mv_luma(v, i, 0, 0);
4070  } else if (i == 4)
4071  vc1_mc_4mv_chroma(v, 0);
4072  }
4073  mb_has_coeffs = idx_mbmode & 1;
4074  }
4075  if (mb_has_coeffs)
4076  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4077  if (cbp) {
4078  GET_MQUANT();
4079  }
4080  s->current_picture.qscale_table[mb_pos] = mquant;
4081  if (!v->ttmbf && cbp) {
      /* NOTE(review): original line 4082 missing -- presumably the TTMB
       * (per-MB transform type) VLC read into ttmb; verify upstream. */
4083  }
4084  dst_idx = 0;
4085  for (i = 0; i < 6; i++) {
4086  s->dc_val[0][s->block_index[i]] = 0;
4087  dst_idx += i >> 2;
4088  val = ((cbp >> (5 - i)) & 1);
4089  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4090  if (val) {
4091  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4092  first_block, s->dest[dst_idx] + off,
4093  (i & 4) ? s->uvlinesize : s->linesize,
4094  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4095  &block_tt);
4096  block_cbp |= pat << (i << 2);
4097  if (!v->ttmbf && ttmb < 8) ttmb = -1;
4098  first_block = 0;
4099  }
4100  }
4101  }
     /* at the end of each MB row, preserve this row's intra flags for the
      * next row's prediction */
4102  if (s->mb_x == s->mb_width - 1)
4103  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4104  return 0;
4105 }
4106 
/*
 * NOTE(review): doxygen-rendered listing; the function signature (original
 * line 4109) was stripped.  From the body this appears to be the progressive
 * B-picture macroblock decoder (vc1_decode_b_mb upstream) -- confirm against
 * upstream vc1dec.c.  Decodes one B MB: direct/skip flags (raw bit or
 * bitplane), B MV type (012 code biased by BFRACTION), motion compensation,
 * then residual/intra block decode.  Missing lines flagged below.
 */
4110 {
4111  MpegEncContext *s = &v->s;
4112  GetBitContext *gb = &s->gb;
4113  int i, j;
4114  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4115  int cbp = 0; /* cbp decoding stuff */
4116  int mqdiff, mquant; /* MB quantization */
4117  int ttmb = v->ttfrm; /* MB Transform type */
4118  int mb_has_coeffs = 0; /* last_flag */
4119  int index, index1; /* LUT indexes */
4120  int val, sign; /* temp values */
4121  int first_block = 1;
4122  int dst_idx, off;
4123  int skipped, direct;
4124  int dmv_x[2], dmv_y[2];
4125  int bmvtype = BMV_TYPE_BACKWARD;
4126 
4127  mquant = v->pq; /* lossy initialization */
4128  s->mb_intra = 0;
4129 
     /* DIRECTMB and SKIPMB may be raw (one bit per MB) or pre-decoded
      * bitplanes */
4130  if (v->dmb_is_raw)
4131  direct = get_bits1(gb);
4132  else
4133  direct = v->direct_mb_plane[mb_pos];
4134  if (v->skip_is_raw)
4135  skipped = get_bits1(gb);
4136  else
4137  skipped = v->s.mbskip_table[mb_pos];
4138 
4139  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4140  for (i = 0; i < 6; i++) {
4141  v->mb_type[0][s->block_index[i]] = 0;
4142  s->dc_val[0][s->block_index[i]] = 0;
4143  }
4144  s->current_picture.qscale_table[mb_pos] = 0;
4145 
4146  if (!direct) {
4147  if (!skipped) {
     /* NOTE(review): GET_MVDATA presumably sets s->mb_intra and
      * mb_has_coeffs as side effects (macro defined outside this view). */
4148  GET_MVDATA(dmv_x[0], dmv_y[0]);
4149  dmv_x[1] = dmv_x[0];
4150  dmv_y[1] = dmv_y[0];
4151  }
4152  if (skipped || !s->mb_intra) {
     /* 012 code selects forward/backward (biased by BFRACTION half-point)
      * or interpolated prediction */
4153  bmvtype = decode012(gb);
4154  switch (bmvtype) {
4155  case 0:
4156  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4157  break;
4158  case 1:
4159  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4160  break;
4161  case 2:
4162  bmvtype = BMV_TYPE_INTERPOLATED;
4163  dmv_x[0] = dmv_y[0] = 0;
4164  }
4165  }
4166  }
4167  for (i = 0; i < 6; i++)
4168  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4169 
4170  if (skipped) {
4171  if (direct)
4172  bmvtype = BMV_TYPE_INTERPOLATED;
4173  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4174  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4175  return;
4176  }
4177  if (direct) {
4178  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4179  GET_MQUANT();
4180  s->mb_intra = 0;
4181  s->current_picture.qscale_table[mb_pos] = mquant;
4182  if (!v->ttmbf)
     /* NOTE(review): original line 4183 missing -- presumably the TTMB VLC
      * read into ttmb; verify upstream. */
4184  dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4185  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4186  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4187  } else {
4188  if (!mb_has_coeffs && !s->mb_intra) {
4189  /* no coded blocks - effectively skipped */
4190  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4191  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4192  return;
4193  }
4194  if (s->mb_intra && !mb_has_coeffs) {
4195  GET_MQUANT();
4196  s->current_picture.qscale_table[mb_pos] = mquant;
4197  s->ac_pred = get_bits1(gb);
4198  cbp = 0;
4199  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4200  } else {
4201  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4202  GET_MVDATA(dmv_x[0], dmv_y[0]);
4203  if (!mb_has_coeffs) {
4204  /* interpolated skipped block */
4205  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4206  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4207  return;
4208  }
4209  }
4210  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4211  if (!s->mb_intra) {
4212  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4213  }
4214  if (s->mb_intra)
4215  s->ac_pred = get_bits1(gb);
4216  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4217  GET_MQUANT();
4218  s->current_picture.qscale_table[mb_pos] = mquant;
4219  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
     /* NOTE(review): original line 4220 missing -- presumably the TTMB VLC
      * read into ttmb; verify upstream. */
4221  }
4222  }
4223  dst_idx = 0;
4224  for (i = 0; i < 6; i++) {
4225  s->dc_val[0][s->block_index[i]] = 0;
4226  dst_idx += i >> 2;
4227  val = ((cbp >> (5 - i)) & 1);
4228  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4229  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4230  if (s->mb_intra) {
4231  /* check if prediction blocks A and C are available */
4232  v->a_avail = v->c_avail = 0;
4233  if (i == 2 || i == 3 || !s->first_slice_line)
4234  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4235  if (i == 1 || i == 3 || s->mb_x)
4236  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4237 
4238  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4239  (i & 4) ? v->codingset2 : v->codingset);
4240  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4241  continue;
4242  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4243  if (v->rangeredfrm)
4244  for (j = 0; j < 64; j++)
4245  s->block[i][j] <<= 1;
     /* NOTE(review): original line 4246 missing -- presumably the head of
      * the put_signed_pixels_clamped() call; verify upstream. */
4247  s->dest[dst_idx] + off,
4248  i & 4 ? s->uvlinesize
4249  : s->linesize);
4250  } else if (val) {
4251  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4252  first_block, s->dest[dst_idx] + off,
4253  (i & 4) ? s->uvlinesize : s->linesize,
4254  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4255  if (!v->ttmbf && ttmb < 8)
4256  ttmb = -1;
4257  first_block = 0;
4258  }
4259  }
4260 }
4261 
/*
 * NOTE(review): doxygen-rendered listing; the function signature (original
 * line 4264) was stripped.  From the body this appears to be the
 * interlaced-FIELD B-picture macroblock decoder (vc1_decode_b_mb_intfi
 * upstream) -- confirm against upstream vc1dec.c.  Decodes one MB:
 * mbmode VLC -> intra path, or inter path choosing forward/backward/
 * direct/interpolated prediction (FORWARDMB plane), 1-MV or 4-MV motion,
 * then residuals.  Missing lines flagged below.
 */
4265 {
4266  MpegEncContext *s = &v->s;
4267  GetBitContext *gb = &s->gb;
4268  int i, j;
4269  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4270  int cbp = 0; /* cbp decoding stuff */
4271  int mqdiff, mquant; /* MB quantization */
4272  int ttmb = v->ttfrm; /* MB Transform type */
4273  int mb_has_coeffs = 0; /* last_flag */
4274  int val; /* temp value */
4275  int first_block = 1;
4276  int dst_idx, off;
4277  int fwd;
4278  int dmv_x[2], dmv_y[2], pred_flag[2];
4279  int bmvtype = BMV_TYPE_BACKWARD;
4280  int idx_mbmode, interpmvp;
4281 
4282  mquant = v->pq; /* Loosy initialization */
4283  s->mb_intra = 0;
4284 
4285  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4286  if (idx_mbmode <= 1) { // intra MB
4287  s->mb_intra = v->is_intra[s->mb_x] = 1;
4288  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4289  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4290  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4291  GET_MQUANT();
4292  s->current_picture.qscale_table[mb_pos] = mquant;
4293  /* Set DC scale - y and c use the same (not sure if necessary here) */
4294  s->y_dc_scale = s->y_dc_scale_table[mquant];
4295  s->c_dc_scale = s->c_dc_scale_table[mquant];
4296  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4297  mb_has_coeffs = idx_mbmode & 1;
4298  if (mb_has_coeffs)
4299  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4300  dst_idx = 0;
4301  for (i = 0; i < 6; i++) {
4302  s->dc_val[0][s->block_index[i]] = 0;
4303  dst_idx += i >> 2;
4304  val = ((cbp >> (5 - i)) & 1);
4305  v->mb_type[0][s->block_index[i]] = s->mb_intra;
      /* A (above) / C (left) availability for DC/AC prediction */
4306  v->a_avail = v->c_avail = 0;
4307  if (i == 2 || i == 3 || !s->first_slice_line)
4308  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4309  if (i == 1 || i == 3 || s->mb_x)
4310  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4311 
4312  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4313  (i & 4) ? v->codingset2 : v->codingset);
4314  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4315  continue;
4316  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4317  if (v->rangeredfrm)
4318  for (j = 0; j < 64; j++)
4319  s->block[i][j] <<= 1;
4320  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
      /* NOTE(review): original line 4321 missing -- presumably the head of
       * the put_signed_pixels_clamped() call; verify upstream. */
4322  s->dest[dst_idx] + off,
4323  (i & 4) ? s->uvlinesize
4324  : s->linesize);
4325  // TODO: yet to perform loop filter
4326  }
4327  } else {
4328  s->mb_intra = v->is_intra[s->mb_x] = 0;
4329  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4330  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4331  if (v->fmb_is_raw)
4332  fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4333  else
4334  fwd = v->forward_mb_plane[mb_pos];
4335  if (idx_mbmode <= 5) { // 1-MV
4336  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4337  pred_flag[0] = pred_flag[1] = 0;
4338  if (fwd)
4339  bmvtype = BMV_TYPE_FORWARD;
4340  else {
4341  bmvtype = decode012(gb);
4342  switch (bmvtype) {
4343  case 0:
4344  bmvtype = BMV_TYPE_BACKWARD;
4345  break;
4346  case 1:
4347  bmvtype = BMV_TYPE_DIRECT;
4348  break;
4349  case 2:
4350  bmvtype = BMV_TYPE_INTERPOLATED;
4351  interpmvp = get_bits1(gb);
4352  }
4353  }
4354  v->bmvtype = bmvtype;
      /* index 0 = forward MV, index 1 = backward MV */
4355  if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4356  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4357  }
      /* NOTE(review): interpmvp is only assigned on the
       * BMV_TYPE_INTERPOLATED case above; if bmvtype arrives at
       * INTERPOLATED any other way this reads an uninitialized local --
       * verify reachability upstream. */
4358  if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4359  get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4360  }
4361  if (bmvtype == BMV_TYPE_DIRECT) {
4362  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
      /* NOTE(review): second line also zeroes pred_flag[0], not
       * pred_flag[1] -- possible typo; verify against upstream. */
4363  dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4364  }
4365  vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4366  vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4367  mb_has_coeffs = !(idx_mbmode & 2);
4368  } else { // 4-MV
4369  if (fwd)
4370  bmvtype = BMV_TYPE_FORWARD;
4371  v->bmvtype = bmvtype;
      /* NOTE(review): original line 4372 missing -- presumably the read of
       * v->fourmvbp (4MV block pattern VLC); verify upstream. */
4373  for (i = 0; i < 6; i++) {
4374  if (i < 4) {
4375  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4376  dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4377  val = ((v->fourmvbp >> (3 - i)) & 1);
4378  if (val) {
4379  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4380  &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4381  &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4382  }
4383  vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4384  vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
4385  } else if (i == 4)
4386  vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4387  }
4388  mb_has_coeffs = idx_mbmode & 1;
4389  }
4390  if (mb_has_coeffs)
4391  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4392  if (cbp) {
4393  GET_MQUANT();
4394  }
4395  s->current_picture.qscale_table[mb_pos] = mquant;
4396  if (!v->ttmbf && cbp) {
      /* NOTE(review): original line 4397 missing -- presumably the TTMB VLC
       * read into ttmb; verify upstream. */
4398  }
4399  dst_idx = 0;
4400  for (i = 0; i < 6; i++) {
4401  s->dc_val[0][s->block_index[i]] = 0;
4402  dst_idx += i >> 2;
4403  val = ((cbp >> (5 - i)) & 1);
4404  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4405  if (val) {
4406  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4407  first_block, s->dest[dst_idx] + off,
4408  (i & 4) ? s->uvlinesize : s->linesize,
4409  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4410  if (!v->ttmbf && ttmb < 8)
4411  ttmb = -1;
4412  first_block = 0;
4413  }
4414  }
4415  }
4416 }
4417 
/*
 * NOTE(review): doxygen-rendered listing; the function signature (original
 * line 4420) was stripped.  From the body this appears to be the
 * interlaced-FRAME B-picture macroblock decoder (vc1_decode_b_mb_intfr
 * upstream) -- confirm against upstream vc1dec.c.  Decodes one MB: skip and
 * direct flags, direct-mode MV scaling from the next picture, intra path,
 * or inter path with 1-MV / 2-MV-field / interpolated prediction plus the
 * MVSW (motion vector switch) field-swap, then residuals.  Missing lines
 * are flagged where the embedded numbering skips.
 */
4421 {
4422  MpegEncContext *s = &v->s;
4423  GetBitContext *gb = &s->gb;
4424  int i, j;
4425  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4426  int cbp = 0; /* cbp decoding stuff */
4427  int mqdiff, mquant; /* MB quantization */
4428  int ttmb = v->ttfrm; /* MB Transform type */
4429  int mvsw = 0; /* motion vector switch */
4430  int mb_has_coeffs = 1; /* last_flag */
4431  int dmv_x, dmv_y; /* Differential MV components */
4432  int val; /* temp value */
4433  int first_block = 1;
4434  int dst_idx, off;
4435  int skipped, direct, twomv = 0;
4436  int block_cbp = 0, pat, block_tt = 0;
4437  int idx_mbmode = 0, mvbp;
4438  int stride_y, fieldtx;
4439  int bmvtype = BMV_TYPE_BACKWARD;
4440  int dir, dir2;
4441 
4442  mquant = v->pq; /* Lossy initialization */
4443  s->mb_intra = 0;
4444  if (v->skip_is_raw)
4445  skipped = get_bits1(gb);
4446  else
4447  skipped = v->s.mbskip_table[mb_pos];
4448 
4449  if (!skipped) {
4450  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
     /* 2MV-field mode: the four luma blocks carry two field MVs */
4451  if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
4452  twomv = 1;
4453  v->blk_mv_type[s->block_index[0]] = 1;
4454  v->blk_mv_type[s->block_index[1]] = 1;
4455  v->blk_mv_type[s->block_index[2]] = 1;
4456  v->blk_mv_type[s->block_index[3]] = 1;
4457  } else {
4458  v->blk_mv_type[s->block_index[0]] = 0;
4459  v->blk_mv_type[s->block_index[1]] = 0;
4460  v->blk_mv_type[s->block_index[2]] = 0;
4461  v->blk_mv_type[s->block_index[3]] = 0;
4462  }
4463  }
4464 
4465  if (v->dmb_is_raw)
4466  direct = get_bits1(gb);
4467  else
4468  direct = v->direct_mb_plane[mb_pos];
4469 
     /* direct mode: derive fwd/bwd MVs by scaling the co-located MV of the
      * next (anchor) picture with BFRACTION */
4470  if (direct) {
4471  s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
4472  s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
4473  s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
4474  s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
4475 
4476  if (twomv) {
4477  s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
4478  s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
4479  s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
4480  s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
4481 
      /* copy block 0 -> 1 and block 2 -> 3 (per-field pairs) */
4482  for (i = 1; i < 4; i += 2) {
4483  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
4484  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
4485  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
4486  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
4487  }
4488  } else {
      /* 1-MV: propagate block 0's MVs to blocks 1..3 */
4489  for (i = 1; i < 4; i++) {
4490  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
4491  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
4492  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
4493  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
4494  }
4495  }
4496  }
4497 
4498  if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
4499  for (i = 0; i < 4; i++) {
4500  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
4501  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
4502  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
4503  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
4504  }
4505  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4506  s->mb_intra = v->is_intra[s->mb_x] = 1;
4507  for (i = 0; i < 6; i++)
4508  v->mb_type[0][s->block_index[i]] = 1;
4509  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
4510  mb_has_coeffs = get_bits1(gb);
4511  if (mb_has_coeffs)
4512  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4513  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4514  GET_MQUANT();
4515  s->current_picture.qscale_table[mb_pos] = mquant;
4516  /* Set DC scale - y and c use the same (not sure if necessary here) */
4517  s->y_dc_scale = s->y_dc_scale_table[mquant];
4518  s->c_dc_scale = s->c_dc_scale_table[mquant];
4519  dst_idx = 0;
4520  for (i = 0; i < 6; i++) {
4521  s->dc_val[0][s->block_index[i]] = 0;
4522  dst_idx += i >> 2;
4523  val = ((cbp >> (5 - i)) & 1);
4524  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4525  v->a_avail = v->c_avail = 0;
4526  if (i == 2 || i == 3 || !s->first_slice_line)
4527  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4528  if (i == 1 || i == 3 || s->mb_x)
4529  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4530 
4531  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4532  (i & 4) ? v->codingset2 : v->codingset);
4533  if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
4534  continue;
4535  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
      /* luma addressing differs when FIELDTX interleaves the two fields */
4536  if (i < 4) {
4537  stride_y = s->linesize << fieldtx;
4538  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4539  } else {
4540  stride_y = s->uvlinesize;
4541  off = 0;
4542  }
      /* NOTE(review): original line 4543 missing -- presumably the head of
       * the put_signed_pixels_clamped() call; verify upstream. */
4544  s->dest[dst_idx] + off,
4545  stride_y);
4546  }
4547  } else {
4548  s->mb_intra = v->is_intra[s->mb_x] = 0;
4549  if (!direct) {
4550  if (skipped || !s->mb_intra) {
4551  bmvtype = decode012(gb);
4552  switch (bmvtype) {
4553  case 0:
4554  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4555  break;
4556  case 1:
4557  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4558  break;
4559  case 2:
4560  bmvtype = BMV_TYPE_INTERPOLATED;
4561  }
4562  }
4563 
4564  if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
4565  mvsw = get_bits1(gb);
4566  }
4567 
4568  if (!skipped) { // inter MB
4569  mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
4570  if (mb_has_coeffs)
4571  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4572  if (!direct) {
      /* NOTE(review): the `& twomv` / `| twomv` forms below look unusual
       * (BMV_TYPE_INTERPOLATED masked/or-ed with a 0/1 flag); they appear
       * to select the 4MVBP vs 2MVBP VLC read -- verify against upstream
       * before touching.  Original lines 4574 and 4576 (the VLC reads into
       * v->fourmvbp / v->twomvbp, presumably) are missing from this
       * extraction. */
4573  if (bmvtype == (BMV_TYPE_INTERPOLATED & twomv)) {
4575  } else if (bmvtype == (BMV_TYPE_INTERPOLATED | twomv)) {
4577  }
4578  }
4579 
4580  for (i = 0; i < 6; i++)
4581  v->mb_type[0][s->block_index[i]] = 0;
4582  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
4583  /* for all motion vector read MVDATA and motion compensate each block */
4584  dst_idx = 0;
4585  if (direct) {
4586  if (twomv) {
4587  for (i = 0; i < 4; i++) {
4588  vc1_mc_4mv_luma(v, i, 0, 0);
4589  vc1_mc_4mv_luma(v, i, 1, 1);
4590  }
4591  vc1_mc_4mv_chroma4(v, 0, 0, 0);
4592  vc1_mc_4mv_chroma4(v, 1, 1, 1);
4593  } else {
4594  vc1_mc_1mv(v, 0);
4595  vc1_interp_mc(v);
4596  }
4597  } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
4598  mvbp = v->fourmvbp;
4599  for (i = 0; i < 4; i++) {
      /* odd i -> backward direction; MVs decoded per field pair (j = 0 or 2) */
4600  dir = i==1 || i==3;
4601  dmv_x = dmv_y = 0;
4602  val = ((mvbp >> (3 - i)) & 1);
4603  if (val)
4604  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4605  j = i > 1 ? 2 : 0;
4606  vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4607  vc1_mc_4mv_luma(v, j, dir, dir);
4608  vc1_mc_4mv_luma(v, j+1, dir, dir);
4609  }
4610 
4611  vc1_mc_4mv_chroma4(v, 0, 0, 0);
4612  vc1_mc_4mv_chroma4(v, 1, 1, 1);
4613  } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
4614  mvbp = v->twomvbp;
4615  dmv_x = dmv_y = 0;
4616  if (mvbp & 2)
4617  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4618 
4619  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4620  vc1_mc_1mv(v, 0);
4621 
4622  dmv_x = dmv_y = 0;
4623  if (mvbp & 1)
4624  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4625 
4626  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4627  vc1_interp_mc(v);
4628  } else if (twomv) {
4629  dir = bmvtype == BMV_TYPE_BACKWARD;
4630  dir2 = dir;
4631  if (mvsw)
4632  dir2 = !dir;
4633  mvbp = v->twomvbp;
4634  dmv_x = dmv_y = 0;
4635  if (mvbp & 2)
4636  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4637  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4638 
4639  dmv_x = dmv_y = 0;
4640  if (mvbp & 1)
4641  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4642  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
4643 
      /* MVSW: swap the top/bottom field MVs between the two directions */
4644  if (mvsw) {
4645  for (i = 0; i < 2; i++) {
4646  s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4647  s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4648  s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4649  s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4650  }
4651  } else {
4652  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4653  vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4654  }
4655 
4656  vc1_mc_4mv_luma(v, 0, dir, 0);
4657  vc1_mc_4mv_luma(v, 1, dir, 0);
4658  vc1_mc_4mv_luma(v, 2, dir2, 0);
4659  vc1_mc_4mv_luma(v, 3, dir2, 0);
4660  vc1_mc_4mv_chroma4(v, dir, dir2, 0);
4661  } else {
4662  dir = bmvtype == BMV_TYPE_BACKWARD;
4663 
4664  mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
4665  dmv_x = dmv_y = 0;
4666  if (mvbp)
4667  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4668 
4669  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4670  v->blk_mv_type[s->block_index[0]] = 1;
4671  v->blk_mv_type[s->block_index[1]] = 1;
4672  v->blk_mv_type[s->block_index[2]] = 1;
4673  v->blk_mv_type[s->block_index[3]] = 1;
4674  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4675  for (i = 0; i < 2; i++) {
4676  s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4677  s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4678  }
4679  vc1_mc_1mv(v, dir);
4680  }
4681 
4682  if (cbp)
4683  GET_MQUANT(); // p. 227
4684  s->current_picture.qscale_table[mb_pos] = mquant;
4685  if (!v->ttmbf && cbp)
      /* NOTE(review): original line 4686 missing -- presumably the TTMB VLC
       * read into ttmb; verify upstream. */
4687  for (i = 0; i < 6; i++) {
4688  s->dc_val[0][s->block_index[i]] = 0;
4689  dst_idx += i >> 2;
4690  val = ((cbp >> (5 - i)) & 1);
4691  if (!fieldtx)
4692  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4693  else
4694  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4695  if (val) {
4696  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4697  first_block, s->dest[dst_idx] + off,
4698  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4699  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4700  block_cbp |= pat << (i << 2);
4701  if (!v->ttmbf && ttmb < 8)
4702  ttmb = -1;
4703  first_block = 0;
4704  }
4705  }
4706 
4707  } else { // skipped
4708  dir = 0;
4709  for (i = 0; i < 6; i++) {
4710  v->mb_type[0][s->block_index[i]] = 0;
4711  s->dc_val[0][s->block_index[i]] = 0;
4712  }
4713  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
4714  s->current_picture.qscale_table[mb_pos] = 0;
4715  v->blk_mv_type[s->block_index[0]] = 0;
4716  v->blk_mv_type[s->block_index[1]] = 0;
4717  v->blk_mv_type[s->block_index[2]] = 0;
4718  v->blk_mv_type[s->block_index[3]] = 0;
4719 
4720  if (!direct) {
4721  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4722  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4723  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4724  } else {
4725  dir = bmvtype == BMV_TYPE_BACKWARD;
4726  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4727  if (mvsw) {
      /* NOTE(review): inner `if (mvsw)` is redundant -- this branch is
       * only reached when mvsw is nonzero. */
4728  int dir2 = dir;
4729  if (mvsw)
4730  dir2 = !dir;
4731  for (i = 0; i < 2; i++) {
4732  s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4733  s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4734  s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4735  s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4736  }
4737  } else {
4738  v->blk_mv_type[s->block_index[0]] = 1;
4739  v->blk_mv_type[s->block_index[1]] = 1;
4740  v->blk_mv_type[s->block_index[2]] = 1;
4741  v->blk_mv_type[s->block_index[3]] = 1;
4742  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4743  for (i = 0; i < 2; i++) {
4744  s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4745  s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4746  }
4747  }
4748  }
4749  }
4750 
4751  vc1_mc_1mv(v, dir);
4752  if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
4753  vc1_interp_mc(v);
4754  }
4755  }
4756  }
     /* at the end of each MB row, preserve this row's intra flags for the
      * next row's prediction; also export per-MB cbp/transform info used by
      * the loop filter */
4757  if (s->mb_x == s->mb_width - 1)
4758  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4759  v->cbp[s->mb_x] = block_cbp;
4760  v->ttblk[s->mb_x] = block_tt;
4761  return 0;
4762 }
4763 
4767 {
4768  int k, j;
4769  MpegEncContext *s = &v->s;
4770  int cbp, val;
4771  uint8_t *coded_val;
4772  int mb_pos;
4773 
4774  /* select codingmode used for VLC tables selection */
4775  switch (v->y_ac_table_index) {
4776  case 0:
4778  break;
4779  case 1:
4781  break;
4782  case 2:
4784  break;
4785  }
4786 
4787  switch (v->c_ac_table_index) {
4788  case 0:
4790  break;
4791  case 1:
4793  break;
4794  case 2:
4796  break;
4797  }
4798 
4799  /* Set DC scale - y and c use the same */
4800  s->y_dc_scale = s->y_dc_scale_table[v->pq];
4801  s->c_dc_scale = s->c_dc_scale_table[v->pq];
4802 
4803  //do frame decode
4804  s->mb_x = s->mb_y = 0;
4805  s->mb_intra = 1;
4806  s->first_slice_line = 1;
4807  for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
4808  s->mb_x = 0;
4809  init_block_index(v);
4810  for (; s->mb_x < v->end_mb_x; s->mb_x++) {
4811  uint8_t *dst[6];
4813  dst[0] = s->dest[0];
4814  dst[1] = dst[0] + 8;
4815  dst[2] = s->dest[0] + s->linesize * 8;
4816  dst[3] = dst[2] + 8;
4817  dst[4] = s->dest[1];
4818  dst[5] = s->dest[2];
4819  s->bdsp.clear_blocks(s->block[0]);
4820  mb_pos = s->mb_x + s->mb_y * s->mb_width;
4821  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4822  s->current_picture.qscale_table[mb_pos] = v->pq;
4823  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4824  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4825 
4826  // do actual MB decoding and displaying
4828  v->s.ac_pred = get_bits1(&v->s.gb);
4829 
4830  for (k = 0; k < 6; k++) {
4831  val = ((cbp >> (5 - k)) & 1);
4832 
4833  if (k < 4) {
4834  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4835  val = val ^ pred;
4836  *coded_val = val;
4837  }
4838  cbp |= val << (5 - k);
4839 
4840  vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4841 
4842  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4843  continue;
4844  v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4845  if (v->pq >= 9 && v->overlap) {
4846  if (v->rangeredfrm)
4847  for (j = 0; j < 64; j++)
4848  s->block[k][j] <<= 1;
4849  s->idsp.put_signed_pixels_clamped(s->block[k], dst[k],
4850  k & 4 ? s->uvlinesize
4851  : s->linesize);
4852  } else {
4853  if (v->rangeredfrm)
4854  for (j = 0; j < 64; j++)
4855  s->block[k][j] = (s->block[k][j] - 64) << 1;
4856  s->idsp.put_pixels_clamped(s->block[k], dst[k],
4857  k & 4 ? s->uvlinesize
4858  : s->linesize);
4859  }
4860  }
4861 
4862  if (v->pq >= 9 && v->overlap) {
4863  if (s->mb_x) {
4864  v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4865  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4866  if (!(s->flags & CODEC_FLAG_GRAY)) {
4867  v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4868  v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4869  }
4870  }
4871  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4872  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4873  if (!s->first_slice_line) {
4874  v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4875  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4876  if (!(s->flags & CODEC_FLAG_GRAY)) {
4877  v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4878  v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4879  }
4880  }
4881  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4882  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4883  }
4884  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4885 
4886  if (get_bits_count(&s->gb) > v->bits) {
4887  ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4888  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4889  get_bits_count(&s->gb), v->bits);
4890  return;
4891  }
4892  }
4893  if (!v->s.loop_filter)
4894  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4895  else if (s->mb_y)
4896  ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4897 
4898  s->first_slice_line = 0;
4899  }
4900  if (v->s.loop_filter)
4901  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4902 
 4903  /* This is intentionally mb_height and not end_mb_y - unlike in advanced
 4904  * profile, these only differ when decoding MSS2 rectangles. */
4905  ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4906 }
4907 
4911 {
4912  int k;
4913  MpegEncContext *s = &v->s;
4914  int cbp, val;
4915  uint8_t *coded_val;
4916  int mb_pos;
4917  int mquant = v->pq;
4918  int mqdiff;
4919  GetBitContext *gb = &s->gb;
4920 
4921  /* select codingmode used for VLC tables selection */
4922  switch (v->y_ac_table_index) {
4923  case 0:
4925  break;
4926  case 1:
4928  break;
4929  case 2:
4931  break;
4932  }
4933 
4934  switch (v->c_ac_table_index) {
4935  case 0:
4937  break;
4938  case 1:
4940  break;
4941  case 2:
4943  break;
4944  }
4945 
4946  // do frame decode
4947  s->mb_x = s->mb_y = 0;
4948  s->mb_intra = 1;
4949  s->first_slice_line = 1;
4950  s->mb_y = s->start_mb_y;
4951  if (s->start_mb_y) {
4952  s->mb_x = 0;
4953  init_block_index(v);
4954  memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4955  (1 + s->b8_stride) * sizeof(*s->coded_block));
4956  }
4957  for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4958  s->mb_x = 0;
4959  init_block_index(v);
4960  for (;s->mb_x < s->mb_width; s->mb_x++) {
4961  int16_t (*block)[64] = v->block[v->cur_blk_idx];
4963  s->bdsp.clear_blocks(block[0]);
4964  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4965  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4966  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4967  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4968 
4969  // do actual MB decoding and displaying
4970  if (v->fieldtx_is_raw)
4971  v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4973  if ( v->acpred_is_raw)
4974  v->s.ac_pred = get_bits1(&v->s.gb);
4975  else
4976  v->s.ac_pred = v->acpred_plane[mb_pos];
4977 
4978  if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4979  v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4980 
4981  GET_MQUANT();
4982 
4983  s->current_picture.qscale_table[mb_pos] = mquant;
4984  /* Set DC scale - y and c use the same */
4985  s->y_dc_scale = s->y_dc_scale_table[mquant];
4986  s->c_dc_scale = s->c_dc_scale_table[mquant];
4987 
4988  for (k = 0; k < 6; k++) {
4989  val = ((cbp >> (5 - k)) & 1);
4990 
4991  if (k < 4) {
4992  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4993  val = val ^ pred;
4994  *coded_val = val;
4995  }
4996  cbp |= val << (5 - k);
4997 
4998  v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4999  v->c_avail = !!s->mb_x || (k == 1 || k == 3);
5000 
5001  vc1_decode_i_block_adv(v, block[k], k, val,
5002  (k < 4) ? v->codingset : v->codingset2, mquant);
5003 
5004  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
5005  continue;
5007  }
5008 
5012 
5013  if (get_bits_count(&s->gb) > v->bits) {
5014  // TODO: may need modification to handle slice coding
5015  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5016  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
5017  get_bits_count(&s->gb), v->bits);
5018  return;
5019  }
5020  }
5021  if (!v->s.loop_filter)
5022  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5023  else if (s->mb_y)
5024  ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
5025  s->first_slice_line = 0;
5026  }
5027 
5028  /* raw bottom MB row */
5029  s->mb_x = 0;
5030  init_block_index(v);
5031 
5032  for (;s->mb_x < s->mb_width; s->mb_x++) {
5035  if (v->s.loop_filter)
5037  }
5038  if (v->s.loop_filter)
5039  ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
5040  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5041  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5042 }
5043 
5045 {
5046  MpegEncContext *s = &v->s;
5047  int apply_loop_filter;
5048 
5049  /* select codingmode used for VLC tables selection */
5050  switch (v->c_ac_table_index) {
5051  case 0:
5053  break;
5054  case 1:
5056  break;
5057  case 2:
5059  break;
5060  }
5061 
5062  switch (v->c_ac_table_index) {
5063  case 0:
5065  break;
5066  case 1:
5068  break;
5069  case 2:
5071  break;
5072  }
5073 
5074  apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
5075  v->fcm == PROGRESSIVE;
5076  s->first_slice_line = 1;
5077  memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
5078  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5079  s->mb_x = 0;
5080  init_block_index(v);
5081  for (; s->mb_x < s->mb_width; s->mb_x++) {
5083 
5084  if (v->fcm == ILACE_FIELD)
5086  else if (v->fcm == ILACE_FRAME)
5088  else vc1_decode_p_mb(v);
5089  if (s->mb_y != s->start_mb_y && apply_loop_filter)
5091  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5092  // TODO: may need modification to handle slice coding
5093  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5094  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5095  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5096  return;
5097  }
5098  }
5099  memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
5100  memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
5101  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
5102  memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
5103  if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5104  s->first_slice_line = 0;
5105  }
5106  if (apply_loop_filter) {
5107  s->mb_x = 0;
5108  init_block_index(v);
5109  for (; s->mb_x < s->mb_width; s->mb_x++) {
5112  }
5113  }
5114  if (s->end_mb_y >= s->start_mb_y)
5115  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5116  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5117  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5118 }
5119 
5121 {
5122  MpegEncContext *s = &v->s;
5123 
5124  /* select codingmode used for VLC tables selection */
5125  switch (v->c_ac_table_index) {
5126  case 0:
5128  break;
5129  case 1:
5131  break;
5132  case 2:
5134  break;
5135  }
5136 
5137  switch (v->c_ac_table_index) {
5138  case 0:
5140  break;
5141  case 1:
5143  break;
5144  case 2:
5146  break;
5147  }
5148 
5149  s->first_slice_line = 1;
5150  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5151  s->mb_x = 0;
5152  init_block_index(v);
5153  for (; s->mb_x < s->mb_width; s->mb_x++) {
5155 
5156  if (v->fcm == ILACE_FIELD)
5158  else if (v->fcm == ILACE_FRAME)
5160  else
5161  vc1_decode_b_mb(v);
5162  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5163  // TODO: may need modification to handle slice coding
5164  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5165  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5166  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5167  return;
5168  }
5169  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
5170  }
5171  if (!v->s.loop_filter)
5172  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5173  else if (s->mb_y)
5174  ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5175  s->first_slice_line = 0;
5176  }
5177  if (v->s.loop_filter)
5178  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5179  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5180  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5181 }
5182 
5184 {
5185  MpegEncContext *s = &v->s;
5186 
5187  if (!v->s.last_picture.f->data[0])
5188  return;
5189 
5190  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
5191  s->first_slice_line = 1;
5192  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5193  s->mb_x = 0;
5194  init_block_index(v);
5196  memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
5197  memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5198  memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5199  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5200  s->first_slice_line = 0;
5201  }
5203 }
5204 
5206 {
5207 
5208  v->s.esc3_level_length = 0;
5209  if (v->x8_type) {
5210  ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
5211  } else {
5212  v->cur_blk_idx = 0;
5213  v->left_blk_idx = -1;
5214  v->topleft_blk_idx = 1;
5215  v->top_blk_idx = 2;
5216  switch (v->s.pict_type) {
5217  case AV_PICTURE_TYPE_I:
5218  if (v->profile == PROFILE_ADVANCED)
5220  else
5222  break;
5223  case AV_PICTURE_TYPE_P:
5224  if (v->p_frame_skipped)
5226  else
5228  break;
5229  case AV_PICTURE_TYPE_B:
5230  if (v->bi_type) {
5231  if (v->profile == PROFILE_ADVANCED)
5233  else
5235  } else
5237  break;
5238  }
5239  }
5240 }
5241 
5242 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5243 
5244 typedef struct {
5256  int coefs[2][7];
5257 
5258  int effect_type, effect_flag;
5259  int effect_pcount1, effect_pcount2;
5260  int effect_params1[15], effect_params2[10];
5261 } SpriteData;
5262 
5263 static inline int get_fp_val(GetBitContext* gb)
5264 {
5265  return (get_bits_long(gb, 30) - (1 << 29)) << 1;
5266 }
5267 
5268 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
5269 {
5270  c[1] = c[3] = 0;
5271 
5272  switch (get_bits(gb, 2)) {
5273  case 0:
5274  c[0] = 1 << 16;
5275  c[2] = get_fp_val(gb);
5276  c[4] = 1 << 16;
5277  break;
5278  case 1:
5279  c[0] = c[4] = get_fp_val(gb);
5280  c[2] = get_fp_val(gb);
5281  break;
5282  case 2:
5283  c[0] = get_fp_val(gb);
5284  c[2] = get_fp_val(gb);
5285  c[4] = get_fp_val(gb);
5286  break;
5287  case 3:
5288  c[0] = get_fp_val(gb);
5289  c[1] = get_fp_val(gb);
5290  c[2] = get_fp_val(gb);
5291  c[3] = get_fp_val(gb);
5292  c[4] = get_fp_val(gb);
5293  break;
5294  }
5295  c[5] = get_fp_val(gb);
5296  if (get_bits1(gb))
5297  c[6] = get_fp_val(gb);
5298  else
5299  c[6] = 1 << 16;
5300 }
5301 
5302 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
5303 {
5304  AVCodecContext *avctx = v->s.avctx;
5305  int sprite, i;
5306 
5307  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5308  vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
5309  if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
5310  avpriv_request_sample(avctx, "Non-zero rotation coefficients");
5311  av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
5312  for (i = 0; i < 7; i++)
5313  av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
5314  sd->coefs[sprite][i] / (1<<16),
5315  (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
5316  av_log(avctx, AV_LOG_DEBUG, "\n");
5317  }
5318 
5319  skip_bits(gb, 2);
5320  if (sd->effect_type = get_bits_long(gb, 30)) {
5321  switch (sd->effect_pcount1 = get_bits(gb, 4)) {
5322  case 7:
5323  vc1_sprite_parse_transform(gb, sd->effect_params1);
5324  break;
5325  case 14:
5326  vc1_sprite_parse_transform(gb, sd->effect_params1);
5327  vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5328  break;
5329  default:
5330  for (i = 0; i < sd->effect_pcount1; i++)
5331  sd->effect_params1[i] = get_fp_val(gb);
5332  }
5333  if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5334  // effect 13 is simple alpha blending and matches the opacity above
5335  av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5336  for (i = 0; i < sd->effect_pcount1; i++)
5337  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5338  sd->effect_params1[i] / (1 << 16),
5339  (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5340  av_log(avctx, AV_LOG_DEBUG, "\n");
5341  }
5342 
5343  sd->effect_pcount2 = get_bits(gb, 16);
5344  if (sd->effect_pcount2 > 10) {
5345  av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5346  return;
5347  } else if (sd->effect_pcount2) {
5348  i = -1;
5349  av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5350  while (++i < sd->effect_pcount2) {
5351  sd->effect_params2[i] = get_fp_val(gb);
5352  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5353  sd->effect_params2[i] / (1 << 16),
5354  (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5355  }
5356  av_log(avctx, AV_LOG_DEBUG, "\n");
5357  }
5358  }
5359  if (sd->effect_flag = get_bits1(gb))
5360  av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
5361 
5362  if (get_bits_count(gb) >= gb->size_in_bits +
5363  (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
5364  av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5365  if (get_bits_count(gb) < gb->size_in_bits - 8)
5366  av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
5367 }
5368 
/* Composite one or two sprites into v->sprite_output_frame.
 * sd->coefs[] holds per-sprite 16.16 fixed-point transform coefficients
 * from vc1_parse_sprites(): [0]/[4] = x/y per-output-pixel advance (scale),
 * [2]/[5] = x/y offset, [6] = opacity. Sprite 0 is sampled from the current
 * picture, sprite 1 (when v->two_sprites) from the last picture. Source rows
 * are horizontally resampled on demand into v->sr_rows[][] and then blended
 * vertically (and between sprites) into each output row. */
5369 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5370 {
5371  int i, plane, row, sprite;
      /* sr_cache[sprite][j] = source line currently held in
         v->sr_rows[sprite][j]; -1 means the buffer is empty. */
5372  int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5373  uint8_t* src_h[2][2];
5374  int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5375  int ysub[2];
5376  MpegEncContext *s = &v->s;
5377 
      /* Clamp offsets and advances so sampling stays inside the sprite.
         Note "v->sprite_width-1 << 16" parses as (sprite_width - 1) << 16. */
5378  for (i = 0; i < 2; i++) {
5379  xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5380  xadv[i] = sd->coefs[i][0];
5381  if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5382  xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5383 
5384  yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5385  yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
5386  }
      /* Blend factor between the two sprites (16.16), from sprite 1's opacity. */
5387  alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5388 
      /* Luma only in gray mode; chroma planes are half-sized (>> !!plane). */
5389  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5390  int width = v->output_width>>!!plane;
5391 
5392  for (row = 0; row < v->output_height>>!!plane; row++) {
5393  uint8_t *dst = v->sprite_output_frame->data[plane] +
5394  v->sprite_output_frame->linesize[plane] * row;
5395 
      /* Prepare, for each sprite, the (up to two) horizontally-resampled
         source rows bracketing this output row's vertical position. */
5396  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5397  uint8_t *iplane = s->current_picture.f->data[plane];
5398  int iline = s->current_picture.f->linesize[plane];
      /* 16.16 vertical source position: integer part = yline,
         fractional part = ysub (vertical interpolation weight). */
5399  int ycoord = yoff[sprite] + yadv[sprite] * row;
5400  int yline = ycoord >> 16;
5401  int next_line;
5402  ysub[sprite] = ycoord & 0xFFFF;
5403  if (sprite) {
      /* second sprite is sampled from the previous picture */
5404  iplane = s->last_picture.f->data[plane];
5405  iline = s->last_picture.f->linesize[plane];
5406  }
5407  next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
      /* Fast path: integer x offset and unity x scale — point straight
         at the source rows, no horizontal resampling needed. */
5408  if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5409  src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5410  if (ysub[sprite])
5411  src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
5412  } else {
      /* Resample rows into sr_rows[], reusing cached rows when possible;
         a cached "next" row can become the new "current" row via swap. */
5413  if (sr_cache[sprite][0] != yline) {
5414  if (sr_cache[sprite][1] == yline) {
5415  FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5416  FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5417  } else {
5418  v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5419  sr_cache[sprite][0] = yline;
5420  }
5421  }
5422  if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5423  v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
5424  iplane + next_line, xoff[sprite],
5425  xadv[sprite], width);
5426  sr_cache[sprite][1] = yline + 1;
5427  }
5428  src_h[sprite][0] = v->sr_rows[sprite][0];
5429  src_h[sprite][1] = v->sr_rows[sprite][1];
5430  }
5431  }
5432 
      /* Vertical interpolation and (for two sprites) alpha blending into
         the output row; ysub == 0 means the row needs no interpolation. */
5433  if (!v->two_sprites) {
5434  if (ysub[0]) {
5435  v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5436  } else {
5437  memcpy(dst, src_h[0][0], width);
5438  }
5439  } else {
5440  if (ysub[0] && ysub[1]) {
5441  v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5442  src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5443  } else if (ysub[0]) {
5444  v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5445  src_h[1][0], alpha, width);
5446  } else if (ysub[1]) {
      /* sprites swapped, so the blend weight is inverted */
5447  v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5448  src_h[0][0], (1<<16)-1-alpha, width);
5449  } else {
5450  v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5451  }
5452  }
5453  }
5454 
      /* After luma, halve the offsets for the half-sized chroma planes.
         NOTE(review): xadv/yadv are left unscaled — presumably correct
         because chroma has half the output rows/columns too; confirm. */
5455  if (!plane) {
5456  for (i = 0; i < 2; i++) {
5457  xoff[i] >>= 1;
5458  yoff[i] >>= 1;
5459  }
5460  }
5461 
5462  }
5463 }
5464 
5465 
5466 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5467 {
5468  MpegEncContext *s = &v->s;
5469  AVCodecContext *avctx = s->avctx;
5470  SpriteData sd;
5471 
5472  vc1_parse_sprites(v, gb, &sd);
5473 
5474  if (!s->current_picture.f->data[0]) {
5475  av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5476  return -1;
5477  }
5478 
5479  if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f->data[0])) {
5480  av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5481  v->two_sprites = 0;
5482  }
5483 
5485  if (ff_get_buffer(avctx, v->sprite_output_frame, 0) < 0) {
5486  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5487  return -1;
5488  }
5489 
5490  vc1_draw_sprites(v, &sd);
5491 
5492  return 0;
5493 }
5494 
5495 static void vc1_sprite_flush(AVCodecContext *avctx)
5496 {
5497  VC1Context *v = avctx->priv_data;
5498  MpegEncContext *s = &v->s;
5499  AVFrame *f = s->current_picture.f;
5500  int plane, i;
5501 
5502  /* Windows Media Image codecs have a convergence interval of two keyframes.
5503  Since we can't enforce it, clear to black the missing sprite. This is
5504  wrong but it looks better than doing nothing. */
5505 
5506  if (f->data[0])
5507  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5508  for (i = 0; i < v->sprite_height>>!!plane; i++)
5509  memset(f->data[plane] + i * f->linesize[plane],
5510  plane ? 128 : 0, f->linesize[plane]);
5511 }
5512 
5513 #endif
5514 
5516 {
5517  MpegEncContext *s = &v->s;
5518  int i;
5519  int mb_height = FFALIGN(s->mb_height, 2);
5520 
5521  /* Allocate mb bitplanes */
5522  v->mv_type_mb_plane = av_malloc (s->mb_stride * mb_height);
5523  v->direct_mb_plane = av_malloc (s->mb_stride * mb_height);
5524  v->forward_mb_plane = av_malloc (s->mb_stride * mb_height);
5525  v->fieldtx_plane = av_mallocz(s->mb_stride * mb_height);
5526  v->acpred_plane = av_malloc (s->mb_stride * mb_height);
5527  v->over_flags_plane = av_malloc (s->mb_stride * mb_height);
5528 
5529  v->n_allocated_blks = s->mb_width + 2;
5530  v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
5531  v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5532  v->cbp = v->cbp_base + s->mb_stride;
5533  v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5534  v->ttblk = v->ttblk_base + s->mb_stride;
5535  v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5536  v->is_intra = v->is_intra_base + s->mb_stride;
5537  v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5538  v->luma_mv = v->luma_mv_base + s->mb_stride;
5539 
5540  /* allocate block type info in that way so it could be used with s->block_index[] */
5541  v->mb_type_base = av_malloc(s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5542  v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5543  v->mb_type[1] = v->mb_type_base + s->b8_stride * (mb_height * 2 + 1) + s->mb_stride + 1;
5544  v->mb_type[2] = v->mb_type[1] + s->mb_stride * (mb_height + 1);
5545 
5546  /* allocate memory to store block level MV info */
5547  v->blk_mv_type_base = av_mallocz( s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5548  v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5549  v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5550  v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5551  v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5552  v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5553  v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5554  v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5555 
5556  /* Init coded blocks info */
5557  if (v->profile == PROFILE_ADVANCED) {
5558 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5559 // return -1;
5560 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5561 // return -1;
5562  }
5563 
5564  ff_intrax8_common_init(&v->x8,s);
5565 
5567  for (i = 0; i < 4; i++)
5568  if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
5569  }
5570 
5571  if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5572  !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5573  !v->mb_type_base) {
5576  av_freep(&v->acpred_plane);
5578  av_freep(&v->block);
5579  av_freep(&v->cbp_base);
5580  av_freep(&v->ttblk_base);
5581  av_freep(&v->is_intra_base);
5582  av_freep(&v->luma_mv_base);
5583  av_freep(&v->mb_type_base);
5584  return AVERROR(ENOMEM);
5585  }
5586 
5587  return 0;
5588 }
5589 
5591 {
5592  int i;
5593  for (i = 0; i < 64; i++) {
5594 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5595  v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5596  v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5597  v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5598  v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5600  }
5601  v->left_blk_sh = 0;
5602  v->top_blk_sh = 3;
5603 }
5604 
5610 {
5611  VC1Context *v = avctx->priv_data;
5612  MpegEncContext *s = &v->s;
5613  GetBitContext gb;
5614 
5615  /* save the container output size for WMImage */
5616  v->output_width = avctx->width;
5617  v->output_height = avctx->height;
5618 
5619  if (!avctx->extradata_size || !avctx->extradata)
5620  return -1;
5621  if (!(avctx->flags & CODEC_FLAG_GRAY))
5622  avctx->pix_fmt = ff_get_format(avctx, avctx->codec->pix_fmts);
5623  else
5624  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5625  v->s.avctx = avctx;
5626 
5627  if (ff_vc1_init_common(v) < 0)
5628  return -1;
5629  ff_blockdsp_init(&s->bdsp, avctx);
5631  ff_qpeldsp_init(&s->qdsp);
5632 
5633  if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5634  int count = 0;
5635 
5636  // looks like WMV3 has a sequence header stored in the extradata
5637  // advanced sequence header may be before the first frame
5638  // the last byte of the extradata is a version number, 1 for the
5639  // samples we can decode
5640 
5641  init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5642 
5643  if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
5644  return -1;
5645 
5646  count = avctx->extradata_size*8 - get_bits_count(&gb);
5647  if (count > 0) {
5648  av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5649  count, get_bits(&gb, count));
5650  } else if (count < 0) {
5651  av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5652  }
5653  } else { // VC1/WVC1/WVP2
5654  const uint8_t *start = avctx->extradata;
5655  uint8_t *end = avctx->extradata + avctx->extradata_size;
5656  const uint8_t *next;
5657  int size, buf2_size;
5658  uint8_t *buf2 = NULL;
5659  int seq_initialized = 0, ep_initialized = 0;
5660 
5661  if (avctx->extradata_size < 16) {
5662  av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5663  return -1;
5664  }
5665 
5667  start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5668  next = start;
5669  for (; next < end; start = next) {
5670  next = find_next_marker(start + 4, end);
5671  size = next - start - 4;
5672  if (size <= 0)
5673  continue;
5674  buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5675  init_get_bits(&gb, buf2, buf2_size * 8);
5676  switch (AV_RB32(start)) {
5677  case VC1_CODE_SEQHDR:
5678  if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5679  av_free(buf2);
5680  return -1;
5681  }
5682  seq_initialized = 1;
5683  break;
5684  case VC1_CODE_ENTRYPOINT:
5685  if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
5686  av_free(buf2);
5687  return -1;
5688  }
5689  ep_initialized = 1;
5690  break;
5691  }
5692  }
5693  av_free(buf2);
5694  if (!seq_initialized || !ep_initialized) {
5695  av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5696  return -1;
5697  }
5698  v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5699  }
5700 
5702  if (!v->sprite_output_frame)
5703  return AVERROR(ENOMEM);
5704 
5705  avctx->profile = v->profile;
5706  if (v->profile == PROFILE_ADVANCED)
5707  avctx->level = v->level;
5708 
5709  avctx->has_b_frames = !!avctx->max_b_frames;
5710 
5711  s->mb_width = (avctx->coded_width + 15) >> 4;
5712  s->mb_height = (avctx->coded_height + 15) >> 4;
5713 
5714  if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5716  } else {
5717  memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
5718  v->left_blk_sh = 3;
5719  v->top_blk_sh = 0;
5720  }
5721 
5722  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5723  v->sprite_width = avctx->coded_width;
5724  v->sprite_height = avctx->coded_height;
5725 
5726  avctx->coded_width = avctx->width = v->output_width;
5727  avctx->coded_height = avctx->height = v->output_height;
5728 
5729  // prevent 16.16 overflows
5730  if (v->sprite_width > 1 << 14 ||
5731  v->sprite_height > 1 << 14 ||
5732  v->output_width > 1 << 14 ||
5733  v->output_height > 1 << 14) return -1;
5734  }
5735  return 0;
5736 }
5737 
5742 {
5743  VC1Context *v = avctx->priv_data;
5744  int i;
5745 
5747 
5748  for (i = 0; i < 4; i++)
5749  av_freep(&v->sr_rows[i >> 1][i & 1]);
5750  av_freep(&v->hrd_rate);
5751  av_freep(&v->hrd_buffer);
5752  ff_mpv_common_end(&v->s);
5756  av_freep(&v->fieldtx_plane);
5757  av_freep(&v->acpred_plane);
5759  av_freep(&v->mb_type_base);
5761  av_freep(&v->mv_f_base);
5762  av_freep(&v->mv_f_next_base);
5763  av_freep(&v->block);
5764  av_freep(&v->cbp_base);
5765  av_freep(&v->ttblk_base);
5766  av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5767  av_freep(&v->luma_mv_base);
5769  return 0;
5770 }
5771 
5772 
5776 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5777  int *got_frame, AVPacket *avpkt)
5778 {
5779  const uint8_t *buf = avpkt->data;
5780  int buf_size = avpkt->size, n_slices = 0, i, ret;
5781  VC1Context *v = avctx->priv_data;
5782  MpegEncContext *s = &v->s;
5783  AVFrame *pict = data;
5784  uint8_t *buf2 = NULL;
5785  const uint8_t *buf_start = buf;
5786  int mb_height, n_slices1;
5787  struct {
5788  uint8_t *buf;
5789  GetBitContext gb;
5790  int mby_start;
5791  } *slices = NULL, *tmp;
5792 
5793  /* no supplementary picture */
5794  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5795  /* special case for last picture */
5796  if (s->low_delay == 0 && s->next_picture_ptr) {
5797  if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
5798  return ret;
5799  s->next_picture_ptr = NULL;
5800 
5801  *got_frame = 1;
5802  }
5803 
5804  return 0;
5805  }
5806 
5807  //for advanced profile we may need to parse and unescape data
5808  if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5809  int buf_size2 = 0;
5810  buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5811 
5812  if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5813  const uint8_t *start, *end, *next;
5814  int size;
5815 
5816  next = buf;
5817  for (start = buf, end = buf + buf_size; next < end; start = next) {
5818  next = find_next_marker(start + 4, end);
5819  size = next - start - 4;
5820  if (size <= 0) continue;
5821  switch (AV_RB32(start)) {
5822  case VC1_CODE_FRAME:
5823  if (avctx->hwaccel)
5824  buf_start = start;
5825  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5826  break;
5827  case VC1_CODE_FIELD: {
5828  int buf_size3;
5829  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5830  if (!tmp)
5831  goto err;
5832  slices = tmp;
5833  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5834  if (!slices[n_slices].buf)
5835  goto err;
5836  buf_size3 = vc1_unescape_buffer(start + 4, size,
5837  slices[n_slices].buf);
5838  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5839  buf_size3 << 3);
5840  /* assuming that the field marker is at the exact middle,
5841  hope it's correct */
5842  slices[n_slices].mby_start = s->mb_height >> 1;
5843  n_slices1 = n_slices - 1; // index of the last slice of the first field
5844  n_slices++;
5845  break;
5846  }
5847  case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5848  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5849  init_get_bits(&s->gb, buf2, buf_size2 * 8);
5850  ff_vc1_decode_entry_point(avctx, v, &s->gb);
5851  break;
5852  case VC1_CODE_SLICE: {
5853  int buf_size3;
5854  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5855  if (!tmp)
5856  goto err;
5857  slices = tmp;
5858  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5859  if (!slices[n_slices].buf)
5860  goto err;
5861  buf_size3 = vc1_unescape_buffer(start + 4, size,
5862  slices[n_slices].buf);
5863  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5864  buf_size3 << 3);
5865  slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5866  n_slices++;
5867  break;
5868  }
5869  }
5870  }
5871  } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5872  const uint8_t *divider;
5873  int buf_size3;
5874 
5875  divider = find_next_marker(buf, buf + buf_size);
5876  if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5877  av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5878  goto err;
5879  } else { // found field marker, unescape second field
5880  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5881  if (!tmp)
5882  goto err;
5883  slices = tmp;
5884  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5885  if (!slices[n_slices].buf)
5886  goto err;
5887  buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5888  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5889  buf_size3 << 3);
5890  slices[n_slices].mby_start = s->mb_height >> 1;
5891  n_slices1 = n_slices - 1;
5892  n_slices++;
5893  }
5894  buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5895  } else {
5896  buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5897  }
5898  init_get_bits(&s->gb, buf2, buf_size2*8);
5899  } else
5900  init_get_bits(&s->gb, buf, buf_size*8);
5901 
5902  if (v->res_sprite) {
5903  v->new_sprite = !get_bits1(&s->gb);
5904  v->two_sprites = get_bits1(&s->gb);
5905  /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5906  we're using the sprite compositor. These are intentionally kept separate
5907  so you can get the raw sprites by using the wmv3 decoder for WMVP or
5908  the vc1 one for WVP2 */
5909  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5910  if (v->new_sprite) {
5911  // switch AVCodecContext parameters to those of the sprites
5912  avctx->width = avctx->coded_width = v->sprite_width;
5913  avctx->height = avctx->coded_height = v->sprite_height;
5914  } else {
5915  goto image;
5916  }
5917  }
5918  }
5919 
5920  if (s->context_initialized &&
5921  (s->width != avctx->coded_width ||
5922  s->height != avctx->coded_height)) {
5923  ff_vc1_decode_end(avctx);
5924  }
5925 
5926  if (!s->context_initialized) {
5927  if (ff_msmpeg4_decode_init(avctx) < 0)
5928  goto err;
5929  if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5930  ff_mpv_common_end(s);
5931  goto err;
5932  }
5933 
5934  s->low_delay = !avctx->has_b_frames || v->res_sprite;
5935 
5936  if (v->profile == PROFILE_ADVANCED) {
5937  s->h_edge_pos = avctx->coded_width;
5938  s->v_edge_pos = avctx->coded_height;
5939  }
5940  }
5941 
5942  // do parse frame header
5943  v->pic_header_flag = 0;
5944  v->first_pic_header_flag = 1;
5945  if (v->profile < PROFILE_ADVANCED) {
5946  if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5947  goto err;
5948  }
5949  } else {
5950  if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5951  goto err;
5952  }
5953  }
5954  v->first_pic_header_flag = 0;
5955 
5956  if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5957  && s->pict_type != AV_PICTURE_TYPE_I) {
5958  av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5959  goto err;
5960  }
5961 
5962  // for skipping the frame
5965 
5966  /* skip B-frames if we don't have reference frames */
5967  if (!s->last_picture_ptr && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
5968  goto end;
5969  }
5970  if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5971  (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5972  avctx->skip_frame >= AVDISCARD_ALL) {
5973  goto end;
5974  }
5975 
5976  if (s->next_p_frame_damaged) {
5977  if (s->pict_type == AV_PICTURE_TYPE_B)
5978  goto end;
5979  else
5980  s->next_p_frame_damaged = 0;
5981  }
5982 
5983  if (ff_mpv_frame_start(s, avctx) < 0) {
5984  goto err;
5985  }
5986 
5987  // process pulldown flags
5989  // Pulldown flags are only valid when 'broadcast' has been set.
5990  // So ticks_per_frame will be 2
5991  if (v->rff) {
5992  // repeat field
5994  } else if (v->rptfrm) {
5995  // repeat frames
5996  s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2;
5997  }
5998 
6001 
6002  if (avctx->hwaccel) {
6003  if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
6004  goto err;
6005  if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6006  goto err;
6007  if (avctx->hwaccel->end_frame(avctx) < 0)
6008  goto err;
6009  } else {
6010  int header_ret = 0;
6011 
6013 
6014  v->bits = buf_size * 8;
6015  v->end_mb_x = s->mb_width;
6016  if (v->field_mode) {
6017  s->current_picture.f->linesize[0] <<= 1;
6018  s->current_picture.f->linesize[1] <<= 1;
6019  s->current_picture.f->linesize[2] <<= 1;
6020  s->linesize <<= 1;
6021  s->uvlinesize <<= 1;
6022  }
6023  mb_height = s->mb_height >> v->field_mode;
6024 
6025  if (!mb_height) {
6026  av_log(v->s.avctx, AV_LOG_ERROR, "Invalid mb_height.\n");
6027  goto err;
6028  }
6029 
6030  for (i = 0; i <= n_slices; i++) {
6031  if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6032  if (v->field_mode <= 0) {
6033  av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6034  "picture boundary (%d >= %d)\n", i,
6035  slices[i - 1].mby_start, mb_height);
6036  continue;
6037  }
6038  v->second_field = 1;
6039  v->blocks_off = s->mb_width * s->mb_height << 1;
6040  v->mb_off = s->mb_stride * s->mb_height >> 1;
6041  } else {
6042  v->second_field = 0;
6043  v->blocks_off = 0;
6044  v->mb_off = 0;
6045  }
6046  if (i) {
6047  v->pic_header_flag = 0;
6048  if (v->field_mode && i == n_slices1 + 2) {
6049  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6050  av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6051  if (avctx->err_recognition & AV_EF_EXPLODE)
6052  goto err;
6053  continue;
6054  }
6055  } else if (get_bits1(&s->gb)) {
6056  v->pic_header_flag = 1;
6057  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6058  av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6059  if (avctx->err_recognition & AV_EF_EXPLODE)
6060  goto err;
6061  continue;
6062  }
6063  }
6064  }
6065  if (header_ret < 0)
6066  continue;
6067  s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6068  if (!v->field_mode || v->second_field)
6069  s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6070  else
6071  s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6073  if (i != n_slices)
6074  s->gb = slices[i].gb;
6075  }
6076  if (v->field_mode) {
6077  v->second_field = 0;
6078  s->current_picture.f->linesize[0] >>= 1;
6079  s->current_picture.f->linesize[1] >>= 1;
6080  s->current_picture.f->linesize[2] >>= 1;
6081  s->linesize >>= 1;
6082  s->uvlinesize >>= 1;
6084  FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
6085  FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
6086  }
6087  }
6088  av_dlog(s->avctx, "Consumed %i/%i bits\n",
6089  get_bits_count(&s->gb), s->gb.size_in_bits);
6090 // if (get_bits_count(&s->gb) > buf_size * 8)
6091 // return -1;
6092  if (!v->field_mode)
6093  ff_er_frame_end(&s->er);
6094  }
6095 
6096  ff_mpv_frame_end(s);
6097 
6098  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6099 image:
6100  avctx->width = avctx->coded_width = v->output_width;
6101  avctx->height = avctx->coded_height = v->output_height;
6102  if (avctx->skip_frame >= AVDISCARD_NONREF)
6103  goto end;
6104 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6105  if (vc1_decode_sprites(v, &s->gb))
6106  goto err;
6107 #endif
6108  if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
6109  goto err;
6110  *got_frame = 1;
6111  } else {
6112  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6113  if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
6114  goto err;
6116  *got_frame = 1;
6117  } else if (s->last_picture_ptr) {
6118  if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
6119  goto err;
6121  *got_frame = 1;
6122  }
6123  }
6124 
6125 end:
6126  av_free(buf2);
6127  for (i = 0; i < n_slices; i++)
6128  av_free(slices[i].buf);
6129  av_free(slices);
6130  return buf_size;
6131 
6132 err:
6133  av_free(buf2);
6134  for (i = 0; i < n_slices; i++)
6135  av_free(slices[i].buf);
6136  av_free(slices);
6137  return -1;
6138 }
6139 
6140 
6141 static const AVProfile profiles[] = {
6142  { FF_PROFILE_VC1_SIMPLE, "Simple" },
6143  { FF_PROFILE_VC1_MAIN, "Main" },
6144  { FF_PROFILE_VC1_COMPLEX, "Complex" },
6145  { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6146  { FF_PROFILE_UNKNOWN },
6147 };
6148 
6150 #if CONFIG_VC1_DXVA2_HWACCEL
6152 #endif
6153 #if CONFIG_VC1_VAAPI_HWACCEL
6155 #endif
6156 #if CONFIG_VC1_VDPAU_HWACCEL
6158 #endif
6161 };
6162 
6164  .name = "vc1",
6165  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6166  .type = AVMEDIA_TYPE_VIDEO,
6167  .id = AV_CODEC_ID_VC1,
6168  .priv_data_size = sizeof(VC1Context),
6169  .init = vc1_decode_init,
6172  .flush = ff_mpeg_flush,
6173  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6174  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6175  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6176 };
6177 
6178 #if CONFIG_WMV3_DECODER
6179 AVCodec ff_wmv3_decoder = {
6180  .name = "wmv3",
6181  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
6182  .type = AVMEDIA_TYPE_VIDEO,
6183  .id = AV_CODEC_ID_WMV3,
6184  .priv_data_size = sizeof(VC1Context),
6185  .init = vc1_decode_init,
6188  .flush = ff_mpeg_flush,
6189  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6190  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6191  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6192 };
6193 #endif
6194 
6195 #if CONFIG_WMV3IMAGE_DECODER
6196 AVCodec ff_wmv3image_decoder = {
6197  .name = "wmv3image",
6198  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
6199  .type = AVMEDIA_TYPE_VIDEO,
6200  .id = AV_CODEC_ID_WMV3IMAGE,
6201  .priv_data_size = sizeof(VC1Context),
6202  .init = vc1_decode_init,
6205  .capabilities = CODEC_CAP_DR1,
6206  .flush = vc1_sprite_flush,
6207  .pix_fmts = (const enum AVPixelFormat[]) {
6210  },
6211 };
6212 #endif
6213 
6214 #if CONFIG_VC1IMAGE_DECODER
6215 AVCodec ff_vc1image_decoder = {
6216  .name = "vc1image",
6217  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
6218  .type = AVMEDIA_TYPE_VIDEO,
6219  .id = AV_CODEC_ID_VC1IMAGE,
6220  .priv_data_size = sizeof(VC1Context),
6221  .init = vc1_decode_init,
6224  .capabilities = CODEC_CAP_DR1,
6225  .flush = vc1_sprite_flush,
6226  .pix_fmts = (const enum AVPixelFormat[]) {
6229  },
6230 };
6231 #endif
static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
Definition: vc1dec.c:265
in the bitstream is reported as 00b
Definition: vc1.h:173
const int ff_vc1_ttblk_to_tt[3][8]
Table for conversion between TTBLK and TTMB.
Definition: vc1data.c:34
op_pixels_func avg_vc1_mspel_pixels_tab[16]
Definition: vc1dsp.h:59
#define VC1_TTBLK_VLC_BITS
Definition: vc1data.c:126
IDCTDSPContext idsp
Definition: mpegvideo.h:354
void(* vc1_h_overlap)(uint8_t *src, int stride)
Definition: vc1dsp.h:45
const struct AVCodec * codec
Definition: avcodec.h:1059
int topleft_blk_idx
Definition: vc1.h:393
#define MB_TYPE_SKIP
Definition: avcodec.h:786
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:62
static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n, int coded, int codingset)
Decode intra block in intra frames - should be faster than decode_intra_block.
Definition: vc1dec.c:2632
discard all frames except keyframes
Definition: avcodec.h:567
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2368
#define VC1_IF_MBMODE_VLC_BITS
Definition: vc1data.c:145
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2956
BI type.
Definition: avutil.h:259
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:74
int p_frame_skipped
Definition: vc1.h:388
Imode
Imode types.
Definition: vc1.c:54
static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n, int mquant, int ttmb, int first_block, uint8_t *dst, int linesize, int skip_block, int *ttmb_out)
Decode P block.
Definition: vc1dec.c:3217
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:54
The VC1 Context.
Definition: vc1.h:182
int size
int esc3_level_length
Definition: mpegvideo.h:550
This structure describes decoded (raw) audio or video data.
Definition: frame.h:135
VLC ff_vc1_ttblk_vlc[3]
Definition: vc1data.c:127
#define VC1_ICBPCY_VLC_BITS
Definition: vc1data.c:120
static int vc1_decode_p_mb(VC1Context *v)
Decode one P-frame MB.
Definition: vc1dec.c:3530
int k_x
Number of bits for MVs (depends on MV range)
Definition: vc1.h:243
void(* vc1_inv_trans_8x4)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:37
int reffield
if numref = 0 (1 reference) then reffield decides which
Definition: vc1.h:366
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:279
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:314
uint8_t * mv_f_base
Definition: vc1.h:358
void(* vc1_inv_trans_4x8)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:38
void(* clear_block)(int16_t *block)
Definition: blockdsp.h:35
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1234
int mv_type_is_raw
mv type mb plane is not coded
Definition: vc1.h:297
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
static av_always_inline int scaleforsame(VC1Context *v, int i, int n, int dim, int dir)
Definition: vc1dec.c:1375
uint8_t dmvrange
Frame decoding info for interlaced picture.
Definition: vc1.h:343
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:240
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:129
#define ER_MB_END
#define AC_VLC_BITS
Definition: intrax8.c:37
static av_always_inline int scaleforopp_y(VC1Context *v, int n, int dir)
Definition: vc1dec.c:1344
static const uint8_t vc1_index_decode_table[AC_MODES][185][2]
Definition: vc1acdata.h:34
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:280
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:1309
int16_t(*[3] ac_val)[16]
used for for mpeg4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:320
static const int vc1_last_decode_table[AC_MODES]
Definition: vc1acdata.h:30
int tt_index
Index for Transform Type tables (to decode TTMB)
Definition: vc1.h:293
static void vc1_decode_p_blocks(VC1Context *v)
Definition: vc1dec.c:5044
static void vc1_put_signed_blocks_clamped(VC1Context *v)
Definition: vc1dec.c:91
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:258
#define GET_MVDATA(_dmv_x, _dmv_y)
Get MV differentials.
Definition: vc1dec.c:1140
#define VC1_2REF_MVDATA_VLC_BITS
Definition: vc1data.c:140
void ff_er_frame_end(ERContext *s)
static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V) ...
Definition: vc1dec.c:980
void(* sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, int alpha, int width)
Definition: vc1dsp.h:69
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:315
int next_use_ic
Definition: vc1.h:305
int size
Definition: avcodec.h:974
void(* clear_blocks)(int16_t *blocks)
Definition: blockdsp.h:36
uint8_t rangeredfrm
Frame decoding info for S/M profiles only.
Definition: vc1.h:310
HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the b...
Definition: pixfmt.h:127
int ff_msmpeg4_decode_init(AVCodecContext *avctx)
Definition: msmpeg4dec.c:281
#define MB_TYPE_INTRA
Definition: mpegutils.h:69
void ff_print_debug_info(MpegEncContext *s, Picture *p)
Print debugging info for the given picture.
Definition: mpegvideo.c:1910
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1254
int frfd
Definition: vc1.h:375
void(* add_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
Definition: idctdsp.h:59
uint8_t zz_8x8[4][64]
Zigzag table for TT_8x8, permuted for IDCT.
Definition: vc1.h:247
static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n, int coded, int mquant, int codingset)
Decode intra block in inter frames - more generic version than vc1_decode_i_block.
Definition: vc1dec.c:3007
static void vc1_decode_b_blocks(VC1Context *v)
Definition: vc1dec.c:5120
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:39
mpegvideo header.
int top_blk_idx
Definition: vc1.h:393
IntraX8Context x8
Definition: vc1.h:184
VLC * imv_vlc
Definition: vc1.h:349
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
uint8_t * mb_type_base
Definition: vc1.h:272
discard all
Definition: avcodec.h:568
uint8_t * mv_f[2]
0: MV obtained from same field, 1: opposite field
Definition: vc1.h:358
int sprite_height
Definition: vc1.h:384
uint8_t run
Definition: svq3.c:146
int last_use_ic
Definition: vc1.h:305
int fastuvmc
Rounding of qpel vector to hpel ? (not in Simple)
Definition: vc1.h:230
int end_mb_x
Horizontal macroblock limit (used only by mss2)
Definition: vc1.h:401
int profile
profile
Definition: avcodec.h:2622
QpelDSPContext qdsp
Definition: mpegvideo.h:359
AVCodec.
Definition: avcodec.h:2796
void(* vc1_v_loop_filter8)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:50
void ff_vc1_decode_blocks(VC1Context *v)
Definition: vc1dec.c:5205
int block_wrap[6]
Definition: mpegvideo.h:416
#define FFALIGN(x, a)
Definition: common.h:62
uint8_t rff
Definition: vc1.h:319
static void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
Definition: vc1dec.c:2322
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
static int vc1_coded_block_pred(MpegEncContext *s, int n, uint8_t **coded_block_ptr)
Definition: vc1dec.c:2530
enum AVDiscard skip_frame
Definition: avcodec.h:2727
int bits
Definition: vc1.h:188
int range_x
Definition: vc1.h:245
#define VC1_4MV_BLOCK_PATTERN_VLC_BITS
Definition: vc1data.c:122
static void vc1_apply_p_loop_filter(VC1Context *v)
Definition: vc1dec.c:3503
const uint16_t ff_vc1_b_field_mvpred_scales[7][4]
Definition: vc1data.c:1121
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2432
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:2361
int esc3_run_length
Definition: mpegvideo.h:551
int refdist
distance of the current picture from reference
Definition: vc1.h:363
uint8_t * acpred_plane
AC prediction flags bitplane.
Definition: vc1.h:329
VC-1 tables.
int bi_type
Definition: vc1.h:389
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
Definition: mimic.c:275
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
static const AVProfile profiles[]
Definition: vc1dec.c:6141
uint8_t bits
Definition: crc.c:251
uint8_t
static int vc1_decode_b_mb_intfr(VC1Context *v)
Decode one B-frame MB (in interlaced frame B picture)
Definition: vc1dec.c:4420
#define av_cold
Definition: attributes.h:66
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:57
void(* vc1_v_loop_filter4)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:48
int first_pic_header_flag
Definition: vc1.h:376
uint16_t * hrd_rate
Definition: vc1.h:334
av_cold int ff_vc1_init_common(VC1Context *v)
Init VC-1 specific tables and VC1Context members.
Definition: vc1.c:1568
void(* sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, const uint8_t *src2b, int offset2, int alpha, int width)
Definition: vc1dsp.h:71
void(* vc1_inv_trans_8x8)(int16_t *b)
Definition: vc1dsp.h:36
#define DC_VLC_BITS
Definition: vc1dec.c:49
int left_blk_idx
Definition: vc1.h:393
#define AV_RB32
Definition: intreadwrite.h:130
int interlace
Progressive/interlaced (RPTFTM syntax element)
Definition: vc1.h:210
int y_ac_table_index
Luma index from AC2FRM element.
Definition: vc1.h:263
#define b
Definition: input.c:52
int second_field
Definition: vc1.h:362
#define ER_MB_ERROR
int n_allocated_blks
Definition: vc1.h:393
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:191
int c_ac_table_index
AC coding set indexes.
Definition: vc1.h:262
const int ff_vc1_ac_sizes[AC_MODES]
Definition: vc1data.c:1133
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:188
void(* vc1_inv_trans_8x4_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:41
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1164
int ttfrm
Transform type info present at frame level.
Definition: vc1.h:265
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:306
int codingset2
index of current table set from 11.8 to use for chroma block decoding
Definition: vc1.h:269
int16_t bfraction
Relative position % anchors=> how to scale MVs.
Definition: vc1.h:280
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:684
int16_t((* luma_mv)[2]
Definition: vc1.h:396
quarterpel DSP functions
int profile
Sequence header data for all Profiles TODO: choose between ints, uint8_ts and monobit flags...
Definition: vc1.h:227
const char data[16]
Definition: mxf.c:70
static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
Definition: vc1dec.c:197
MSMPEG4 data tables.
uint8_t * data
Definition: avcodec.h:973
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:194
void(* vc1_h_loop_filter8)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:51
static av_always_inline int scaleforsame_x(VC1Context *v, int n, int dir)
Definition: vc1dec.c:1247
uint8_t * forward_mb_plane
bitplane for "forward" MBs
Definition: vc1.h:296
uint8_t last_luty[2][256]
Definition: vc1.h:301
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:255
int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Decode Simple/Main Profiles sequence header.
Definition: vc1.c:294
static void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
Reconstruct motion vector for B-frame and do motion compensation.
Definition: vc1dec.c:2092
#define B
Definition: huffyuv.h:49
int fieldtx_is_raw
Definition: vc1.h:355
uint8_t * over_flags_plane
Overflags bitplane.
Definition: vc1.h:331
static void vc1_decode_b_mb(VC1Context *v)
Decode one B-frame MB (in Main profile)
Definition: vc1dec.c:4109
uint8_t fourmvbp
Definition: vc1.h:353
const int8_t ff_vc1_adv_interlaced_4x8_zz[32]
Definition: vc1data.c:1065
int range_y
MV range.
Definition: vc1.h:245
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:756
uint8_t last_lutuv[2][256]
lookup tables used for intensity compensation
Definition: vc1.h:301
uint8_t ttmbf
Transform type flag.
Definition: vc1.h:266
Definition: vc1.h:143
int k_y
Number of bits for MVs (depends on MV range)
Definition: vc1.h:244
Definition: vf_drawbox.c:37
#define transpose(x)
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:555
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:313
uint8_t twomvbp
Definition: vc1.h:352
int dmb_is_raw
direct mb plane is raw
Definition: vc1.h:298
static int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value for I-frames only prediction dir: left=0, top=1.
Definition: vc1dec.c:2388
int16_t(* block)[6][64]
Definition: vc1.h:392
void(* vc1_inv_trans_8x8_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:40
#define VC1_CBPCY_P_VLC_BITS
Definition: vc1data.c:118
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:123
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1339
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
Definition: mem.c:186
int overlap
overlapped transforms in use
Definition: vc1.h:234
in the bitstream is reported as 11b
Definition: vc1.h:175
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:713
const int8_t ff_vc1_simple_progressive_4x4_zz[16]
Definition: vc1data.c:1022
void(* vc1_inv_trans_4x4)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:39
#define AVERROR(e)
Definition: error.h:43
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:69
static void vc1_mc_1mv(VC1Context *v, int dir)
Do motion compensation over 1 macroblock Mostly adapted hpel_motion and qpel_motion from mpegvideo...
Definition: vc1dec.c:345
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:150
ERContext er
Definition: mpegvideo.h:638
static av_cold int vc1_decode_init(AVCodecContext *avctx)
Initialize a VC1/WMV3 decoder.
Definition: vc1dec.c:5609
#define GET_MQUANT()
Get macroblock-level quantizer scale.
Definition: vc1dec.c:1098
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:144
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegvideo.h:327
uint8_t * mv_f_next_base
Definition: vc1.h:359
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1144
VLC * mbmode_vlc
Definition: vc1.h:348
#define wrap(func)
Definition: neontest.h:62
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:168
const char * name
Name of the codec implementation.
Definition: avcodec.h:2803
#define IS_MARKER(state, i, buf, buf_size)
Definition: dca_parser.c:37
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:514
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:519
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:73
GetBitContext gb
Definition: mpegvideo.h:558
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1478
#define FFMAX(a, b)
Definition: common.h:55
void(* vc1_v_loop_filter16)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:52
static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n, int dir)
Definition: vc1dec.c:1280
static void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y, int mvn, int r_x, int r_y, uint8_t *is_intra, int dir)
Predict and set motion vector for interlaced frame picture MBs.
Definition: vc1dec.c:1676
const uint8_t * zz_8x4
Zigzag scan table for TT_8x4 coding mode.
Definition: vc1.h:249
int res_rtm_flag
reserved, set to 1
Definition: vc1.h:200
void(* vc1_inv_trans_4x4_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:43
void(* put_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
Definition: idctdsp.h:53
int a_avail
Definition: vc1.h:271
uint8_t * blk_mv_type
0: frame MV, 1: field MV (interlaced frame)
Definition: vc1.h:357
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
Decode one AC coefficient.
Definition: vc1dec.c:2566
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:2429
void(* put_signed_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
Definition: idctdsp.h:56
const int8_t ff_vc1_adv_interlaced_4x4_zz[16]
Definition: vc1data.c:1076
int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext *gb)
Definition: vc1.c:839
#define B_FRACTION_DEN
Definition: vc1data.h:99
VLC ff_vc1_ttmb_vlc[3]
Definition: vc1data.c:115
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:531
static av_always_inline int scaleforopp(VC1Context *v, int n, int dim, int dir)
Definition: vc1dec.c:1396
int cur_field_type
0: top, 1: bottom
Definition: vc1.h:370
const uint8_t ff_wmv1_scantable[WMV1_SCANTABLE_COUNT][64]
Definition: msmpeg4data.c:1825
VLC * twomvbp_vlc
Definition: vc1.h:350
const uint8_t * zz_4x8
Zigzag scan table for TT_4x8 coding mode.
Definition: vc1.h:250
AVCodec ff_vc1_decoder
Definition: vc1dec.c:6163
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:2817
static av_always_inline int scaleforopp_x(VC1Context *v, int n)
Definition: vc1dec.c:1317
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:196
av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
Close a VC1/WMV3 decoder.
Definition: vc1dec.c:5741
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2406
int x8_type
Definition: vc1.h:390
#define FFMIN(a, b)
Definition: common.h:57
av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
Definition: vc1dec.c:5590
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed b frames
Definition: mpegvideo.h:476
uint8_t * blk_mv_type_base
Definition: vc1.h:357
av_cold void ff_intrax8_common_init(IntraX8Context *w, MpegEncContext *const s)
Initialize IntraX8 frame decoder.
Definition: intrax8.c:695
int field_mode
1 for interlaced field pictures
Definition: vc1.h:360
av_cold void ff_intrax8_common_end(IntraX8Context *w)
Destroy IntraX8 frame structure.
Definition: intrax8.c:713
static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n, int coded, int codingset, int mquant)
Decode intra block in intra frames - should be faster than decode_intra_block.
Definition: vc1dec.c:2795
int width
picture width / height.
Definition: avcodec.h:1224
int8_t zzi_8x8[64]
Definition: vc1.h:356
#define VC1_SUBBLKPAT_VLC_BITS
Definition: vc1data.c:128
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encodin...
Definition: mpegvideo.h:322
uint8_t mv_mode
Frame decoding info for all profiles.
Definition: vc1.h:241
#define FF_PROFILE_VC1_MAIN
Definition: avcodec.h:2668
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:107
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:310
int fourmvswitch
Definition: vc1.h:344
int mb_off
Definition: vc1.h:372
#define FF_PROFILE_UNKNOWN
Definition: avcodec.h:2623
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:45
static void vc1_decode_skip_blocks(VC1Context *v)
Definition: vc1dec.c:5183
static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
Definition: vc1dec.c:3379
int size_in_bits
Definition: get_bits.h:56
av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
Definition: vc1dec.c:5515
int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext *gb)
Definition: vc1.c:624
static const int offset_table[6]
Definition: vc1dec.c:3377
static int median4(int a, int b, int c, int d)
Definition: vc1dec.c:551
#define FFABS(a)
Definition: common.h:52
int level
level
Definition: avcodec.h:2705
static int vc1_decode_p_mb_intfr(VC1Context *v)
Definition: vc1dec.c:3774
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:522
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:209
MotionEstContext me
Definition: mpegvideo.h:404
#define AV_EF_EXPLODE
Definition: avcodec.h:2417
static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x, int *dmv_y, int *pred_flag)
Definition: vc1dec.c:1178
static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
Definition: vc1dec.c:3443
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3]
Definition: h264chroma.h:28
const uint16_t ff_vc1_field_mvpred_scales[2][7][4]
Definition: vc1data.c:1097
#define FF_PROFILE_VC1_SIMPLE
Definition: avcodec.h:2667
uint32_t * cbp
Definition: vc1.h:394
int left_blk_sh
Definition: vc1.h:248
int16_t(* luma_mv_base)[2]
Definition: vc1.h:396
uint8_t * fieldtx_plane
Definition: vc1.h:354
Definition: vf_drawbox.c:37
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:415
int * ttblk_base
Definition: vc1.h:267
VLC * cbpcy_vlc
CBPCY VLC table.
Definition: vc1.h:292
static int decode210(GetBitContext *gb)
Definition: get_bits.h:547
if(ac->has_optimized_func)
static const float pred[4]
Definition: siprdata.h:259
uint8_t * sr_rows[2][2]
Sprite resizer line cache.
Definition: vc1.h:385
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: utils.c:902
static const int8_t mv[256][2]
Definition: 4xm.c:75
h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3]
Definition: vc1dsp.h:63
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
Definition: vc1dec.c:170
static void vc1_interp_mc(VC1Context *v)
Motion compensation for direct or interpolated blocks in B-frames.
Definition: vc1dec.c:1903
int first_slice_line
used in mpeg4 too to handle resync markers
Definition: mpegvideo.h:546
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1290
static const int offset_table1[9]
Definition: vc1dec.c:53
NULL
Definition: eval.c:55
static int width
Definition: utils.c:156
#define AV_LOG_INFO
Standard information.
Definition: log.h:134
int res_sprite
Simple/Main Profile sequence header.
Definition: vc1.h:192
void(* vc1_h_loop_filter4)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:49
int top_blk_sh
Either 3 or 0, positions of l/t in blk[].
Definition: vc1.h:248
Libavcodec external API header.
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:260
enum AVCodecID codec_id
Definition: avcodec.h:1067
BlockDSPContext bdsp
Definition: mpegvideo.h:351
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:58
int c_avail
Definition: vc1.h:271
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:153
const int8_t ff_vc1_adv_interlaced_8x8_zz[64]
Definition: vc1data.c:1047
static const uint8_t vc1_delta_run_table[AC_MODES][57]
Definition: vc1acdata.h:295
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
Do motion compensation for 4-MV macroblock - luminance block.
Definition: vc1dec.c:564
uint32_t * cbp_base
Definition: vc1.h:394
main external API structure.
Definition: avcodec.h:1050
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:490
uint8_t * is_intra
Definition: vc1.h:395
static int vc1_decode_p_mb_intfi(VC1Context *v)
Definition: vc1dec.c:3987
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
Definition: mpegvideo.h:318
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:223
static void vc1_decode_b_mb_intfi(VC1Context *v)
Decode one B-frame MB (in interlaced field B picture)
Definition: vc1dec.c:4264
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: utils.c:612
static void init_block_index(VC1Context *v)
Definition: vc1dec.c:78
int curr_use_ic
Definition: vc1.h:305
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
void(* sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
Definition: vc1dsp.h:68
int extradata_size
Definition: avcodec.h:1165
const uint8_t ff_vc1_mbmode_intfrp[2][15][4]
Definition: vc1data.c:53
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:271
int sprite_width
Definition: vc1.h:384
int fmb_is_raw
forward mb plane is raw
Definition: vc1.h:299
uint8_t * is_intra_base
Definition: vc1.h:395
int coded_height
Definition: avcodec.h:1234
Definition: vc1.h:139
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:263
#define MB_INTRA_VLC_BITS
Definition: vc1dec.c:48
int index
Definition: gxfenc.c:72
struct AVFrame * f
Definition: mpegvideo.h:100
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:190
static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
Definition: vc1dec.c:753
op_pixels_func put_no_rnd_pixels_tab[2][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:80
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:375
int context_initialized
Definition: mpegvideo.h:250
#define VC1_2MV_BLOCK_PATTERN_VLC_BITS
Definition: vc1data.c:124
static int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int a_avail, int c_avail, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value prediction dir: left=0, top=1.
Definition: vc1dec.c:2453
#define MB_TYPE_16x16
Definition: avcodec.h:778
#define mid_pred
Definition: mathops.h:98
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:261
int dim
int skip_is_raw
skip mb plane is not coded
Definition: vc1.h:300
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo.c:1677
int ff_intrax8_decode_picture(IntraX8Context *const w, int dquant, int quant_offset)
Decode single IntraX8 frame.
Definition: intrax8.c:728
#define FF_PROFILE_VC1_COMPLEX
Definition: avcodec.h:2669
uint8_t next_lutuv[2][256]
lookup tables used for intensity compensation
Definition: vc1.h:303
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:304
int ref_field_type[2]
forward and backward reference field type (top or bottom)
Definition: vc1.h:371
uint8_t * direct_mb_plane
bitplane for "direct" MBs
Definition: vc1.h:295
static const uint8_t vc1_last_delta_run_table[AC_MODES][10]
Definition: vc1acdata.h:339
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
Definition: pixfmt.h:138
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:339
static int vc1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Decode a VC1/WMV3 frame.
Definition: vc1dec.c:5776
uint8_t * mv_type_mb_plane
bitplane for mv_type == (4MV)
Definition: vc1.h:294
int numref
number of past field pictures used as reference
Definition: vc1.h:364
const int32_t ff_vc1_dqscale[63]
Definition: vc1data.c:1085
int blocks_off
Definition: vc1.h:372
h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3]
Definition: vc1dsp.h:62
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:283
uint8_t tff
Definition: vc1.h:319
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:141
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:315
uint8_t level
Definition: svq3.c:147
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:192
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:398
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:257
MpegEncContext s
Definition: vc1.h:183
int height
Definition: gxfenc.c:72
in the bitstream is reported as 10b
Definition: vc1.h:174
MpegEncContext.
Definition: mpegvideo.h:204
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:309
int8_t * qscale_table
Definition: mpegvideo.h:104
struct AVCodecContext * avctx
Definition: mpegvideo.h:221
int cur_blk_idx
Definition: vc1.h:393
uint8_t pq
Definition: vc1.h:246
static const int offset_table2[9]
Definition: vc1dec.c:54
discard all non reference
Definition: avcodec.h:565
static void vc1_decode_i_blocks(VC1Context *v)
Decode blocks of I-frame.
Definition: vc1dec.c:4766
int pqindex
raw pqindex used in coding set selection
Definition: vc1.h:270
static const uint8_t vc1_last_delta_level_table[AC_MODES][44]
Definition: vc1acdata.h:246
#define VC1_1REF_MVDATA_VLC_BITS
Definition: vc1data.c:138
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:65
Y , 8bpp.
Definition: pixfmt.h:73
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:256
static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
Definition: vc1dec.c:2071
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:637
void * av_realloc(void *ptr, size_t size)
Allocate or reallocate a block of memory.
Definition: mem.c:117
static enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[]
Definition: vc1dec.c:6149
#define VC1_TTMB_VLC_BITS
Definition: vc1data.c:114
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
Definition: unary.h:33
uint8_t * dest[3]
Definition: mpegvideo.h:417
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
Definition: alsdec.c:1771
static const int size_table[6]
Definition: vc1dec.c:3376
int output_width
Definition: vc1.h:384
enum FrameCodingMode fcm
Frame decoding info for Advanced profile.
Definition: vc1.h:316
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:288
uint8_t dquantfrm
pquant parameters
Definition: vc1.h:253
uint8_t next_luty[2][256]
Definition: vc1.h:303
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:308
Bi-dir predicted.
Definition: avutil.h:255
AVProfile.
Definition: avcodec.h:2784
int res_fasttx
reserved, always 1
Definition: vc1.h:196
enum AVDiscard skip_loop_filter
Definition: avcodec.h:2713
int pic_header_flag
Definition: vc1.h:377
int * ttblk
Transform type at the block level.
Definition: vc1.h:267
static av_cold int init(AVCodecParserContext *s)
Definition: h264_parser.c:499
VLC ff_vc1_ac_coeff_table[8]
Definition: vc1data.c:143
void(* vc1_v_s_overlap)(int16_t *top, int16_t *bottom)
Definition: vc1dsp.h:46
uint8_t condover
Definition: vc1.h:333
void * priv_data
Definition: avcodec.h:1092
int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Definition: vc1.c:526
#define VC1_INTFR_4MV_MBMODE_VLC_BITS
Definition: vc1data.c:130
#define FF_PROFILE_VC1_ADVANCED
Definition: avcodec.h:2670
uint8_t pquantizer
Uniform (over sequence) quantizer in use.
Definition: vc1.h:291
h264_chroma_mc_func put_h264_chroma_pixels_tab[3]
Definition: h264chroma.h:27
static void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t *is_intra, int pred_flag, int dir)
Predict and set motion vector.
Definition: vc1dec.c:1422
int rnd
rounding control
Definition: vc1.h:306
VideoDSPContext vdsp
Definition: mpegvideo.h:360
Definition: vc1.h:142
AVFrame * sprite_output_frame
Definition: vc1.h:383
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1889
int acpred_is_raw
Definition: vc1.h:330
void(* sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
Definition: vc1dsp.h:67
const int8_t ff_vc1_adv_interlaced_8x4_zz[32]
Definition: vc1data.c:1058
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:600
op_pixels_func avg_no_rnd_pixels_tab[4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:92
uint8_t rptfrm
Definition: vc1.h:319
uint8_t(* curr_luty)[256]
Definition: vc1.h:304
static int decode012(GetBitContext *gb)
Definition: get_bits.h:537
VLC_TYPE(* table)[2]
code, bits
Definition: get_bits.h:66
int bmvtype
Definition: vc1.h:374
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:294
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:191
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
Do motion compensation for 4-MV macroblock - both chroma blocks.
Definition: vc1dec.c:808
static void vc1_decode_i_blocks_adv(VC1Context *v)
Decode blocks of I-frame for advanced profile.
Definition: vc1dec.c:4910
H264ChromaContext h264chroma
Definition: vc1.h:185
int overflg_is_raw
Definition: vc1.h:332
static av_always_inline int vc1_unescape_buffer(const uint8_t *src, int size, uint8_t *dst)
Definition: vc1.h:424
Definition: vc1.h:136
int level
Advanced Profile.
Definition: vc1.h:206
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:238
int brfd
reference frame distance (forward or backward)
Definition: vc1.h:375
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegvideo.h:110
VLC ff_msmp4_mb_i_vlc
Definition: msmpeg4data.c:38
#define av_always_inline
Definition: attributes.h:40
uint8_t mv_mode2
Secondary MV coding mode (B frames)
Definition: vc1.h:242
int new_sprite
Frame decoding info for sprite modes.
Definition: vc1.h:381
uint8_t * mv_f_next[2]
Definition: vc1.h:359
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:735
#define FFSWAP(type, a, b)
Definition: common.h:60
void(* vc1_h_s_overlap)(int16_t *left, int16_t *right)
Definition: vc1dsp.h:47
int two_sprites
Definition: vc1.h:382
int codingset
index of current table set from 11.8 to use for luma block decoding
Definition: vc1.h:268
uint8_t * mb_type[3]
Definition: vc1.h:272
uint16_t * hrd_buffer
Definition: vc1.h:334
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2969
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2980
#define VC1_INTFR_NON4MV_MBMODE_VLC_BITS
Definition: vc1data.c:132
AVPixelFormat
Pixel format.
Definition: pixfmt.h:63
This structure stores compressed data.
Definition: avcodec.h:950
void(* vc1_v_overlap)(uint8_t *src, int stride)
Definition: vc1dsp.h:44
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t buf_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:52
VLC * fourmvbp_vlc
Definition: vc1.h:351
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
int dc_table_index
Definition: mpegvideo.h:543
VLC ff_msmp4_dc_luma_vlc[2]
Definition: msmpeg4data.c:39
VLC ff_vc1_subblkpat_vlc[3]
Definition: vc1data.c:129
#define inc_blk_idx(idx)
uint8_t halfpq
Uniform quant over image and qp+.5.
Definition: vc1.h:281
static void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
Definition: vc1dec.c:2109
static const uint8_t vc1_delta_level_table[AC_MODES][31]
Definition: vc1acdata.h:203
VC1DSPContext vc1dsp
Definition: vc1.h:186
Predicted.
Definition: avutil.h:254
uint8_t((* curr_lutuv)[256]
Definition: vc1.h:304
static av_always_inline const uint8_t * find_next_marker(const uint8_t *src, const uint8_t *end)
Find VC-1 marker in buffer.
Definition: vc1.h:410
int output_height
Definition: vc1.h:384
void(* vc1_inv_trans_4x8_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:42
VLC ff_msmp4_dc_chroma_vlc[2]
Definition: msmpeg4data.c:40
op_pixels_func put_vc1_mspel_pixels_tab[16]
Definition: vc1dsp.h:58
void(* sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
Definition: vc1dsp.h:66
HpelDSPContext hdsp
Definition: mpegvideo.h:353
void(* vc1_h_loop_filter16)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:53
static int16_t block[64]
Definition: dct-test.c:88