/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "internal.h"
#include "vp56.h"
#include "vp9.h"
#include "vp9data.h"

29 static av_always_inline void clamp_mv(VP56mv *dst, const VP56mv *src,
30  VP9Context *s)
31 {
32  dst->x = av_clip(src->x, s->min_mv.x, s->max_mv.x);
33  dst->y = av_clip(src->y, s->min_mv.y, s->max_mv.y);
34 }
35 
36 static void find_ref_mvs(VP9Context *s,
37  VP56mv *pmv, int ref, int z, int idx, int sb)
38 {
39  static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
40  [BS_64x64] = { { 3, -1 }, { -1, 3 }, { 4, -1 }, { -1, 4 },
41  { -1, -1 }, { 0, -1 }, { -1, 0 }, { 6, -1 } },
42  [BS_64x32] = { { 0, -1 }, { -1, 0 }, { 4, -1 }, { -1, 2 },
43  { -1, -1 }, { 0, -3 }, { -3, 0 }, { 2, -1 } },
44  [BS_32x64] = { { -1, 0 }, { 0, -1 }, { -1, 4 }, { 2, -1 },
45  { -1, -1 }, { -3, 0 }, { 0, -3 }, { -1, 2 } },
46  [BS_32x32] = { { 1, -1 }, { -1, 1 }, { 2, -1 }, { -1, 2 },
47  { -1, -1 }, { 0, -3 }, { -3, 0 }, { -3, -3 } },
48  [BS_32x16] = { { 0, -1 }, { -1, 0 }, { 2, -1 }, { -1, -1 },
49  { -1, 1 }, { 0, -3 }, { -3, 0 }, { -3, -3 } },
50  [BS_16x32] = { { -1, 0 }, { 0, -1 }, { -1, 2 }, { -1, -1 },
51  { 1, -1 }, { -3, 0 }, { 0, -3 }, { -3, -3 } },
52  [BS_16x16] = { { 0, -1 }, { -1, 0 }, { 1, -1 }, { -1, 1 },
53  { -1, -1 }, { 0, -3 }, { -3, 0 }, { -3, -3 } },
54  [BS_16x8] = { { 0, -1 }, { -1, 0 }, { 1, -1 }, { -1, -1 },
55  { 0, -2 }, { -2, 0 }, { -2, -1 }, { -1, -2 } },
56  [BS_8x16] = { { -1, 0 }, { 0, -1 }, { -1, 1 }, { -1, -1 },
57  { -2, 0 }, { 0, -2 }, { -1, -2 }, { -2, -1 } },
58  [BS_8x8] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
59  { -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
60  [BS_8x4] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
61  { -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
62  [BS_4x8] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
63  { -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
64  [BS_4x4] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
65  { -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
66  };
67  VP9Block *const b = &s->b;
68  int row = b->row, col = b->col, row7 = b->row7;
69  const int8_t (*p)[2] = mv_ref_blk_off[b->bs];
70 #define INVALID_MV 0x80008000U
71  uint32_t mem = INVALID_MV;
72  int i;
73 
74 #define RETURN_DIRECT_MV(mv) \
75  do { \
76  uint32_t m = AV_RN32A(&mv); \
77  if (!idx) { \
78  AV_WN32A(pmv, m); \
79  return; \
80  } else if (mem == INVALID_MV) { \
81  mem = m; \
82  } else if (m != mem) { \
83  AV_WN32A(pmv, m); \
84  return; \
85  } \
86  } while (0)
87 
88  if (sb >= 0) {
89  if (sb == 2 || sb == 1) {
90  RETURN_DIRECT_MV(b->mv[0][z]);
91  } else if (sb == 3) {
92  RETURN_DIRECT_MV(b->mv[2][z]);
93  RETURN_DIRECT_MV(b->mv[1][z]);
94  RETURN_DIRECT_MV(b->mv[0][z]);
95  }
96 
97 #define RETURN_MV(mv) \
98  do { \
99  if (sb > 0) { \
100  VP56mv tmp; \
101  uint32_t m; \
102  clamp_mv(&tmp, &mv, s); \
103  m = AV_RN32A(&tmp); \
104  if (!idx) { \
105  AV_WN32A(pmv, m); \
106  return; \
107  } else if (mem == INVALID_MV) { \
108  mem = m; \
109  } else if (m != mem) { \
110  AV_WN32A(pmv, m); \
111  return; \
112  } \
113  } else { \
114  uint32_t m = AV_RN32A(&mv); \
115  if (!idx) { \
116  clamp_mv(pmv, &mv, s); \
117  return; \
118  } else if (mem == INVALID_MV) { \
119  mem = m; \
120  } else if (m != mem) { \
121  clamp_mv(pmv, &mv, s); \
122  return; \
123  } \
124  } \
125  } while (0)
126 
127  if (row > 0) {
128  VP9MVRefPair *mv = &s->mv[0][(row - 1) * s->sb_cols * 8 + col];
129 
130  if (mv->ref[0] == ref)
131  RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]);
132  else if (mv->ref[1] == ref)
133  RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]);
134  }
135  if (col > s->tiling.tile_col_start) {
136  VP9MVRefPair *mv = &s->mv[0][row * s->sb_cols * 8 + col - 1];
137 
138  if (mv->ref[0] == ref)
139  RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]);
140  else if (mv->ref[1] == ref)
141  RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][1]);
142  }
143  i = 2;
144  } else {
145  i = 0;
146  }
147 
148  // previously coded MVs in the neighborhood, using same reference frame
149  for (; i < 8; i++) {
150  int c = p[i][0] + col, r = p[i][1] + row;
151 
152  if (c >= s->tiling.tile_col_start && c < s->cols &&
153  r >= 0 && r < s->rows) {
154  VP9MVRefPair *mv = &s->mv[0][r * s->sb_cols * 8 + c];
155 
156  if (mv->ref[0] == ref)
157  RETURN_MV(mv->mv[0]);
158  else if (mv->ref[1] == ref)
159  RETURN_MV(mv->mv[1]);
160  }
161  }
162 
163  // MV at this position in previous frame, using same reference frame
164  if (s->use_last_frame_mvs) {
165  VP9MVRefPair *mv = &s->mv[1][row * s->sb_cols * 8 + col];
166 
167  if (mv->ref[0] == ref)
168  RETURN_MV(mv->mv[0]);
169  else if (mv->ref[1] == ref)
170  RETURN_MV(mv->mv[1]);
171  }
172 
173 #define RETURN_SCALE_MV(mv, scale) \
174  do { \
175  if (scale) { \
176  VP56mv mv_temp = { -mv.x, -mv.y }; \
177  RETURN_MV(mv_temp); \
178  } else { \
179  RETURN_MV(mv); \
180  } \
181  } while (0)
182 
183  // previously coded MVs in the neighborhood, using different reference frame
184  for (i = 0; i < 8; i++) {
185  int c = p[i][0] + col, r = p[i][1] + row;
186 
187  if (c >= s->tiling.tile_col_start && c < s->cols &&
188  r >= 0 && r < s->rows) {
189  VP9MVRefPair *mv = &s->mv[0][r * s->sb_cols * 8 + c];
190 
191  if (mv->ref[0] != ref && mv->ref[0] >= 0)
192  RETURN_SCALE_MV(mv->mv[0],
193  s->signbias[mv->ref[0]] != s->signbias[ref]);
194  if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
195  // BUG - libvpx has this condition regardless of whether
196  // we used the first ref MV and pre-scaling
197  AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
198  RETURN_SCALE_MV(mv->mv[1],
199  s->signbias[mv->ref[1]] != s->signbias[ref]);
200  }
201  }
202  }
203 
204  // MV at this position in previous frame, using different reference frame
205  if (s->use_last_frame_mvs) {
206  VP9MVRefPair *mv = &s->mv[1][row * s->sb_cols * 8 + col];
207 
208  if (mv->ref[0] != ref && mv->ref[0] >= 0)
209  RETURN_SCALE_MV(mv->mv[0],
210  s->signbias[mv->ref[0]] != s->signbias[ref]);
211  if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
212  // BUG - libvpx has this condition regardless of whether
213  // we used the first ref MV and pre-scaling
214  AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
215  RETURN_SCALE_MV(mv->mv[1],
216  s->signbias[mv->ref[1]] != s->signbias[ref]);
217  }
218  }
219 
220  AV_ZERO32(pmv);
221 #undef INVALID_MV
222 #undef RETURN_MV
223 #undef RETURN_SCALE_MV
224 }
225 
226 static av_always_inline int read_mv_component(VP9Context *s, int idx, int hp)
227 {
228  int bit, sign = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].sign);
229  int n, c = vp8_rac_get_tree(&s->c, ff_vp9_mv_class_tree,
230  s->prob.p.mv_comp[idx].classes);
231 
232  s->counts.mv_comp[idx].sign[sign]++;
233  s->counts.mv_comp[idx].classes[c]++;
234  if (c) {
235  int m;
236 
237  for (n = 0, m = 0; m < c; m++) {
238  bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].bits[m]);
239  n |= bit << m;
240  s->counts.mv_comp[idx].bits[m][bit]++;
241  }
242  n <<= 3;
244  s->prob.p.mv_comp[idx].fp);
245  n |= bit << 1;
246  s->counts.mv_comp[idx].fp[bit]++;
247  if (hp) {
248  bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].hp);
249  s->counts.mv_comp[idx].hp[bit]++;
250  n |= bit;
251  } else {
252  n |= 1;
253  // bug in libvpx - we count for bw entropy purposes even if the
254  // bit wasn't coded
255  s->counts.mv_comp[idx].hp[1]++;
256  }
257  n += 8 << c;
258  } else {
259  n = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0);
260  s->counts.mv_comp[idx].class0[n]++;
262  s->prob.p.mv_comp[idx].class0_fp[n]);
263  s->counts.mv_comp[idx].class0_fp[n][bit]++;
264  n = (n << 3) | (bit << 1);
265  if (hp) {
266  bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0_hp);
267  s->counts.mv_comp[idx].class0_hp[bit]++;
268  n |= bit;
269  } else {
270  n |= 1;
271  // bug in libvpx - we count for bw entropy purposes even if the
272  // bit wasn't coded
273  s->counts.mv_comp[idx].class0_hp[1]++;
274  }
275  }
276 
277  return sign ? -(n + 1) : (n + 1);
278 }
279 
280 void ff_vp9_fill_mv(VP9Context *s, VP56mv *mv, int mode, int sb)
281 {
282  VP9Block *const b = &s->b;
283 
284  if (mode == ZEROMV) {
285  memset(mv, 0, sizeof(*mv) * 2);
286  } else {
287  int hp;
288 
289  // FIXME cache this value and reuse for other subblocks
290  find_ref_mvs(s, &mv[0], b->ref[0], 0, mode == NEARMV,
291  mode == NEWMV ? -1 : sb);
292  // FIXME maybe move this code into find_ref_mvs()
293  if ((mode == NEWMV || sb == -1) &&
294  !(hp = s->highprecisionmvs &&
295  abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
296  if (mv[0].y & 1) {
297  if (mv[0].y < 0)
298  mv[0].y++;
299  else
300  mv[0].y--;
301  }
302  if (mv[0].x & 1) {
303  if (mv[0].x < 0)
304  mv[0].x++;
305  else
306  mv[0].x--;
307  }
308  }
309  if (mode == NEWMV) {
311  s->prob.p.mv_joint);
312 
313  s->counts.mv_joint[j]++;
314  if (j >= MV_JOINT_V)
315  mv[0].y += read_mv_component(s, 0, hp);
316  if (j & 1)
317  mv[0].x += read_mv_component(s, 1, hp);
318  }
319 
320  if (b->comp) {
321  // FIXME cache this value and reuse for other subblocks
322  find_ref_mvs(s, &mv[1], b->ref[1], 1, mode == NEARMV,
323  mode == NEWMV ? -1 : sb);
324  if ((mode == NEWMV || sb == -1) &&
325  !(hp = s->highprecisionmvs &&
326  abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
327  if (mv[1].y & 1) {
328  if (mv[1].y < 0)
329  mv[1].y++;
330  else
331  mv[1].y--;
332  }
333  if (mv[1].x & 1) {
334  if (mv[1].x < 0)
335  mv[1].x++;
336  else
337  mv[1].x--;
338  }
339  }
340  if (mode == NEWMV) {
342  s->prob.p.mv_joint);
343 
344  s->counts.mv_joint[j]++;
345  if (j >= MV_JOINT_V)
346  mv[1].y += read_mv_component(s, 0, hp);
347  if (j & 1)
348  mv[1].x += read_mv_component(s, 1, hp);
349  }
350  }
351  }
352 }