void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                                     int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                                      int dstStride, int src1Stride, int h);
void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride,
                                                 int h);
void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride,
                                                int h);
void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride);
void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride);
void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride);
#define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext
#define ff_put_no_rnd_pixels8_mmxext ff_put_pixels8_mmxext
void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
                                          const uint8_t *diff, int w,
                                          int *left, int *left_top);
PIXELS16(static, ff_avg, , , _mmxext)
PIXELS16(static, ff_put, , , _mmxext)
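
/*
 * QPEL_OP() below generates the complete set of MPEG-4 quarter-pel motion
 * compensation functions: one function per fractional position (mc00..mc33)
 * for both 8x8 and 16x16 blocks.  Horizontal and vertical half-pel
 * intermediates are built with the lowpass helpers declared above and merged
 * with the pixels*_l2 averaging helpers; OPNAME selects put/avg/put_no_rnd
 * and RND selects the rounding variant of the helpers.
 */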
#define QPEL_OP(OPNAME, RND, MMX) \
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride) \
{ \
    ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8); \
} \
 \
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t temp[8]; \
    uint8_t * const half = (uint8_t*)temp; \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8); \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8); \
} \
 \
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8); \
} \
 \
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t temp[8]; \
    uint8_t * const half = (uint8_t*)temp; \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8); \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride, stride, 8); \
} \
 \
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t temp[8]; \
    uint8_t * const half = (uint8_t*)temp; \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride); \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8); \
} \
 \
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride); \
} \
 \
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t temp[8]; \
    uint8_t * const half = (uint8_t*)temp; \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride); \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride, stride, 8); \
} \
 \
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half) + 64; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9); \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
} \
 \
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half) + 64; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9); \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, stride, 9); \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
} \
 \
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half) + 64; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9); \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
} \
 \
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half) + 64; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9); \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, stride, 9); \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
} \
 \
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half) + 64; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9); \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
} \
 \
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half) + 64; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9); \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
} \
 \
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9); \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
} \
 \
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[8 + 9]; \
    uint8_t * const halfH = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9); \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, stride, 9); \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
} \
 \
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[9]; \
    uint8_t * const halfH = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9); \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
} \
 \
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride) \
{ \
    ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16); \
} \
 \
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t temp[32]; \
    uint8_t * const half = (uint8_t*)temp; \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16); \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16); \
} \
 \
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16); \
} \
 \
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t temp[32]; \
    uint8_t * const half = (uint8_t*)temp; \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16); \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half, stride, stride, 16); \
} \
 \
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t temp[32]; \
    uint8_t * const half = (uint8_t*)temp; \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride); \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16); \
} \
 \
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride); \
} \
 \
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t temp[32]; \
    uint8_t * const half = (uint8_t*)temp; \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride); \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + stride, half, stride, stride, 16); \
} \
 \
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[16 * 2 + 17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half) + 256; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17); \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17); \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16); \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
} \
 \
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[16 * 2 + 17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half) + 256; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17); \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, stride, 17); \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16); \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
} \
 \
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[16 * 2 + 17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half) + 256; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17); \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17); \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16); \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, 16, 16); \
} \
 \
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[16 * 2 + 17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half) + 256; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17); \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, stride, 17); \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16); \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, 16, 16); \
} \
 \
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[16 * 2 + 17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half) + 256; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17); \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16); \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
} \
 \
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[16 * 2 + 17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half) + 256; \
    uint8_t * const halfHV = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17); \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16); \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, 16, 16); \
} \
 \
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17); \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17); \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
} \
 \
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17); \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, stride, 17); \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
} \
 \
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride) \
{ \
    uint64_t half[17 * 2]; \
    uint8_t * const halfH = ((uint8_t*)half); \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17); \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
}

QPEL_OP(put_,        _,        mmxext)
QPEL_OP(avg_,        _,        mmxext)
QPEL_OP(put_no_rnd_, _no_rnd_, mmxext)
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
    do { \
    c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
    } while (0)
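
As a usage sketch (this mirrors what the MMXEXT init path is expected to do; c is the DSPContext being filled in), the tables for the QPEL_OP()-generated functions above are populated like this:

    /* index 0 holds the 16x16 variants, index 1 the 8x8 variants */
    SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );
    SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );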
/*
 * Per-CPU-level init functions (dsputil_init_mmx() through
 * dsputil_init_sse4()): 8-bit-only function pointers are assigned under an
 * if (!high_bit_depth) guard (high_bit_depth is derived from
 * avctx->bits_per_raw_sample), and optional pieces are wrapped in the
 * configure-generated conditionals HAVE_MMX_EXTERNAL, HAVE_MMXEXT_INLINE,
 * HAVE_MMXEXT_EXTERNAL, HAVE_SSE2_EXTERNAL, HAVE_SSSE3_EXTERNAL,
 * HAVE_SSE4_EXTERNAL and HAVE_7REGS && HAVE_INLINE_ASM.
 */
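
A simplified sketch of how the top-level initializer dispatches to the per-level init functions above; the X86_*/EXTERNAL_* guards from libavutil/x86/cpu.h are used here for illustration and are not necessarily the exact conditions in libavcodec:

    av_cold void ff_dsputil_init_x86(DSPContext *c, AVCodecContext *avctx)
    {
        int cpu_flags = av_get_cpu_flags();   /* query CPU capabilities once */

        if (X86_MMX(cpu_flags))
            dsputil_init_mmx(c, avctx, cpu_flags);

        if (X86_MMXEXT(cpu_flags))
            dsputil_init_mmxext(c, avctx, cpu_flags);

        if (X86_SSE(cpu_flags))
            dsputil_init_sse(c, avctx, cpu_flags);

        if (X86_SSE2(cpu_flags))
            dsputil_init_sse2(c, avctx, cpu_flags);

        if (EXTERNAL_SSSE3(cpu_flags))
            dsputil_init_ssse3(c, avctx, cpu_flags);

        if (EXTERNAL_SSE4(cpu_flags))
            dsputil_init_sse4(c, avctx, cpu_flags);
    }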
void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w)
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)
void(* put_signed_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, int dstStride, int srcStride)
void ff_vector_clip_int32_sse4(int32_t *dst, const int32_t *src, int32_t min, int32_t max, unsigned int len)
static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int cpu_flags)
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order)
void ff_idct_xvid_sse2(short *block)
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
#define AV_CPU_FLAG_CMOV
i686 cmov
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
void(* idct_add)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
Macro definitions for various function/variable attributes.
void ff_clear_blocks_mmx(int16_t *blocks)
#define EXTERNAL_SSE4(flags)
void ff_clear_blocks_sse(int16_t *blocks)
void(* idct_put)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
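For reference, the semantics of idct_put() and idct_add() described above can be written out in plain C. This is an illustrative sketch rather than libavcodec's actual C fallback; the 8x8 in-place IDCT is passed in as a callback:

    #include <stdint.h>

    typedef void (*idct_fn)(int16_t *block);   /* in-place 8x8 inverse DCT */

    static uint8_t clip_uint8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* idct_put: block -> idct -> clip to unsigned 8 bit -> dest */
    static void idct_put_ref(uint8_t *dest, int line_size, int16_t *block,
                             idct_fn idct)
    {
        idct(block);
        for (int y = 0; y < 8; y++)
            for (int x = 0; x < 8; x++)
                dest[y * line_size + x] = clip_uint8(block[y * 8 + x]);
    }

    /* idct_add: block -> idct -> add dest -> clip to unsigned 8 bit -> dest */
    static void idct_add_ref(uint8_t *dest, int line_size, int16_t *block,
                             idct_fn idct)
    {
        idct(block);
        for (int y = 0; y < 8; y++)
            for (int x = 0; x < 8; x++)
                dest[y * line_size + x] =
                    clip_uint8(dest[y * line_size + x] + block[y * 8 + x]);
    }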
void ff_idct_xvid_sse2_put(uint8_t *dest, int line_size, short *block)
void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels, int line_size)
void(* add_bytes)(uint8_t *dst, uint8_t *src, int w)
#define AV_CPU_FLAG_ATOM
Atom processor, some SSSE3 instructions are slower.
void(* add_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels, int line_size)
void ff_vector_clip_int32_mmx(int32_t *dst, const int32_t *src, int32_t min, int32_t max, unsigned int len)
void ff_idct_xvid_mmxext_put(uint8_t *dest, int line_size, int16_t *block)
void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx)
void ff_clear_block_sse(int16_t *block)
Libavcodec version macros.
#define AV_CPU_FLAG_SSE42
Nehalem SSE4.2 functions.
int32_t(* scalarproduct_int16)(const int16_t *v1, const int16_t *v2, int len)
Calculate scalar product of two vectors.
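A plain-C reference for the operation (the SIMD versions replace this loop and typically add alignment and length-multiple requirements):

    #include <stdint.h>

    static int32_t scalarproduct_int16_ref(const int16_t *v1, const int16_t *v2,
                                           int len)
    {
        int32_t sum = 0;
        for (int i = 0; i < len; i++)
            sum += v1[i] * v2[i];   /* each product fits comfortably in 32 bits */
        return sum;
    }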
void ff_gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
#define FF_SSE2_IDCT_PERM
static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx, int cpu_flags)
void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src, int32_t min, int32_t max, unsigned int len)
void ff_idct_xvid_sse2_add(uint8_t *dest, int line_size, short *block)
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx, int cpu_flags)
int(* add_hfyu_left_prediction)(uint8_t *dst, const uint8_t *src, int w, int left)
void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top)
void ff_simple_idct_mmx(int16_t *block)
void(* idct)(int16_t *block)
void ff_simple_idct_put_mmx(uint8_t *dest, int line_size, int16_t *block)
common internal API header
void(* add_hfyu_median_prediction)(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top)
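A plain-C sketch of the intended behaviour, in the spirit of libavcodec's C fallback for the HuffYUV median predictor; mid_pred3() is a local helper introduced here, not an FFmpeg API:

    #include <stdint.h>

    /* median of three values */
    static int mid_pred3(int a, int b, int c)
    {
        if (a > b) { int t = a; a = b; b = t; }   /* ensure a <= b */
        int m = b < c ? b : c;                    /* min(b, c)      */
        return a > m ? a : m;                     /* max(a, min(b, c)) */
    }

    static void add_hfyu_median_prediction_ref(uint8_t *dst, const uint8_t *top,
                                               const uint8_t *diff, int w,
                                               int *left, int *left_top)
    {
        int l = *left, lt = *left_top;

        for (int i = 0; i < w; i++) {
            /* predict from left, top and top-left, then add the residual */
            int pred = mid_pred3(l, top[i], (l + top[i] - lt) & 0xFF);
            l        = (pred + diff[i]) & 0xFF;
            lt       = top[i];
            dst[i]   = l;
        }

        *left     = l;    /* carry the running state into the next call */
        *left_top = lt;
    }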
void(* clear_blocks)(int16_t *blocks)
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, int dstStride, int srcStride)
int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left)
void ff_simple_idct_add_mmx(uint8_t *dest, int line_size, int16_t *block)
void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2, int order)
#define AV_CPU_FLAG_3DNOW
AMD 3DNOW.
void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
void(* put_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
int idct_permutation_type
void(* vector_clipf)(float *dst, const float *src, float min, float max, int len)
main external API structure.
#define FF_SIMPLE_IDCT_PERM
av_cold void ff_dsputil_init_x86(DSPContext *c, AVCodecContext *avctx)
int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left)
int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul)
void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, int dstStride, int srcStride)
#define EXTERNAL_SSSE3(flags)
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul)
#define AV_CPU_FLAG_SSE4
Penryn SSE4.1 functions.
void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, int16_t *block)
static av_cold void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int cpu_flags)
void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, int16_t *block)
void ff_idct_xvid_mmxext_add(uint8_t *dest, int line_size, int16_t *block)
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
void ff_clear_block_mmx(int16_t *block)
void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
void ff_vector_clip_int32_sse2(int32_t *dst, const int32_t *src, int32_t min, int32_t max, unsigned int len)
header for Xvid IDCT functions
void(* vector_clip_int32)(int32_t *dst, const int32_t *src, int32_t min, int32_t max, unsigned int len)
Clip each element in an array of int32_t to a given minimum and maximum value.
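A plain-C reference for the operation:

    #include <stdint.h>

    static void vector_clip_int32_ref(int32_t *dst, const int32_t *src,
                                      int32_t min, int32_t max, unsigned int len)
    {
        for (unsigned int i = 0; i < len; i++) {
            int32_t v = src[i];
            dst[i] = v < min ? min : (v > max ? max : v);
        }
    }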
#define FF_DISABLE_DEPRECATION_WARNINGS
void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, int dstStride, int srcStride)
void(* gmc)(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
global motion compensation.
void ff_add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top)
void(* clear_block)(int16_t *block)
int32_t(* scalarproduct_and_madd_int16)(int16_t *v1, const int16_t *v2, const int16_t *v3, int len, int mul)
Calculate scalar product of v1 and v2, and v1[i] += v3[i] * mul.
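A plain-C sketch of the documented behaviour:

    #include <stdint.h>

    static int32_t scalarproduct_and_madd_int16_ref(int16_t *v1, const int16_t *v2,
                                                    const int16_t *v3,
                                                    int len, int mul)
    {
        int32_t sum = 0;
        for (int i = 0; i < len; i++) {
            sum   += v1[i] * v2[i];   /* accumulate the dot product      */
            v1[i] += v3[i] * mul;     /* then update v1 in place         */
        }
        return sum;
    }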
#define X86_MMXEXT(flags)
void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
#define FF_IDCT_SIMPLEMMX
static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx, int cpu_flags)
void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, int dstStride, int srcStride)
#define FF_ENABLE_DEPRECATION_WARNINGS
void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
void ff_draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels, int line_size)
void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w)
void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx, int cpu_flags)
void ff_idct_xvid_mmx(short *block)
#define CONFIG_MPEG_XVMC_DECODER
void ff_vector_clipf_sse(float *dst, const float *src, float min, float max, int len)
void ff_idct_xvid_mmxext(short *block)
void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
void ff_add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, int dstStride, int srcStride)
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul)
void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
#define PIXELS16(STATIC, PFX1, PFX2, TYPE, CPUEXT)