41 return aa->sym - bb->sym;
51 for (i = 0; i < 4; i++)
61 uint32_t original_format;
81 "4:2:0 video requires even width and height.\n");
89 if (avctx->width & 1) {
91 "4:2:2 video requires even width.\n");
109 "Prediction method %d is not supported in Ut Video.\n",
116 "Plane prediction is not supported in Ut Video.\n");
148 for (i = 0; i < c->planes; i++) {
204 int k = 2 * dst_stride;
207 for (j = 0; j < height; j++) {
209 for (i = 0; i < width * step; i += step) {
213 dst[1][k] = src[i + 2] - g;
214 dst[2][k] = src[i + 0] - g;
218 for (i = 0; i < width * step; i += step) {
222 dst[1][k] = src[i + 2] - g;
223 dst[2][k] = src[i + 0] - g;
224 dst[3][k] = src[i + 3];
228 k += dst_stride - width;
239 for (j = 0; j < height; j++) {
240 for (i = 0; i < width; i++)
255 for (j = 0; j < height; j++) {
256 for (i = 0; i < width; i++) {
257 *dst++ = src[i] - prev;
274 for (i = 0; i < width; i++) {
275 *dst++ = src[i] - prev;
291 for (j = 1; j < height; j++) {
300 int height, uint64_t *counts)
304 for (j = 0; j < height; j++) {
305 for (i = 0; i < width; i++) {
321 while (he[last].len == 255 && last)
325 for (i = last; i >= 0; i--) {
326 he[i].code = code >> (32 - he[i].len);
327 code += 0x80000000u >> (he[i].len - 1);
344 for (j = 0; j < height; j++) {
345 for (i = 0; i < width; i++)
372 uint64_t counts[256] = { 0 };
376 uint32_t offset = 0, slice_len = 0;
377 int i, sstart, send = 0;
383 for (i = 0; i < c->slices; i++) {
385 send = height * (i + 1) / c->slices;
386 write_plane(src + sstart * stride, dst + sstart * width,
387 stride, width, send - sstart);
391 for (i = 0; i < c->slices; i++) {
393 send = height * (i + 1) / c->slices;
394 left_predict(src + sstart * stride, dst + sstart * width,
395 stride, width, send - sstart);
399 for (i = 0; i < c->slices; i++) {
401 send = height * (i + 1) / c->slices;
403 stride, width, send - sstart);
416 for (symbol = 0; symbol < 256; symbol++) {
418 if (counts[symbol]) {
420 if (counts[symbol] == width * height) {
425 for (i = 0; i < 256; i++) {
427 bytestream2_put_byte(pb, 0);
429 bytestream2_put_byte(pb, 0xFF);
433 for (i = 0; i < c->slices; i++)
434 bytestream2_put_le32(pb, 0);
451 for (i = 0; i < 256; i++) {
452 bytestream2_put_byte(pb, lengths[i]);
454 he[i].len = lengths[i];
462 for (i = 0; i < c->slices; i++) {
464 send = height * (i + 1) / c->slices;
471 width * (send - sstart), width,
472 send - sstart, he) >> 3;
474 slice_len = offset - slice_len;
482 bytestream2_put_le32(pb, offset);
486 offset - slice_len, SEEK_CUR);
505 const AVFrame *pic, int *got_packet)
523 "Error allocating the output packet, or the provided packet "
549 for (i = 0; i < c->planes; i++) {
561 for (i = 0; i < c->planes; i++) {
563 pic->linesize[i], width >> !!i, height, &pb);
572 for (i = 0; i < c->planes; i++) {
574 pic->linesize[i], width >> !!i, height >> !!i,
595 bytestream2_put_le32(&pb, frame_info);
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
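The encoder's per-slice buffers come from these allocation helpers; as a minimal illustration of the allocate/release pattern (the alloc_scratch/free_scratch names and the size computation are made up for this sketch):

#include "libavcodec/avcodec.h"   /* FF_INPUT_BUFFER_PADDING_SIZE, AVERROR */
#include "libavutil/mem.h"        /* av_malloc(), av_freep() */

static int alloc_scratch(uint8_t **buf, int width, int height)
{
    /* Padded, alignment-friendly allocation; av_freep() later resets the
     * caller's pointer to NULL in addition to freeing the block. */
    *buf = av_malloc(width * height + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!*buf)
        return AVERROR(ENOMEM);
    return 0;
}

static void free_scratch(uint8_t **buf)
{
    av_freep(buf);   /* *buf becomes NULL */
}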
av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
This structure describes decoded (raw) audio or video data.
static void write_plane(uint8_t *src, uint8_t *dst, int stride, int width, int height)
static void mangle_rgb_planes(uint8_t *dst[4], int dst_stride, uint8_t *src, int step, int stride, int width, int height)
static int huff_cmp_sym(const void *a, const void *b)
packed RGB 8:8:8, 24bpp, RGBRGB...
AVFrame * coded_frame
the picture in the bitstream
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
void ff_huff_gen_len_table(uint8_t *dst, const uint64_t *stats)
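Together with count_usage() and calculate_codes() excerpted above, this is the whole Huffman setup: gather per-symbol counts, derive code lengths, then hand out canonical codes starting from the longest code. A condensed, illustrative sketch of that pipeline (build_codes() is a made-up wrapper and the sort step is only indicated by a comment):

#include <stdint.h>
#include "libavcodec/huffman.h"   /* ff_huff_gen_len_table() */
#include "libavcodec/utvideo.h"   /* HuffEntry: sym, len, code */

static void build_codes(const uint8_t *src, int n, HuffEntry he[256])
{
    uint64_t counts[256] = { 0 };
    uint8_t  lengths[256];
    uint32_t code;
    int      i, last;

    for (i = 0; i < n; i++)                   /* cf. count_usage()      */
        counts[src[i]]++;

    ff_huff_gen_len_table(lengths, counts);   /* counts -> code lengths */

    for (i = 0; i < 256; i++) {
        he[i].sym = i;
        he[i].len = lengths[i];
    }
    /* ... sort he[] by length here, as the encoder does with
     * ff_ut_huff_cmp_len(), so the longest codes end up last ... */

    last = 255;
    while (he[last].len == 255 && last)       /* skip entries flagged 255,
                                                 as in calculate_codes() */
        last--;

    code = 1;
    for (i = last; i >= 0; i--) {             /* canonical code assignment */
        he[i].code = code >> (32 - he[i].len);
        code      += 0x80000000u >> (he[i].len - 1);
    }
}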
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static av_cold int utvideo_encode_init(AVCodecContext *avctx)
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
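A typical use is a scratch buffer that only grows across frames; a minimal sketch (the Scratch struct and ensure_scratch() are invented for illustration):

#include <stddef.h>
#include <stdint.h>
#include "libavutil/error.h"   /* AVERROR() */
#include "libavutil/mem.h"     /* av_fast_malloc() */

/* The buffer is reallocated only when the requested size exceeds what is
 * already allocated; buf_size is updated by av_fast_malloc() itself. */
typedef struct Scratch {
    uint8_t     *buf;
    unsigned int buf_size;
} Scratch;

static int ensure_scratch(Scratch *s, size_t needed)
{
    av_fast_malloc(&s->buf, &s->buf_size, needed);
    if (!s->buf)
        return AVERROR(ENOMEM);
    return 0;
}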
AVCodec ff_utvideo_encoder
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic, int *got_packet)
#define MKTAG(a, b, c, d)
static int init(AVCodecParserContext *s)
static void left_predict(uint8_t *src, uint8_t *dst, int stride, int width, int height)
const int ff_ut_pred_order[5]
void(* sub_hfyu_median_prediction)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
subtract huffyuv's variant of median prediction; note, this might read from src1[-1], src2[-1]
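For reference, the prediction being subtracted is the HuffYUV median one: each pixel is predicted by the median of its left neighbour, its top neighbour, and left + top - topleft. A plain-C sketch modelled on the generic fallback (the optimized DSP routine is what the encoder actually calls; mid_pred3() is a local helper):

#include <stdint.h>

/* Return the median of a, b, c. */
static int mid_pred3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }
    if (b > c) b = c;
    if (a > b) b = a;
    return b;
}

/* Write the difference between each pixel of the current row (src2) and the
 * median of (left, top, left + top - topleft); src1 is the row above.
 * left/left_top carry state across calls, as in the DSP hook. */
static void sub_median_c(uint8_t *dst, const uint8_t *src1,
                         const uint8_t *src2, int w,
                         int *left, int *left_top)
{
    int i;
    uint8_t l = *left, lt = *left_top;

    for (i = 0; i < w; i++) {
        const int pred = mid_pred3(l, src1[i], (l + src1[i] - lt) & 0xFF);
        lt = src1[i];
        l  = src2[i];
        dst[i] = l - pred;   /* store the residual */
    }
    *left     = l;
    *left_top = lt;
}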
int reference
is this picture used as reference. The values for this are the same as the MpegEncContext.picture_structure variable, that is 1 -> top field, 2 -> bottom field, 3 -> frame/both fields.
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
void av_log(void *avcl, int level, const char *fmt,...)
const char * name
Name of the codec implementation.
static void put_bits(PutBitContext *s, int n, unsigned int value)
Write up to 31 bits into a bitstream.
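The bitstream side of write_huff_codes() boils down to the usual PutBitContext sequence: initialize over the destination buffer, emit one variable-length code per input byte, pad the tail, and report the bit count. A minimal sketch, assuming FFmpeg's internal put_bits.h (codes[] and lens[] stand in for the per-symbol HuffEntry fields):

#include <stdint.h>
#include "libavcodec/put_bits.h"

static int emit_codes(const uint8_t *src, int n,
                      const uint32_t *codes, const uint8_t *lens,
                      uint8_t *dst, int dst_size)
{
    PutBitContext pb;
    int i;

    init_put_bits(&pb, dst, dst_size);
    for (i = 0; i < n; i++)
        put_bits(&pb, lens[src[i]], codes[src[i]]);  /* up to 31 bits each */
    flush_put_bits(&pb);                             /* zero-pad the tail  */

    return put_bits_count(&pb);                      /* bits written       */
}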
static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size, int width, int height, HuffEntry *he)
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
int flags
A combination of AV_PKT_FLAG values.
static int put_bits_count(PutBitContext *s)
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst, int stride, int width, int height)
AVFrame * avcodec_alloc_frame(void)
Allocate an AVFrame and set its fields to default values.
enum AVPictureType pict_type
Picture type of the frame, see ?_TYPE below.
int width
picture width / height.
int ff_alloc_packet(AVPacket *avpkt, int size)
Check AVPacket size and/or allocate data.
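On the encoder side this is the usual preamble before any bytestream writing; a condensed sketch (prepare_packet() and bytes_needed are illustrative; the real code uses a worst-case size estimate):

#include "libavcodec/avcodec.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/internal.h"   /* ff_alloc_packet() */

static int prepare_packet(AVCodecContext *avctx, AVPacket *pkt,
                          int bytes_needed, PutByteContext *pb)
{
    int ret = ff_alloc_packet(pkt, bytes_needed);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Cannot get an output packet of %d bytes.\n", bytes_needed);
        return ret;
    }
    bytestream2_init_writer(pb, pkt->data, pkt->size);
    return 0;
}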
static int encode_plane(AVCodecContext *avctx, uint8_t *src, uint8_t *dst, int stride, int width, int height, PutByteContext *pb)
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
int linesize[AV_NUM_DATA_POINTERS]
Size, in bytes, of the data for each picture/channel plane.
main external API structure.
static void close(AVCodecParserContext *s)
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
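MKTAG() above assembles exactly this LSB-first value; for instance, one of the Ut Video fourccs:

#include "libavutil/common.h"   /* MKTAG() */

/* "ULY0" (the Ut Video 4:2:0 tag) stored LSB-first:
 * 'U' | 'L' << 8 | 'Y' << 16 | '0' << 24  ==  0x30594C55.
 * Values like this end up in AVCodecContext.codec_tag. */
static const unsigned int uly0_tag = MKTAG('U', 'L', 'Y', '0');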
#define FF_INPUT_BUFFER_PADDING_SIZE
huffman tree builder and VLC generator
int ff_ut_huff_cmp_len(const void *a, const void *b)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static void calculate_codes(HuffEntry *he)
static void count_usage(uint8_t *src, int width, int height, uint64_t *counts)
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
struct UtvideoContext UtvideoContext
#define AVERROR_INVALIDDATA
int prediction_method
prediction method (needed for huffyuv)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
#define AVERROR_OPTION_NOT_FOUND
uint8_t * slice_buffer[4]
int key_frame
1 -> keyframe, 0-> not
AVPixelFormat
Pixel format.
static av_cold int utvideo_encode_close(AVCodecContext *avctx)
This structure stores compressed data.
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...