Libav
output.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a copy
5  * of this software and associated documentation files (the "Software"), to deal
6  * in the Software without restriction, including without limitation the rights
7  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8  * copies of the Software, and to permit persons to whom the Software is
9  * furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20  * THE SOFTWARE.
21  */
22 
32 #include <stdlib.h>
33 #include <stdio.h>
34 #include <string.h>
35 #include <math.h>
36 
38 #include "libavutil/mathematics.h"
39 #include "libavutil/opt.h"
40 #include "libavformat/avformat.h"
42 #include "libswscale/swscale.h"
43 
44 /* 5 seconds stream duration */
45 #define STREAM_DURATION 5.0
46 #define STREAM_FRAME_RATE 25 /* 25 images/s */
47 #define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
48 #define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
49 
/* scaler quality flag passed to sws_getContext() when the encoder's pixel
 * format differs from the generated YUV420P picture */
50 #define SCALE_FLAGS SWS_BICUBIC
51 
52 // a wrapper around a single output AVStream
53 typedef struct OutputStream {
54  AVStream *st;
55 
56  /* pts of the next frame that will be generated */
57  int64_t next_pts;
58 
/* NOTE(review): listing lines 59-61 are missing from this extraction; per the
 * cross-reference section below this struct also declares "AVFrame *frame"
 * (output.c:59) and "AVFrame *tmp_frame" (output.c:60) — TODO recover from
 * the original output.c. */
61 
/* signal-generator state for the dummy audio: current phase t, per-sample
 * phase increment tincr, and tincr2, which sweeps the frequency over time */
62  float t, tincr, tincr2;
63 
/* NOTE(review): listing lines 64-65 are also missing; the cross-reference
 * section shows "struct SwsContext *sws_ctx" (output.c:64) and
 * "AVAudioResampleContext *avr" (output.c:65). */
66 } OutputStream;
67 
68 /**************************************************************/
69 /* audio output */
70 
71 /*
72  * add an audio output stream
73  */
/* NOTE(review): listing line 74 (start of the signature) is missing from this
 * extraction; the cross-reference section gives the full signature as
 * "static void add_audio_stream(OutputStream *ost, AVFormatContext *oc,
 *  enum AVCodecID codec_id)". */
75  enum AVCodecID codec_id)
76 {
77  AVCodecContext *c;
78  AVCodec *codec;
79  int ret;
80 
81  /* find the audio encoder */
82  codec = avcodec_find_encoder(codec_id);
83  if (!codec) {
84  fprintf(stderr, "codec not found\n");
85  exit(1);
86  }
87 
88  ost->st = avformat_new_stream(oc, codec);
89  if (!ost->st) {
90  fprintf(stderr, "Could not alloc stream\n");
91  exit(1);
92  }
93 
94  c = ost->st->codec;
95 
96  /* put sample parameters */
/* prefer the encoder's first supported sample format / rate, with fallbacks */
97  c->sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_S16;
98  c->sample_rate = codec->supported_samplerates ? codec->supported_samplerates[0] : 44100;
/* NOTE(review): listing lines 99-100 are missing; presumably they set
 * c->channel_layout and c->channels (cf. av_get_channel_layout_nb_channels in
 * the cross-reference section) — TODO confirm against the original output.c. */
101  c->bit_rate = 64000;
102 
103  ost->st->time_base = (AVRational){ 1, c->sample_rate };
104 
105  // some formats want stream headers to be separate
106  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
/* NOTE(review): listing line 107 (the body of this if) is missing; presumably
 * "c->flags |= CODEC_FLAG_GLOBAL_HEADER;" (cf. CODEC_FLAG_GLOBAL_HEADER in
 * the cross-reference section) — TODO confirm. */
108 
109  /* initialize sample format conversion;
110  * to simplify the code, we always pass the data through lavr, even
111  * if the encoder supports the generated format directly -- the price is
112  * some extra data copying;
113  */
114  ost->avr = avresample_alloc_context();
115  if (!ost->avr) {
116  fprintf(stderr, "Error allocating the resampling context\n");
117  exit(1);
118  }
119 
/* fixed generator-side format (S16 stereo @ 44100) -> encoder's format */
120  av_opt_set_int(ost->avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
121  av_opt_set_int(ost->avr, "in_sample_rate", 44100, 0);
122  av_opt_set_int(ost->avr, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
123  av_opt_set_int(ost->avr, "out_sample_fmt", c->sample_fmt, 0);
124  av_opt_set_int(ost->avr, "out_sample_rate", c->sample_rate, 0);
125  av_opt_set_int(ost->avr, "out_channel_layout", c->channel_layout, 0);
126 
127  ret = avresample_open(ost->avr);
128  if (ret < 0) {
129  fprintf(stderr, "Error opening the resampling context\n");
130  exit(1);
131  }
132 }
133 
/* Allocate an audio AVFrame with the given format/layout/rate, and (when
 * nb_samples > 0) its sample buffers; exits on allocation failure.
 * NOTE(review): listing line 134 (start of the signature) is missing; the
 * cross-reference section gives it as
 * "static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,". */
135  uint64_t channel_layout,
136  int sample_rate, int nb_samples)
137 {
138  AVFrame *frame = av_frame_alloc();
139  int ret;
140 
141  if (!frame) {
142  fprintf(stderr, "Error allocating an audio frame\n");
143  exit(1);
144  }
145 
146  frame->format = sample_fmt;
147  frame->channel_layout = channel_layout;
148  frame->sample_rate = sample_rate;
149  frame->nb_samples = nb_samples;
150 
/* nb_samples == 0 yields a frame shell without data buffers */
151  if (nb_samples) {
152  ret = av_frame_get_buffer(frame, 0);
153  if (ret < 0) {
154  fprintf(stderr, "Error allocating an audio buffer\n");
155  exit(1);
156  }
157  }
158 
159  return frame;
160 }
161 
/* Open the audio codec, initialize the sine-sweep signal generator, and
 * allocate the encoder-side and generator-side audio frames.
 * NOTE(review): listing line 162 (the signature) is missing; the
 * cross-reference section gives it as
 * "static void open_audio(AVFormatContext *oc, OutputStream *ost)". */
163 {
164  AVCodecContext *c;
165  int nb_samples;
166 
167  c = ost->st->codec;
168 
169  /* open it */
170  if (avcodec_open2(c, NULL, NULL) < 0) {
171  fprintf(stderr, "could not open codec\n");
172  exit(1);
173  }
174 
175  /* init signal generator */
176  ost->t = 0;
177  ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
178  /* increment frequency by 110 Hz per second */
179  ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
180 
/* NOTE(review): listing line 181 (the if condition) is missing; presumably it
 * tests c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE (cf. the
 * cross-reference section) — TODO confirm. */
182  nb_samples = 10000;
183  else
184  nb_samples = c->frame_size;
185 
/* NOTE(review): listing lines 186 and 188 (first lines of the two
 * alloc_audio_frame() calls, presumably allocating ost->frame in the encoder's
 * format and ost->tmp_frame as S16 stereo) are missing — TODO confirm. */
187  c->sample_rate, nb_samples);
189  44100, nb_samples);
190 }
191 
192 /* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
193  * 'nb_channels' channels. */
/* NOTE(review): listing line 194 (the signature) is missing; the
 * cross-reference section gives it as
 * "static AVFrame *get_audio_frame(OutputStream *ost)".
 * Returns NULL once STREAM_DURATION seconds worth of audio was generated. */
195 {
196  AVFrame *frame = ost->tmp_frame;
197  int j, i, v;
198  int16_t *q = (int16_t*)frame->data[0];
199 
200  /* check if we want to generate more frames */
201  if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
202  STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
203  return NULL;
204 
205 
/* interleaved sine sweep: same sample value written to every channel */
206  for (j = 0; j < frame->nb_samples; j++) {
207  v = (int)(sin(ost->t) * 10000);
208  for (i = 0; i < ost->st->codec->channels; i++)
209  *q++ = v;
210  ost->t += ost->tincr;
211  ost->tincr += ost->tincr2;
212  }
213 
214  return frame;
215 }
216 
217 /* if a frame is provided, send it to the encoder, otherwise flush the encoder;
218  * return 1 when encoding is finished, 0 otherwise
219  */
/* NOTE(review): listing line 220 (start of the signature) is missing; the
 * cross-reference section gives it as
 * "static int encode_audio_frame(AVFormatContext *oc, OutputStream *ost,". */
221  AVFrame *frame)
222 {
223  AVPacket pkt = { 0 }; // data and size must be 0;
224  int got_packet;
225 
226  av_init_packet(&pkt);
227  avcodec_encode_audio2(ost->st->codec, &pkt, frame, &got_packet);
228 
229  if (got_packet) {
230  pkt.stream_index = ost->st->index;
231 
/* rescale packet timestamps from the codec time base to the stream time base */
232  av_packet_rescale_ts(&pkt, ost->st->codec->time_base, ost->st->time_base);
233 
234  /* Write the compressed frame to the media file. */
235  if (av_interleaved_write_frame(oc, &pkt) != 0) {
236  fprintf(stderr, "Error while writing audio frame\n");
237  exit(1);
238  }
239  }
240 
/* finished only when flushing (frame == NULL) and the encoder has drained */
241  return (frame || got_packet) ? 0 : 1;
242 }
243 
244 /*
245  * encode one audio frame and send it to the muxer
246  * return 1 when encoding is finished, 0 otherwise
247  */
/* NOTE(review): listing line 248 (the signature) is missing; the
 * cross-reference section gives it as
 * "static int process_audio_stream(AVFormatContext *oc, OutputStream *ost)". */
249 {
250  AVFrame *frame;
251  int got_output = 0;
252  int ret;
253 
254  frame = get_audio_frame(ost);
255  got_output |= !!frame;
256 
257  /* feed the data to lavr */
258  if (frame) {
/* output == NULL: converted samples stay buffered in lavr's output FIFO */
259  ret = avresample_convert(ost->avr, NULL, 0, 0,
260  frame->extended_data, frame->linesize[0],
261  frame->nb_samples);
262  if (ret < 0) {
263  fprintf(stderr, "Error feeding audio data to the resampler\n");
264  exit(1);
265  }
266  }
267 
/* drain full encoder frames while generating; once the generator is done
 * (frame == NULL), drain whatever the resampler still holds */
268  while ((frame && avresample_available(ost->avr) >= ost->frame->nb_samples) ||
269  (!frame && avresample_get_out_samples(ost->avr, 0))) {
270  /* when we pass a frame to the encoder, it may keep a reference to it
271  * internally;
272  * make sure we do not overwrite it here
273  */
274  ret = av_frame_make_writable(ost->frame);
275  if (ret < 0)
276  exit(1);
277 
278  /* the difference between the two avresample calls here is that the
279  * first one just reads the already converted data that is buffered in
280  * the lavr output buffer, while the second one also flushes the
281  * resampler */
282  if (frame) {
283  ret = avresample_read(ost->avr, ost->frame->extended_data,
284  ost->frame->nb_samples);
285  } else {
286  ret = avresample_convert(ost->avr, ost->frame->extended_data,
287  ost->frame->linesize[0], ost->frame->nb_samples,
288  NULL, 0, 0);
289  }
290 
291  if (ret < 0) {
292  fprintf(stderr, "Error while resampling\n");
293  exit(1);
294  } else if (frame && ret != ost->frame->nb_samples) {
295  fprintf(stderr, "Too few samples returned from lavr\n");
296  exit(1);
297  }
298 
/* the final flushed frame may be shorter than the encoder frame size */
299  ost->frame->nb_samples = ret;
300 
301  ost->frame->pts = ost->next_pts;
302  ost->next_pts += ost->frame->nb_samples;
303 
/* ret == 0 means nothing left: pass NULL to flush the encoder */
304  got_output |= encode_audio_frame(oc, ost, ret ? ost->frame : NULL);
305  }
306 
307  return !got_output;
308 }
309 
310 /**************************************************************/
311 /* video output */
312 
313 /* Add a video output stream. */
/* NOTE(review): listing line 314 (start of the signature) is missing; the
 * cross-reference section gives it as
 * "static void add_video_stream(OutputStream *ost, AVFormatContext *oc,". */
315  enum AVCodecID codec_id)
316 {
317  AVCodecContext *c;
318  AVCodec *codec;
319 
320  /* find the video encoder */
321  codec = avcodec_find_encoder(codec_id);
322  if (!codec) {
323  fprintf(stderr, "codec not found\n");
324  exit(1);
325  }
326 
327  ost->st = avformat_new_stream(oc, codec);
328  if (!ost->st) {
329  fprintf(stderr, "Could not alloc stream\n");
330  exit(1);
331  }
332 
333  c = ost->st->codec;
334 
335  /* Put sample parameters. */
336  c->bit_rate = 400000;
337  /* Resolution must be a multiple of two. */
338  c->width = 352;
339  c->height = 288;
340  /* timebase: This is the fundamental unit of time (in seconds) in terms
341  * of which frame timestamps are represented. For fixed-fps content,
342  * timebase should be 1/framerate and timestamp increments should be
343  * identical to 1. */
344  ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
345  c->time_base = ost->st->time_base;
346 
347  c->gop_size = 12; /* emit one intra frame every twelve frames at most */
348  c->pix_fmt = STREAM_PIX_FMT;
349  if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
350  /* just for testing, we also add B frames */
351  c->max_b_frames = 2;
352  }
353  if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
354  /* Needed to avoid using macroblocks in which some coeffs overflow.
355  * This does not happen with normal video, it just happens here as
356  * the motion of the chroma plane does not match the luma plane. */
357  c->mb_decision = 2;
358  }
359  /* Some formats want stream headers to be separate. */
360  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
/* NOTE(review): listing line 361 (the body of this if) is missing; presumably
 * "c->flags |= CODEC_FLAG_GLOBAL_HEADER;" (cf. CODEC_FLAG_GLOBAL_HEADER in
 * the cross-reference section) — TODO confirm. */
362 }
363 
/* Allocate a video AVFrame with the given pixel format and dimensions,
 * including its data buffers; exits on buffer-allocation failure, returns
 * NULL only if the frame struct itself cannot be allocated.
 * NOTE(review): listing line 364 (the signature) is missing; the
 * cross-reference section gives it as
 * "static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width,
 *  int height)". */
365 {
366  AVFrame *picture;
367  int ret;
368 
369  picture = av_frame_alloc();
370  if (!picture)
371  return NULL;
372 
373  picture->format = pix_fmt;
374  picture->width = width;
375  picture->height = height;
376 
377  /* allocate the buffers for the frame data */
378  ret = av_frame_get_buffer(picture, 32);
379  if (ret < 0) {
380  fprintf(stderr, "Could not allocate frame data.\n");
381  exit(1);
382  }
383 
384  return picture;
385 }
386 
/* Open the video codec and allocate the frame(s) used for encoding.
 * NOTE(review): listing line 387 (the signature) is missing; the
 * cross-reference section gives it as
 * "static void open_video(AVFormatContext *oc, OutputStream *ost)". */
388 {
389  AVCodecContext *c;
390 
391  c = ost->st->codec;
392 
393  /* open the codec */
394  if (avcodec_open2(c, NULL, NULL) < 0) {
395  fprintf(stderr, "could not open codec\n");
396  exit(1);
397  }
398 
399  /* Allocate the encoded raw picture. */
400  ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
401  if (!ost->frame) {
402  fprintf(stderr, "Could not allocate picture\n");
403  exit(1);
404  }
405 
406  /* If the output format is not YUV420P, then a temporary YUV420P
407  * picture is needed too. It is then converted to the required
408  * output format. */
409  ost->tmp_frame = NULL;
410  if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
/* NOTE(review): listing line 411 is missing; presumably
 * "ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);"
 * — TODO confirm against the original output.c. */
412  if (!ost->tmp_frame) {
413  fprintf(stderr, "Could not allocate temporary picture\n");
414  exit(1);
415  }
416  }
417 }
418 
419 /* Prepare a dummy image. */
420 static void fill_yuv_image(AVFrame *pict, int frame_index,
421  int width, int height)
422 {
423  int x, y, i, ret;
424 
425  /* when we pass a frame to the encoder, it may keep a reference to it
426  * internally;
427  * make sure we do not overwrite it here
428  */
429  ret = av_frame_make_writable(pict);
430  if (ret < 0)
431  exit(1);
432 
433  i = frame_index;
434 
435  /* Y */
436  for (y = 0; y < height; y++)
437  for (x = 0; x < width; x++)
438  pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
439 
440  /* Cb and Cr */
441  for (y = 0; y < height / 2; y++) {
442  for (x = 0; x < width / 2; x++) {
443  pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
444  pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
445  }
446  }
447 }
448 
/* Produce the next raw video frame to encode, or NULL when STREAM_DURATION
 * seconds have been generated.
 * NOTE(review): listing line 449 (the signature) is missing; the
 * cross-reference section gives it as
 * "static AVFrame *get_video_frame(OutputStream *ost)". */
450 {
451  AVCodecContext *c = ost->st->codec;
452 
453  /* check if we want to generate more frames */
454  if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
455  STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
456  return NULL;
457 
458  if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
459  /* as we only generate a YUV420P picture, we must convert it
460  * to the codec pixel format if needed */
/* scaler context is created lazily on the first converted frame */
461  if (!ost->sws_ctx) {
462  ost->sws_ctx = sws_getContext(c->width, c->height,
/* NOTE(review): listing line 463 is missing; presumably the source pixel
 * format argument AV_PIX_FMT_YUV420P — TODO confirm. */
464  c->width, c->height,
465  c->pix_fmt,
/* NOTE(review): listing line 466 is missing; presumably
 * "SCALE_FLAGS, NULL, NULL, NULL);" closing the sws_getContext() call
 * (cf. SCALE_FLAGS in the cross-reference section) — TODO confirm. */
467  if (!ost->sws_ctx) {
468  fprintf(stderr,
469  "Cannot initialize the conversion context\n");
470  exit(1);
471  }
472  }
473  fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
474  sws_scale(ost->sws_ctx, ost->tmp_frame->data, ost->tmp_frame->linesize,
475  0, c->height, ost->frame->data, ost->frame->linesize);
476  } else {
477  fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
478  }
479 
480  ost->frame->pts = ost->next_pts++;
481 
482  return ost->frame;
483 }
484 
485 /*
486  * encode one video frame and send it to the muxer
487  * return 1 when encoding is finished, 0 otherwise
488  */
/* NOTE(review): listing line 489 (the signature) is missing; the
 * cross-reference section gives it as
 * "static int write_video_frame(AVFormatContext *oc, OutputStream *ost)". */
490 {
491  int ret;
492  AVCodecContext *c;
493  AVFrame *frame;
494  int got_packet = 0;
495 
496  c = ost->st->codec;
497 
498  frame = get_video_frame(ost);
499 
500  if (oc->oformat->flags & AVFMT_RAWPICTURE) {
501  /* a hack to avoid data copy with some raw video muxers */
502  AVPacket pkt;
503  av_init_packet(&pkt);
504 
505  if (!frame)
506  return 1;
507 
508  pkt.flags |= AV_PKT_FLAG_KEY;
509  pkt.stream_index = ost->st->index;
/* the packet carries a pointer to the frame struct itself, not pixel data */
510  pkt.data = (uint8_t *)frame;
511  pkt.size = sizeof(AVPicture);
512 
513  pkt.pts = pkt.dts = frame->pts;
514  av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
515 
516  ret = av_interleaved_write_frame(oc, &pkt);
517  } else {
518  AVPacket pkt = { 0 };
519  av_init_packet(&pkt);
520 
521  /* encode the image */
/* frame == NULL flushes any frames delayed inside the encoder */
522  ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
523  if (ret < 0) {
524  fprintf(stderr, "Error encoding a video frame\n");
525  exit(1);
526  }
527 
528  if (got_packet) {
529  av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
530  pkt.stream_index = ost->st->index;
531 
532  /* Write the compressed frame to the media file. */
533  ret = av_interleaved_write_frame(oc, &pkt);
534  }
535  }
536  if (ret != 0) {
537  fprintf(stderr, "Error while writing video frame\n");
538  exit(1);
539  }
540 
/* finished only when flushing (frame == NULL) and the encoder has drained */
541  return (frame || got_packet) ? 0 : 1;
542 }
543 
/* Release everything owned by one OutputStream: codec, frames, scaler and
 * resampler contexts.
 * NOTE(review): listing line 544 (the signature) is missing; the
 * cross-reference section gives it as
 * "static void close_stream(AVFormatContext *oc, OutputStream *ost)". */
545 {
546  avcodec_close(ost->st->codec);
547  av_frame_free(&ost->frame);
548  av_frame_free(&ost->tmp_frame);
549  sws_freeContext(ost->sws_ctx);
550  avresample_free(&ost->avr);
551 }
552 
553 /**************************************************************/
554 /* media file output */
555 
/* Entry point: guess the container from the output filename, create one audio
 * and one video stream, then interleave encoded frames until both streams
 * reach STREAM_DURATION. */
556 int main(int argc, char **argv)
557 {
558  OutputStream video_st = { 0 }, audio_st = { 0 };
559  const char *filename;
560  AVOutputFormat *fmt;
561  AVFormatContext *oc;
562  int have_video = 0, have_audio = 0;
563  int encode_video = 0, encode_audio = 0;
564 
565  /* Initialize libavcodec, and register all codecs and formats. */
566  av_register_all();
567 
568  if (argc != 2) {
569  printf("usage: %s output_file\n"
570  "API example program to output a media file with libavformat.\n"
571  "The output format is automatically guessed according to the file extension.\n"
572  "Raw images can also be output by using '%%d' in the filename\n"
573  "\n", argv[0]);
574  return 1;
575  }
576 
577  filename = argv[1];
578 
579  /* Autodetect the output format from the name. default is MPEG. */
580  fmt = av_guess_format(NULL, filename, NULL);
581  if (!fmt) {
582  printf("Could not deduce output format from file extension: using MPEG.\n");
583  fmt = av_guess_format("mpeg", NULL, NULL);
584  }
585  if (!fmt) {
586  fprintf(stderr, "Could not find suitable output format\n");
587  return 1;
588  }
589 
590  /* Allocate the output media context. */
591  oc = avformat_alloc_context();
592  if (!oc) {
593  fprintf(stderr, "Memory error\n");
594  return 1;
595  }
596  oc->oformat = fmt;
597  snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
598 
599  /* Add the audio and video streams using the default format codecs
600  * and initialize the codecs. */
601  if (fmt->video_codec != AV_CODEC_ID_NONE) {
602  add_video_stream(&video_st, oc, fmt->video_codec);
603  have_video = 1;
604  encode_video = 1;
605  }
606  if (fmt->audio_codec != AV_CODEC_ID_NONE) {
607  add_audio_stream(&audio_st, oc, fmt->audio_codec);
608  have_audio = 1;
609  encode_audio = 1;
610  }
611 
612  /* Now that all the parameters are set, we can open the audio and
613  * video codecs and allocate the necessary encode buffers. */
614  if (have_video)
615  open_video(oc, &video_st);
616  if (have_audio)
617  open_audio(oc, &audio_st);
618 
619  av_dump_format(oc, 0, filename, 1);
620 
621  /* open the output file, if needed */
622  if (!(fmt->flags & AVFMT_NOFILE)) {
623  if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
624  fprintf(stderr, "Could not open '%s'\n", filename);
625  return 1;
626  }
627  }
628 
629  /* Write the stream header, if any. */
/* NOTE(review): listing line 630 is missing; presumably the
 * "avformat_write_header(oc, NULL);" call (cf. avformat_write_header in the
 * cross-reference section) — TODO confirm. */
631 
/* interleave by always encoding the stream whose next pts is earliest */
632  while (encode_video || encode_audio) {
633  /* select the stream to encode */
634  if (encode_video &&
635  (!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
636  audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
637  encode_video = !write_video_frame(oc, &video_st);
638  } else {
639  encode_audio = !process_audio_stream(oc, &audio_st);
640  }
641  }
642 
643  /* Write the trailer, if any. The trailer must be written before you
644  * close the CodecContexts open when you wrote the header; otherwise
645  * av_write_trailer() may try to use memory that was freed on
646  * av_codec_close(). */
647  av_write_trailer(oc);
648 
649  /* Close each codec. */
650  if (have_video)
651  close_stream(oc, &video_st);
652  if (have_audio)
653  close_stream(oc, &audio_st);
654 
655  if (!(fmt->flags & AVFMT_NOFILE))
656  /* Close the output file. */
657  avio_close(oc->pb);
658 
659  /* free the stream */
/* NOTE(review): listing line 660 is missing; presumably
 * "avformat_free_context(oc);" (cf. avformat_free_context in the
 * cross-reference section) — TODO confirm. */
661 
662  return 0;
663 }
int avio_open(AVIOContext **s, const char *url, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:778
int64_t next_pts
Definition: output.c:57
const struct AVCodec * codec
Definition: avcodec.h:1059
This structure describes decoded (raw) audio or video data.
Definition: frame.h:135
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:1761
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:535
#define CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:771
static void close_stream(AVFormatContext *oc, OutputStream *ost)
Definition: output.c:544
int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:238
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:1309
int avresample_read(AVAudioResampleContext *avr, uint8_t **output, int nb_samples)
Read samples from the output FIFO.
Definition: utils.c:768
enum AVCodecID video_codec
default video codec
Definition: avformat.h:457
int index
stream index in AVFormatContext
Definition: avformat.h:700
int size
Definition: avcodec.h:974
float tincr
Definition: output.c:62
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:293
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1254
int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of audio.
Definition: utils.c:1314
#define AV_CH_LAYOUT_STEREO
Channel layout with two channels: front left and front right (stereo).
Definition: avcodec.h:3026
AVCodec.
Definition: avcodec.h:2796
void avresample_free(AVAudioResampleContext **avr)
Free AVAudioResampleContext and associated AVOption values.
Definition: utils.c:278
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1175
Format I/O context.
Definition: avformat.h:922
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1799
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT
Definition: avformat.h:465
uint8_t
#define STREAM_DURATION
Definition: output.c:45
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:57
AVOptions.
#define CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
Definition: avcodec.h:657
AVAudioResampleContext * avr
Definition: output.c:65
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:211
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
Definition: utils.c:2521
int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
Definition: utils.c:1412
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:98
uint8_t * data
Definition: avcodec.h:973
struct SwsContext * sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Allocate and return an SwsContext.
Definition: utils.c:1321
external api for the swscale stuff
static AVFrame * alloc_audio_frame(enum AVSampleFormat sample_fmt, uint64_t channel_layout, int sample_rate, int nb_samples)
Definition: output.c:134
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
Definition: output.c:420
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:941
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1019
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:395
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:1710
const uint64_t * channel_layouts
array of support channel layouts, or NULL if unknown. array is terminated by 0
Definition: avcodec.h:2820
AVCodecID
Identify the syntax and semantics of the bitstream.
Definition: avcodec.h:105
int width
width and height of the video frame
Definition: frame.h:174
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:385
static int encode_audio_frame(AVFormatContext *oc, OutputStream *ost, AVFrame *frame)
Definition: output.c:220
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:69
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: aviobuf.c:800
int capabilities
Codec capabilities.
Definition: avcodec.h:2815
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1144
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:268
AVFrame * frame
Definition: output.c:59
enum AVCodecID codec_id
Definition: mov_chan.c:432
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:979
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1852
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:134
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:718
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:381
int bit_rate
the average bitrate
Definition: avcodec.h:1114
static AVFrame * get_video_frame(OutputStream *ost)
Definition: output.c:449
audio channel layout utility functions
char filename[1024]
input or output filename
Definition: avformat.h:998
external API header
static int process_audio_stream(AVFormatContext *oc, OutputStream *ost)
Definition: output.c:248
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
Definition: output.c:489
int width
picture width / height.
Definition: avcodec.h:1224
static AVFrame * get_audio_frame(OutputStream *ost)
Definition: output.c:194
#define AVFMT_GLOBALHEADER
Format wants global header.
Definition: avformat.h:415
static void add_video_stream(OutputStream *ost, AVFormatContext *oc, enum AVCodecID codec_id)
Definition: output.c:314
int main(int argc, char **argv)
Definition: output.c:556
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:1666
AVOutputFormat * av_guess_format(const char *short_name, const char *filename, const char *mime_type)
Return the output format in the list of registered output formats which best matches the provided par...
Definition: format.c:104
int mb_decision
macroblock decision mode
Definition: avcodec.h:1581
enum AVPixelFormat pix_fmt
Definition: movenc.c:843
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:110
if(ac->has_optimized_func)
Stream structure.
Definition: avformat.h:699
static void add_audio_stream(OutputStream *ost, AVFormatContext *oc, enum AVCodecID codec_id)
Definition: output.c:74
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:186
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1811
NULL
Definition: eval.c:55
int avresample_available(AVAudioResampleContext *avr)
Return the number of available samples in the output FIFO.
Definition: utils.c:744
static int width
Definition: utils.c:156
sample_fmt
Definition: avconv_filter.c:68
#define STREAM_PIX_FMT
Definition: output.c:48
AVSampleFormat
Audio Sample Formats.
Definition: samplefmt.h:61
enum AVCodecID codec_id
Definition: avcodec.h:1067
int sample_rate
samples per second
Definition: avcodec.h:1791
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:153
AVIOContext * pb
I/O context.
Definition: avformat.h:964
#define AVFMT_RAWPICTURE
Format wants AVPicture structure for raw picture data.
Definition: avformat.h:412
main external API structure.
Definition: avcodec.h:1050
int avresample_get_out_samples(AVAudioResampleContext *avr, int in_nb_samples)
Provide the upper bound on the number of samples the configured conversion would output.
Definition: utils.c:749
int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
Scale the image slice in srcSlice and put the resulting scaled slice in the image in dst...
int sample_rate
Sample rate of the audio data.
Definition: frame.h:376
rational number numerator/denominator
Definition: rational.h:43
#define SCALE_FLAGS
Definition: output.c:50
static AVFrame * alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
Definition: output.c:364
int avresample_convert(AVAudioResampleContext *avr, uint8_t **output, int out_plane_size, int out_samples, uint8_t **input, int in_plane_size, int in_samples)
Convert input samples and write them to the output FIFO.
Definition: utils.c:330
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:982
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:2445
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:175
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
Definition: frame.c:325
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:141
int height
Definition: gxfenc.c:72
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1245
AVAudioResampleContext * avresample_alloc_context(void)
Allocate AVAudioResampleContext and set options.
Definition: options.c:96
Main libavformat public API header.
static void open_video(AVFormatContext *oc, OutputStream *ost)
Definition: output.c:387
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:65
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:409
signed 16 bits
Definition: samplefmt.h:64
AVStream * st
Definition: avconv.h:303
static void open_audio(AVFormatContext *oc, OutputStream *ost)
Definition: output.c:162
float t
Definition: output.c:62
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:47
int channels
number of audio channels
Definition: avcodec.h:1792
enum AVCodecID audio_codec
default audio codec
Definition: avformat.h:456
const int * supported_samplerates
array of supported audio samplerates, or NULL if unknown, array is terminated by 0 ...
Definition: avcodec.h:2818
AVFrame * tmp_frame
Definition: output.c:60
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:972
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:585
int height
Definition: frame.h:174
enum AVSampleFormat * sample_fmts
array of supported sample formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:2819
int stream_index
Definition: avcodec.h:975
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:741
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:169
#define STREAM_FRAME_RATE
Definition: output.c:46
AVPixelFormat
Pixel format.
Definition: pixfmt.h:63
This structure stores compressed data.
Definition: avcodec.h:950
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:51
int avresample_open(AVAudioResampleContext *avr)
Initialize AVAudioResampleContext.
Definition: utils.c:36
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:179
float tincr2
Definition: output.c:62
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:966
for(j=16;j >0;--j)
struct SwsContext * sws_ctx
Definition: output.c:64