From bac3ab13ea6a9dd8853e79ef3eacf51d234c8774 Mon Sep 17 00:00:00 2001
From: Dustin Brody
Date: Thu, 11 Aug 2011 08:57:58 -0400
Subject: [PATCH 01/20] h264: notice memory allocation failure

Signed-off-by: Ronald S. Bultje
---
 libavcodec/h264.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 9ab249826b..c2229ffcce 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -1138,7 +1138,10 @@ static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContex
         memcpy(&h->s + 1, &h1->s + 1, sizeof(H264Context) - sizeof(MpegEncContext)); //copy all fields after MpegEnc
         memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
         memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
-        ff_h264_alloc_tables(h);
+        if (ff_h264_alloc_tables(h) < 0) {
+            av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
+            return AVERROR(ENOMEM);
+        }
         context_init(h);

         for(i=0; i<2; i++){
@@ -2597,7 +2600,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
         h->prev_interlaced_frame = 1;

         init_scan_tables(h);
-        ff_h264_alloc_tables(h);
+        if (ff_h264_alloc_tables(h) < 0) {
+            av_log(h->s.avctx, AV_LOG_ERROR, "Could not allocate memory for h264\n");
+            return AVERROR(ENOMEM);
+        }

         if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_SLICE)) {
             if (context_init(h) < 0) {

From 21d70372341d0e7a1c5ac34c4522850f40c503d5 Mon Sep 17 00:00:00 2001
From: Dustin Brody
Date: Thu, 11 Aug 2011 11:16:09 -0400
Subject: [PATCH 02/20] mjpeg: propagate decode errors from ff_mjpeg_decode_sos and ff_mjpeg_decode_dqt

Signed-off-by: Ronald S. Bultje
---
 libavcodec/mjpegbdec.c | 8 ++++++--
 libavcodec/mjpegdec.c  | 4 +++-
 libavcodec/mxpegdec.c  | 8 ++++++--
 3 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/libavcodec/mjpegbdec.c b/libavcodec/mjpegbdec.c
index be5d853624..a9f94f1c5b 100644
--- a/libavcodec/mjpegbdec.c
+++ b/libavcodec/mjpegbdec.c
@@ -81,7 +81,9 @@ read_header:
     {
         init_get_bits(&s->gb, buf_ptr+dqt_offs, (buf_end - (buf_ptr+dqt_offs))*8);
         s->start_code = DQT;
-        ff_mjpeg_decode_dqt(s);
+        if (ff_mjpeg_decode_dqt(s) < 0 &&
+            avctx->error_recognition >= FF_ER_EXPLODE)
+            return AVERROR_INVALIDDATA;
     }

     dht_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "dht is %d and size is %d\n");
@@ -113,7 +115,9 @@ read_header:
         init_get_bits(&s->gb, buf_ptr+sos_offs, field_size*8);
         s->mjpb_skiptosod = (sod_offs - sos_offs - show_bits(&s->gb, 16));
         s->start_code = SOS;
-        ff_mjpeg_decode_sos(s, NULL, NULL);
+        if (ff_mjpeg_decode_sos(s, NULL, NULL) < 0 &&
+            avctx->error_recognition >= FF_ER_EXPLODE)
+            return AVERROR_INVALIDDATA;
     }

     if (s->interlaced) {
diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c
index c4cfd1af18..81effb4f8c 100644
--- a/libavcodec/mjpegdec.c
+++ b/libavcodec/mjpegdec.c
@@ -1525,7 +1525,9 @@ eoi_parser:
                 av_log(avctx, AV_LOG_WARNING, "Can not process SOS before SOF, skipping\n");
                 break;
                 }
-            ff_mjpeg_decode_sos(s, NULL, NULL);
+            if (ff_mjpeg_decode_sos(s, NULL, NULL) < 0 &&
+                avctx->error_recognition >= FF_ER_EXPLODE)
+                return AVERROR_INVALIDDATA;
             /* buggy avid puts EOI every 10-20th frame */
             /* if restart period is over process EOI */
             if ((s->buggy_avid && !s->interlaced) || s->restart_interval)
diff --git a/libavcodec/mxpegdec.c b/libavcodec/mxpegdec.c
index e710291501..92fd244ec7 100644
--- a/libavcodec/mxpegdec.c
+++ b/libavcodec/mxpegdec.c
@@ -275,9 +275,13 @@ static int mxpeg_decode_frame(AVCodecContext *avctx,
                     return AVERROR(ENOMEM);
                 }

-                ff_mjpeg_decode_sos(jpg, s->mxm_bitmask, reference_ptr);
+                ret = ff_mjpeg_decode_sos(jpg, s->mxm_bitmask, reference_ptr);
+                if (ret < 0 && avctx->error_recognition >= FF_ER_EXPLODE)
+                    return ret;
             } else {
-                ff_mjpeg_decode_sos(jpg, NULL, NULL);
+                ret = ff_mjpeg_decode_sos(jpg, NULL, NULL);
+                if (ret < 0 && avctx->error_recognition >= FF_ER_EXPLODE)
+                    return ret;
             }
             break;

From d241f51e0f7c08060d2fa72117e2a1f273ab0c72 Mon Sep 17 00:00:00 2001
From: Kostya Shishkov
Date: Tue, 9 Aug 2011 11:00:09 +0200
Subject: [PATCH 03/20] Move RV3/4-specific DSP functions into their own context

Signed-off-by: Ronald S. Bultje
---
 libavcodec/dsputil.c         | 28 +------
 libavcodec/dsputil.h         | 18 ++---
 libavcodec/rv30dsp.c         | 80 ++++++++++----------
 libavcodec/rv34.c            | 39 +++++-----
 libavcodec/rv34.h            | 2 +
 libavcodec/rv34dsp.h         | 44 +++++++++++
 libavcodec/rv40dsp.c         | 138 ++++++++++++++++++-----------------
 libavcodec/x86/Makefile      | 2 +
 libavcodec/x86/dsputil_mmx.c | 20 -----
 libavcodec/x86/rv40dsp.c     | 60 +++++++++++++++
 10 files changed, 253 insertions(+), 178 deletions(-)
 create mode 100644 libavcodec/rv34dsp.h
 create mode 100644 libavcodec/x86/rv40dsp.c

diff --git a/libavcodec/dsputil.c b/libavcodec/dsputil.c
index d31860166d..039cf0b213 100644
--- a/libavcodec/dsputil.c
+++ b/libavcodec/dsputil.c
@@ -1280,16 +1280,16 @@ static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int
 }

 #if CONFIG_RV40_DECODER
-static void put_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){
+void ff_put_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){
     put_pixels16_xy2_8_c(dst, src, stride, 16);
 }
-static void avg_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){
+void ff_avg_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){
     avg_pixels16_xy2_8_c(dst, src, stride, 16);
 }
-static void put_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){
+void ff_put_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){
     put_pixels8_xy2_8_c(dst, src, stride, 8);
 }
-static void avg_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){
+void ff_avg_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){
     avg_pixels8_xy2_8_c(dst, src, stride, 8);
 }
 #endif /* CONFIG_RV40_DECODER */
@@ -2903,16 +2903,6 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
 #if CONFIG_WMV2_DECODER || CONFIG_VC1_DECODER
     ff_intrax8dsp_init(c,avctx);
 #endif
-#if CONFIG_RV30_DECODER
-    ff_rv30dsp_init(c,avctx);
-#endif
-#if CONFIG_RV40_DECODER
-    ff_rv40dsp_init(c,avctx);
-    c->put_rv40_qpel_pixels_tab[0][15] = put_rv40_qpel16_mc33_c;
-    c->avg_rv40_qpel_pixels_tab[0][15] = avg_rv40_qpel16_mc33_c;
-    c->put_rv40_qpel_pixels_tab[1][15] = put_rv40_qpel8_mc33_c;
-    c->avg_rv40_qpel_pixels_tab[1][15] = avg_rv40_qpel8_mc33_c;
-#endif

     c->put_mspel_pixels_tab[0]= ff_put_pixels8x8_c;
     c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c;
@@ -3124,16 +3114,6 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
             c->avg_2tap_qpel_pixels_tab[0][i]= c->avg_h264_qpel_pixels_tab[0][i];
     }

-    c->put_rv30_tpel_pixels_tab[0][0] = c->put_h264_qpel_pixels_tab[0][0];
-    c->put_rv30_tpel_pixels_tab[1][0] = c->put_h264_qpel_pixels_tab[1][0];
-    c->avg_rv30_tpel_pixels_tab[0][0] = c->avg_h264_qpel_pixels_tab[0][0];
-    c->avg_rv30_tpel_pixels_tab[1][0] = c->avg_h264_qpel_pixels_tab[1][0];
-
-    c->put_rv40_qpel_pixels_tab[0][0] = c->put_h264_qpel_pixels_tab[0][0];
-    c->put_rv40_qpel_pixels_tab[1][0] = c->put_h264_qpel_pixels_tab[1][0];
-    c->avg_rv40_qpel_pixels_tab[0][0] = c->avg_h264_qpel_pixels_tab[0][0];
-    c->avg_rv40_qpel_pixels_tab[1][0] = c->avg_h264_qpel_pixels_tab[1][0];
-
switch(c->idct_permutation_type){ case FF_NO_IDCT_PERM: for(i=0; i<64; i++) diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h index 536357a529..4d783cf266 100644 --- a/libavcodec/dsputil.h +++ b/libavcodec/dsputil.h @@ -114,6 +114,12 @@ void ff_vp3_h_loop_filter_c(uint8_t *src, int stride, int *bounding_values); /* EA functions */ void ff_ea_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block); +/* RV40 functions */ +void ff_put_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride); +void ff_avg_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride); +void ff_put_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride); +void ff_avg_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride); + /* 1/2^n downscaling functions from imgconvert.c */ void ff_shrink22(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height); void ff_shrink44(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height); @@ -542,16 +548,6 @@ typedef struct DSPContext { void (*vector_clip_int32)(int32_t *dst, const int32_t *src, int32_t min, int32_t max, unsigned int len); - /* rv30 functions */ - qpel_mc_func put_rv30_tpel_pixels_tab[4][16]; - qpel_mc_func avg_rv30_tpel_pixels_tab[4][16]; - - /* rv40 functions */ - qpel_mc_func put_rv40_qpel_pixels_tab[4][16]; - qpel_mc_func avg_rv40_qpel_pixels_tab[4][16]; - h264_chroma_mc_func put_rv40_chroma_pixels_tab[3]; - h264_chroma_mc_func avg_rv40_chroma_pixels_tab[3]; - op_fill_func fill_block_tab[2]; } DSPContext; @@ -626,8 +622,6 @@ void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx); void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx); void ff_dsputil_init_dwt(DSPContext *c); -void ff_rv30dsp_init(DSPContext* c, AVCodecContext *avctx); -void ff_rv40dsp_init(DSPContext* c, AVCodecContext *avctx); void ff_intrax8dsp_init(DSPContext* c, AVCodecContext *avctx); void ff_mlp_init(DSPContext* c, AVCodecContext *avctx); void ff_mlp_init_x86(DSPContext* c, AVCodecContext *avctx); diff --git a/libavcodec/rv30dsp.c b/libavcodec/rv30dsp.c index 4ead774f9e..6ba1a6bfc1 100644 --- a/libavcodec/rv30dsp.c +++ b/libavcodec/rv30dsp.c @@ -26,6 +26,7 @@ #include "avcodec.h" #include "dsputil.h" +#include "rv34dsp.h" #define RV30_LOWPASS(OPNAME, OP) \ static av_unused void OPNAME ## rv30_tpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, const int C1, const int C2){\ @@ -251,41 +252,46 @@ RV30_MC(put_, 16) RV30_MC(avg_, 8) RV30_MC(avg_, 16) -av_cold void ff_rv30dsp_init(DSPContext* c, AVCodecContext *avctx) { - c->put_rv30_tpel_pixels_tab[0][ 0] = c->put_h264_qpel_pixels_tab[0][0]; - c->put_rv30_tpel_pixels_tab[0][ 1] = put_rv30_tpel16_mc10_c; - c->put_rv30_tpel_pixels_tab[0][ 2] = put_rv30_tpel16_mc20_c; - c->put_rv30_tpel_pixels_tab[0][ 4] = put_rv30_tpel16_mc01_c; - c->put_rv30_tpel_pixels_tab[0][ 5] = put_rv30_tpel16_mc11_c; - c->put_rv30_tpel_pixels_tab[0][ 6] = put_rv30_tpel16_mc21_c; - c->put_rv30_tpel_pixels_tab[0][ 8] = put_rv30_tpel16_mc02_c; - c->put_rv30_tpel_pixels_tab[0][ 9] = put_rv30_tpel16_mc12_c; - c->put_rv30_tpel_pixels_tab[0][10] = put_rv30_tpel16_mc22_c; - c->avg_rv30_tpel_pixels_tab[0][ 0] = c->avg_h264_qpel_pixels_tab[0][0]; - c->avg_rv30_tpel_pixels_tab[0][ 1] = avg_rv30_tpel16_mc10_c; - c->avg_rv30_tpel_pixels_tab[0][ 2] = avg_rv30_tpel16_mc20_c; - c->avg_rv30_tpel_pixels_tab[0][ 4] = avg_rv30_tpel16_mc01_c; - c->avg_rv30_tpel_pixels_tab[0][ 5] = avg_rv30_tpel16_mc11_c; - c->avg_rv30_tpel_pixels_tab[0][ 6] = avg_rv30_tpel16_mc21_c; - 
c->avg_rv30_tpel_pixels_tab[0][ 8] = avg_rv30_tpel16_mc02_c; - c->avg_rv30_tpel_pixels_tab[0][ 9] = avg_rv30_tpel16_mc12_c; - c->avg_rv30_tpel_pixels_tab[0][10] = avg_rv30_tpel16_mc22_c; - c->put_rv30_tpel_pixels_tab[1][ 0] = c->put_h264_qpel_pixels_tab[1][0]; - c->put_rv30_tpel_pixels_tab[1][ 1] = put_rv30_tpel8_mc10_c; - c->put_rv30_tpel_pixels_tab[1][ 2] = put_rv30_tpel8_mc20_c; - c->put_rv30_tpel_pixels_tab[1][ 4] = put_rv30_tpel8_mc01_c; - c->put_rv30_tpel_pixels_tab[1][ 5] = put_rv30_tpel8_mc11_c; - c->put_rv30_tpel_pixels_tab[1][ 6] = put_rv30_tpel8_mc21_c; - c->put_rv30_tpel_pixels_tab[1][ 8] = put_rv30_tpel8_mc02_c; - c->put_rv30_tpel_pixels_tab[1][ 9] = put_rv30_tpel8_mc12_c; - c->put_rv30_tpel_pixels_tab[1][10] = put_rv30_tpel8_mc22_c; - c->avg_rv30_tpel_pixels_tab[1][ 0] = c->avg_h264_qpel_pixels_tab[1][0]; - c->avg_rv30_tpel_pixels_tab[1][ 1] = avg_rv30_tpel8_mc10_c; - c->avg_rv30_tpel_pixels_tab[1][ 2] = avg_rv30_tpel8_mc20_c; - c->avg_rv30_tpel_pixels_tab[1][ 4] = avg_rv30_tpel8_mc01_c; - c->avg_rv30_tpel_pixels_tab[1][ 5] = avg_rv30_tpel8_mc11_c; - c->avg_rv30_tpel_pixels_tab[1][ 6] = avg_rv30_tpel8_mc21_c; - c->avg_rv30_tpel_pixels_tab[1][ 8] = avg_rv30_tpel8_mc02_c; - c->avg_rv30_tpel_pixels_tab[1][ 9] = avg_rv30_tpel8_mc12_c; - c->avg_rv30_tpel_pixels_tab[1][10] = avg_rv30_tpel8_mc22_c; +av_cold void ff_rv30dsp_init(RV34DSPContext *c, DSPContext* dsp) { + c->put_pixels_tab[0][ 0] = dsp->put_h264_qpel_pixels_tab[0][0]; + c->put_pixels_tab[0][ 1] = put_rv30_tpel16_mc10_c; + c->put_pixels_tab[0][ 2] = put_rv30_tpel16_mc20_c; + c->put_pixels_tab[0][ 4] = put_rv30_tpel16_mc01_c; + c->put_pixels_tab[0][ 5] = put_rv30_tpel16_mc11_c; + c->put_pixels_tab[0][ 6] = put_rv30_tpel16_mc21_c; + c->put_pixels_tab[0][ 8] = put_rv30_tpel16_mc02_c; + c->put_pixels_tab[0][ 9] = put_rv30_tpel16_mc12_c; + c->put_pixels_tab[0][10] = put_rv30_tpel16_mc22_c; + c->avg_pixels_tab[0][ 0] = dsp->avg_h264_qpel_pixels_tab[0][0]; + c->avg_pixels_tab[0][ 1] = avg_rv30_tpel16_mc10_c; + c->avg_pixels_tab[0][ 2] = avg_rv30_tpel16_mc20_c; + c->avg_pixels_tab[0][ 4] = avg_rv30_tpel16_mc01_c; + c->avg_pixels_tab[0][ 5] = avg_rv30_tpel16_mc11_c; + c->avg_pixels_tab[0][ 6] = avg_rv30_tpel16_mc21_c; + c->avg_pixels_tab[0][ 8] = avg_rv30_tpel16_mc02_c; + c->avg_pixels_tab[0][ 9] = avg_rv30_tpel16_mc12_c; + c->avg_pixels_tab[0][10] = avg_rv30_tpel16_mc22_c; + c->put_pixels_tab[1][ 0] = dsp->put_h264_qpel_pixels_tab[1][0]; + c->put_pixels_tab[1][ 1] = put_rv30_tpel8_mc10_c; + c->put_pixels_tab[1][ 2] = put_rv30_tpel8_mc20_c; + c->put_pixels_tab[1][ 4] = put_rv30_tpel8_mc01_c; + c->put_pixels_tab[1][ 5] = put_rv30_tpel8_mc11_c; + c->put_pixels_tab[1][ 6] = put_rv30_tpel8_mc21_c; + c->put_pixels_tab[1][ 8] = put_rv30_tpel8_mc02_c; + c->put_pixels_tab[1][ 9] = put_rv30_tpel8_mc12_c; + c->put_pixels_tab[1][10] = put_rv30_tpel8_mc22_c; + c->avg_pixels_tab[1][ 0] = dsp->avg_h264_qpel_pixels_tab[1][0]; + c->avg_pixels_tab[1][ 1] = avg_rv30_tpel8_mc10_c; + c->avg_pixels_tab[1][ 2] = avg_rv30_tpel8_mc20_c; + c->avg_pixels_tab[1][ 4] = avg_rv30_tpel8_mc01_c; + c->avg_pixels_tab[1][ 5] = avg_rv30_tpel8_mc11_c; + c->avg_pixels_tab[1][ 6] = avg_rv30_tpel8_mc21_c; + c->avg_pixels_tab[1][ 8] = avg_rv30_tpel8_mc02_c; + c->avg_pixels_tab[1][ 9] = avg_rv30_tpel8_mc12_c; + c->avg_pixels_tab[1][10] = avg_rv30_tpel8_mc22_c; + + c->put_chroma_pixels_tab[0] = dsp->put_h264_chroma_pixels_tab[0]; + c->put_chroma_pixels_tab[1] = dsp->put_h264_chroma_pixels_tab[1]; + c->avg_chroma_pixels_tab[0] = dsp->avg_h264_chroma_pixels_tab[0]; + 
c->avg_chroma_pixels_tab[1] = dsp->avg_h264_chroma_pixels_tab[1]; } diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c index f9773cdc45..f8192a8f8f 100644 --- a/libavcodec/rv34.c +++ b/libavcodec/rv34.c @@ -809,24 +809,18 @@ static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int width, const int height, int dir) { rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, - r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab - : r->s.dsp.put_rv40_qpel_pixels_tab, - r->rv30 ? r->s.dsp.put_h264_chroma_pixels_tab - : r->s.dsp.put_rv40_chroma_pixels_tab); + r->rdsp.put_pixels_tab, + r->rdsp.put_chroma_pixels_tab); } static void rv34_mc_2mv(RV34DecContext *r, const int block_type) { rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, - r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab - : r->s.dsp.put_rv40_qpel_pixels_tab, - r->rv30 ? r->s.dsp.put_h264_chroma_pixels_tab - : r->s.dsp.put_rv40_chroma_pixels_tab); + r->rdsp.put_pixels_tab, + r->rdsp.put_chroma_pixels_tab); rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, - r->rv30 ? r->s.dsp.avg_rv30_tpel_pixels_tab - : r->s.dsp.avg_rv40_qpel_pixels_tab, - r->rv30 ? r->s.dsp.avg_h264_chroma_pixels_tab - : r->s.dsp.avg_rv40_chroma_pixels_tab); + r->rdsp.avg_pixels_tab, + r->rdsp.avg_chroma_pixels_tab); } static void rv34_mc_2mv_skip(RV34DecContext *r) @@ -835,15 +829,11 @@ static void rv34_mc_2mv_skip(RV34DecContext *r) for(j = 0; j < 2; j++) for(i = 0; i < 2; i++){ rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30, - r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab - : r->s.dsp.put_rv40_qpel_pixels_tab, - r->rv30 ? r->s.dsp.put_h264_chroma_pixels_tab - : r->s.dsp.put_rv40_chroma_pixels_tab); + r->rdsp.put_pixels_tab, + r->rdsp.put_chroma_pixels_tab); rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30, - r->rv30 ? r->s.dsp.avg_rv30_tpel_pixels_tab - : r->s.dsp.avg_rv40_qpel_pixels_tab, - r->rv30 ? r->s.dsp.avg_h264_chroma_pixels_tab - : r->s.dsp.avg_rv40_chroma_pixels_tab); + r->rdsp.avg_pixels_tab, + r->rdsp.avg_chroma_pixels_tab); } } @@ -1363,6 +1353,15 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx) ff_h264_pred_init(&r->h, CODEC_ID_RV40, 8); +#if CONFIG_RV30_DECODER + if (avctx->codec_id == CODEC_ID_RV30) + ff_rv30dsp_init(&r->rdsp, &r->s.dsp); +#endif +#if CONFIG_RV40_DECODER + if (avctx->codec_id == CODEC_ID_RV40) + ff_rv40dsp_init(&r->rdsp, &r->s.dsp); +#endif + r->intra_types_stride = 4*s->mb_stride + 4; r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist)); r->intra_types = r->intra_types_hist + r->intra_types_stride * 4; diff --git a/libavcodec/rv34.h b/libavcodec/rv34.h index c9f4ff7a13..811afb4a80 100644 --- a/libavcodec/rv34.h +++ b/libavcodec/rv34.h @@ -32,6 +32,7 @@ #include "mpegvideo.h" #include "h264pred.h" +#include "rv34dsp.h" #define MB_TYPE_SEPARATE_DC 0x01000000 #define IS_SEPARATE_DC(a) ((a) & MB_TYPE_SEPARATE_DC) @@ -83,6 +84,7 @@ typedef struct SliceInfo{ /** decoder context */ typedef struct RV34DecContext{ MpegEncContext s; + RV34DSPContext rdsp; int8_t *intra_types_hist;///< old block types, used for prediction int8_t *intra_types; ///< block types int intra_types_stride;///< block types array stride diff --git a/libavcodec/rv34dsp.h b/libavcodec/rv34dsp.h new file mode 100644 index 0000000000..771a6c0f08 --- /dev/null +++ b/libavcodec/rv34dsp.h @@ -0,0 +1,44 @@ +/* + * RV30/40 decoder motion compensation functions + * Copyright (c) 2008 Konstantin Shishkov + * + * This file is part of Libav. 
+ * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * RV30/40 decoder motion compensation functions + */ + +#ifndef AVCODEC_RV34DSP_H +#define AVCODEC_RV34DSP_H + +#include "dsputil.h" + +typedef struct RV34DSPContext { + qpel_mc_func put_pixels_tab[4][16]; + qpel_mc_func avg_pixels_tab[4][16]; + h264_chroma_mc_func put_chroma_pixels_tab[3]; + h264_chroma_mc_func avg_chroma_pixels_tab[3]; +} RV34DSPContext; + +void ff_rv30dsp_init(RV34DSPContext *c, DSPContext* dsp); +void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp); + +void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp); + +#endif /* AVCODEC_RV34DSP_H */ diff --git a/libavcodec/rv40dsp.c b/libavcodec/rv40dsp.c index 77f2002684..132f063dfa 100644 --- a/libavcodec/rv40dsp.c +++ b/libavcodec/rv40dsp.c @@ -26,6 +26,7 @@ #include "avcodec.h" #include "dsputil.h" +#include "rv34dsp.h" #define RV40_LOWPASS(OPNAME, OP) \ static av_unused void OPNAME ## rv40_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\ @@ -284,70 +285,77 @@ static void OPNAME ## rv40_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*a RV40_CHROMA_MC(put_, op_put) RV40_CHROMA_MC(avg_, op_avg) -void ff_rv40dsp_init(DSPContext* c, AVCodecContext *avctx) { - c->put_rv40_qpel_pixels_tab[0][ 0] = c->put_h264_qpel_pixels_tab[0][0]; - c->put_rv40_qpel_pixels_tab[0][ 1] = put_rv40_qpel16_mc10_c; - c->put_rv40_qpel_pixels_tab[0][ 2] = put_rv40_qpel16_mc20_c; - c->put_rv40_qpel_pixels_tab[0][ 3] = put_rv40_qpel16_mc30_c; - c->put_rv40_qpel_pixels_tab[0][ 4] = put_rv40_qpel16_mc01_c; - c->put_rv40_qpel_pixels_tab[0][ 5] = put_rv40_qpel16_mc11_c; - c->put_rv40_qpel_pixels_tab[0][ 6] = put_rv40_qpel16_mc21_c; - c->put_rv40_qpel_pixels_tab[0][ 7] = put_rv40_qpel16_mc31_c; - c->put_rv40_qpel_pixels_tab[0][ 8] = put_rv40_qpel16_mc02_c; - c->put_rv40_qpel_pixels_tab[0][ 9] = put_rv40_qpel16_mc12_c; - c->put_rv40_qpel_pixels_tab[0][10] = put_rv40_qpel16_mc22_c; - c->put_rv40_qpel_pixels_tab[0][11] = put_rv40_qpel16_mc32_c; - c->put_rv40_qpel_pixels_tab[0][12] = put_rv40_qpel16_mc03_c; - c->put_rv40_qpel_pixels_tab[0][13] = put_rv40_qpel16_mc13_c; - c->put_rv40_qpel_pixels_tab[0][14] = put_rv40_qpel16_mc23_c; - c->avg_rv40_qpel_pixels_tab[0][ 0] = c->avg_h264_qpel_pixels_tab[0][0]; - c->avg_rv40_qpel_pixels_tab[0][ 1] = avg_rv40_qpel16_mc10_c; - c->avg_rv40_qpel_pixels_tab[0][ 2] = avg_rv40_qpel16_mc20_c; - c->avg_rv40_qpel_pixels_tab[0][ 3] = avg_rv40_qpel16_mc30_c; - c->avg_rv40_qpel_pixels_tab[0][ 4] = avg_rv40_qpel16_mc01_c; - c->avg_rv40_qpel_pixels_tab[0][ 5] = avg_rv40_qpel16_mc11_c; - c->avg_rv40_qpel_pixels_tab[0][ 6] = avg_rv40_qpel16_mc21_c; - c->avg_rv40_qpel_pixels_tab[0][ 7] = avg_rv40_qpel16_mc31_c; - c->avg_rv40_qpel_pixels_tab[0][ 8] = avg_rv40_qpel16_mc02_c; - c->avg_rv40_qpel_pixels_tab[0][ 9] = avg_rv40_qpel16_mc12_c; - 
c->avg_rv40_qpel_pixels_tab[0][10] = avg_rv40_qpel16_mc22_c; - c->avg_rv40_qpel_pixels_tab[0][11] = avg_rv40_qpel16_mc32_c; - c->avg_rv40_qpel_pixels_tab[0][12] = avg_rv40_qpel16_mc03_c; - c->avg_rv40_qpel_pixels_tab[0][13] = avg_rv40_qpel16_mc13_c; - c->avg_rv40_qpel_pixels_tab[0][14] = avg_rv40_qpel16_mc23_c; - c->put_rv40_qpel_pixels_tab[1][ 0] = c->put_h264_qpel_pixels_tab[1][0]; - c->put_rv40_qpel_pixels_tab[1][ 1] = put_rv40_qpel8_mc10_c; - c->put_rv40_qpel_pixels_tab[1][ 2] = put_rv40_qpel8_mc20_c; - c->put_rv40_qpel_pixels_tab[1][ 3] = put_rv40_qpel8_mc30_c; - c->put_rv40_qpel_pixels_tab[1][ 4] = put_rv40_qpel8_mc01_c; - c->put_rv40_qpel_pixels_tab[1][ 5] = put_rv40_qpel8_mc11_c; - c->put_rv40_qpel_pixels_tab[1][ 6] = put_rv40_qpel8_mc21_c; - c->put_rv40_qpel_pixels_tab[1][ 7] = put_rv40_qpel8_mc31_c; - c->put_rv40_qpel_pixels_tab[1][ 8] = put_rv40_qpel8_mc02_c; - c->put_rv40_qpel_pixels_tab[1][ 9] = put_rv40_qpel8_mc12_c; - c->put_rv40_qpel_pixels_tab[1][10] = put_rv40_qpel8_mc22_c; - c->put_rv40_qpel_pixels_tab[1][11] = put_rv40_qpel8_mc32_c; - c->put_rv40_qpel_pixels_tab[1][12] = put_rv40_qpel8_mc03_c; - c->put_rv40_qpel_pixels_tab[1][13] = put_rv40_qpel8_mc13_c; - c->put_rv40_qpel_pixels_tab[1][14] = put_rv40_qpel8_mc23_c; - c->avg_rv40_qpel_pixels_tab[1][ 0] = c->avg_h264_qpel_pixels_tab[1][0]; - c->avg_rv40_qpel_pixels_tab[1][ 1] = avg_rv40_qpel8_mc10_c; - c->avg_rv40_qpel_pixels_tab[1][ 2] = avg_rv40_qpel8_mc20_c; - c->avg_rv40_qpel_pixels_tab[1][ 3] = avg_rv40_qpel8_mc30_c; - c->avg_rv40_qpel_pixels_tab[1][ 4] = avg_rv40_qpel8_mc01_c; - c->avg_rv40_qpel_pixels_tab[1][ 5] = avg_rv40_qpel8_mc11_c; - c->avg_rv40_qpel_pixels_tab[1][ 6] = avg_rv40_qpel8_mc21_c; - c->avg_rv40_qpel_pixels_tab[1][ 7] = avg_rv40_qpel8_mc31_c; - c->avg_rv40_qpel_pixels_tab[1][ 8] = avg_rv40_qpel8_mc02_c; - c->avg_rv40_qpel_pixels_tab[1][ 9] = avg_rv40_qpel8_mc12_c; - c->avg_rv40_qpel_pixels_tab[1][10] = avg_rv40_qpel8_mc22_c; - c->avg_rv40_qpel_pixels_tab[1][11] = avg_rv40_qpel8_mc32_c; - c->avg_rv40_qpel_pixels_tab[1][12] = avg_rv40_qpel8_mc03_c; - c->avg_rv40_qpel_pixels_tab[1][13] = avg_rv40_qpel8_mc13_c; - c->avg_rv40_qpel_pixels_tab[1][14] = avg_rv40_qpel8_mc23_c; +av_cold void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp) { + c->put_pixels_tab[0][ 0] = dsp->put_h264_qpel_pixels_tab[0][0]; + c->put_pixels_tab[0][ 1] = put_rv40_qpel16_mc10_c; + c->put_pixels_tab[0][ 2] = put_rv40_qpel16_mc20_c; + c->put_pixels_tab[0][ 3] = put_rv40_qpel16_mc30_c; + c->put_pixels_tab[0][ 4] = put_rv40_qpel16_mc01_c; + c->put_pixels_tab[0][ 5] = put_rv40_qpel16_mc11_c; + c->put_pixels_tab[0][ 6] = put_rv40_qpel16_mc21_c; + c->put_pixels_tab[0][ 7] = put_rv40_qpel16_mc31_c; + c->put_pixels_tab[0][ 8] = put_rv40_qpel16_mc02_c; + c->put_pixels_tab[0][ 9] = put_rv40_qpel16_mc12_c; + c->put_pixels_tab[0][10] = put_rv40_qpel16_mc22_c; + c->put_pixels_tab[0][11] = put_rv40_qpel16_mc32_c; + c->put_pixels_tab[0][12] = put_rv40_qpel16_mc03_c; + c->put_pixels_tab[0][13] = put_rv40_qpel16_mc13_c; + c->put_pixels_tab[0][14] = put_rv40_qpel16_mc23_c; + c->put_pixels_tab[0][15] = ff_put_rv40_qpel16_mc33_c; + c->avg_pixels_tab[0][ 0] = dsp->avg_h264_qpel_pixels_tab[0][0]; + c->avg_pixels_tab[0][ 1] = avg_rv40_qpel16_mc10_c; + c->avg_pixels_tab[0][ 2] = avg_rv40_qpel16_mc20_c; + c->avg_pixels_tab[0][ 3] = avg_rv40_qpel16_mc30_c; + c->avg_pixels_tab[0][ 4] = avg_rv40_qpel16_mc01_c; + c->avg_pixels_tab[0][ 5] = avg_rv40_qpel16_mc11_c; + c->avg_pixels_tab[0][ 6] = avg_rv40_qpel16_mc21_c; + c->avg_pixels_tab[0][ 7] = 
avg_rv40_qpel16_mc31_c; + c->avg_pixels_tab[0][ 8] = avg_rv40_qpel16_mc02_c; + c->avg_pixels_tab[0][ 9] = avg_rv40_qpel16_mc12_c; + c->avg_pixels_tab[0][10] = avg_rv40_qpel16_mc22_c; + c->avg_pixels_tab[0][11] = avg_rv40_qpel16_mc32_c; + c->avg_pixels_tab[0][12] = avg_rv40_qpel16_mc03_c; + c->avg_pixels_tab[0][13] = avg_rv40_qpel16_mc13_c; + c->avg_pixels_tab[0][14] = avg_rv40_qpel16_mc23_c; + c->avg_pixels_tab[0][15] = ff_avg_rv40_qpel16_mc33_c; + c->put_pixels_tab[1][ 0] = dsp->put_h264_qpel_pixels_tab[1][0]; + c->put_pixels_tab[1][ 1] = put_rv40_qpel8_mc10_c; + c->put_pixels_tab[1][ 2] = put_rv40_qpel8_mc20_c; + c->put_pixels_tab[1][ 3] = put_rv40_qpel8_mc30_c; + c->put_pixels_tab[1][ 4] = put_rv40_qpel8_mc01_c; + c->put_pixels_tab[1][ 5] = put_rv40_qpel8_mc11_c; + c->put_pixels_tab[1][ 6] = put_rv40_qpel8_mc21_c; + c->put_pixels_tab[1][ 7] = put_rv40_qpel8_mc31_c; + c->put_pixels_tab[1][ 8] = put_rv40_qpel8_mc02_c; + c->put_pixels_tab[1][ 9] = put_rv40_qpel8_mc12_c; + c->put_pixels_tab[1][10] = put_rv40_qpel8_mc22_c; + c->put_pixels_tab[1][11] = put_rv40_qpel8_mc32_c; + c->put_pixels_tab[1][12] = put_rv40_qpel8_mc03_c; + c->put_pixels_tab[1][13] = put_rv40_qpel8_mc13_c; + c->put_pixels_tab[1][14] = put_rv40_qpel8_mc23_c; + c->put_pixels_tab[1][15] = ff_put_rv40_qpel8_mc33_c; + c->avg_pixels_tab[1][ 0] = dsp->avg_h264_qpel_pixels_tab[1][0]; + c->avg_pixels_tab[1][ 1] = avg_rv40_qpel8_mc10_c; + c->avg_pixels_tab[1][ 2] = avg_rv40_qpel8_mc20_c; + c->avg_pixels_tab[1][ 3] = avg_rv40_qpel8_mc30_c; + c->avg_pixels_tab[1][ 4] = avg_rv40_qpel8_mc01_c; + c->avg_pixels_tab[1][ 5] = avg_rv40_qpel8_mc11_c; + c->avg_pixels_tab[1][ 6] = avg_rv40_qpel8_mc21_c; + c->avg_pixels_tab[1][ 7] = avg_rv40_qpel8_mc31_c; + c->avg_pixels_tab[1][ 8] = avg_rv40_qpel8_mc02_c; + c->avg_pixels_tab[1][ 9] = avg_rv40_qpel8_mc12_c; + c->avg_pixels_tab[1][10] = avg_rv40_qpel8_mc22_c; + c->avg_pixels_tab[1][11] = avg_rv40_qpel8_mc32_c; + c->avg_pixels_tab[1][12] = avg_rv40_qpel8_mc03_c; + c->avg_pixels_tab[1][13] = avg_rv40_qpel8_mc13_c; + c->avg_pixels_tab[1][14] = avg_rv40_qpel8_mc23_c; + c->avg_pixels_tab[1][15] = ff_avg_rv40_qpel8_mc33_c; - c->put_rv40_chroma_pixels_tab[0] = put_rv40_chroma_mc8_c; - c->put_rv40_chroma_pixels_tab[1] = put_rv40_chroma_mc4_c; - c->avg_rv40_chroma_pixels_tab[0] = avg_rv40_chroma_mc8_c; - c->avg_rv40_chroma_pixels_tab[1] = avg_rv40_chroma_mc4_c; + c->put_chroma_pixels_tab[0] = put_rv40_chroma_mc8_c; + c->put_chroma_pixels_tab[1] = put_rv40_chroma_mc4_c; + c->avg_chroma_pixels_tab[0] = avg_rv40_chroma_mc8_c; + c->avg_chroma_pixels_tab[1] = avg_rv40_chroma_mc4_c; + + if (HAVE_MMX) + ff_rv40dsp_init_x86(c, dsp); } diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile index d3cf0da72b..a94f97a270 100644 --- a/libavcodec/x86/Makefile +++ b/libavcodec/x86/Makefile @@ -21,6 +21,8 @@ YASM-OBJS-$(CONFIG_H264PRED) += x86/h264_intrapred.o \ x86/h264_intrapred_10bit.o MMX-OBJS-$(CONFIG_H264PRED) += x86/h264_intrapred_init.o +MMX-OBJS-$(CONFIG_RV40_DECODER) += x86/rv40dsp.o \ + YASM-OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp_yasm.o MMX-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp_mmx.o diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c index 9909fdab78..0cd9601853 100644 --- a/libavcodec/x86/dsputil_mmx.c +++ b/libavcodec/x86/dsputil_mmx.c @@ -1895,29 +1895,17 @@ PREFETCH(prefetch_3dnow, prefetch) void ff_put_h264_chroma_mc8_mmx_rnd (uint8_t *dst, uint8_t *src, int stride, int h, int x, int y); -void ff_put_rv40_chroma_mc8_mmx (uint8_t *dst, uint8_t *src, - int stride, int h, int 
x, int y); void ff_avg_h264_chroma_mc8_mmx2_rnd (uint8_t *dst, uint8_t *src, int stride, int h, int x, int y); -void ff_avg_rv40_chroma_mc8_mmx2 (uint8_t *dst, uint8_t *src, - int stride, int h, int x, int y); void ff_avg_h264_chroma_mc8_3dnow_rnd (uint8_t *dst, uint8_t *src, int stride, int h, int x, int y); -void ff_avg_rv40_chroma_mc8_3dnow (uint8_t *dst, uint8_t *src, - int stride, int h, int x, int y); void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src, int stride, int h, int x, int y); -void ff_put_rv40_chroma_mc4_mmx (uint8_t *dst, uint8_t *src, - int stride, int h, int x, int y); void ff_avg_h264_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src, int stride, int h, int x, int y); -void ff_avg_rv40_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src, - int stride, int h, int x, int y); void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src, int stride, int h, int x, int y); -void ff_avg_rv40_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src, - int stride, int h, int x, int y); void ff_put_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src, int stride, int h, int x, int y); @@ -2573,9 +2561,6 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx; } - c->put_rv40_chroma_pixels_tab[0]= ff_put_rv40_chroma_mc8_mmx; - c->put_rv40_chroma_pixels_tab[1]= ff_put_rv40_chroma_mc4_mmx; - c->vector_clip_int32 = ff_vector_clip_int32_mmx; #endif @@ -2675,9 +2660,6 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2, ); #if HAVE_YASM - c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_mmx2; - c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_mmx2; - if (!high_bit_depth) { c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd; c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2; @@ -2760,8 +2742,6 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow; } - c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_3dnow; - c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_3dnow; #endif } diff --git a/libavcodec/x86/rv40dsp.c b/libavcodec/x86/rv40dsp.c new file mode 100644 index 0000000000..9f90ad8bb6 --- /dev/null +++ b/libavcodec/x86/rv40dsp.c @@ -0,0 +1,60 @@ +/* + * RV40 decoder motion compensation functions x86-optimised + * Copyright (c) 2008 Konstantin Shishkov + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * RV40 decoder motion compensation functions x86-optimised + */ + +#include "libavcodec/rv34dsp.h" + +void ff_put_rv40_chroma_mc8_mmx (uint8_t *dst, uint8_t *src, + int stride, int h, int x, int y); +void ff_avg_rv40_chroma_mc8_mmx2 (uint8_t *dst, uint8_t *src, + int stride, int h, int x, int y); +void ff_avg_rv40_chroma_mc8_3dnow(uint8_t *dst, uint8_t *src, + int stride, int h, int x, int y); + +void ff_put_rv40_chroma_mc4_mmx (uint8_t *dst, uint8_t *src, + int stride, int h, int x, int y); +void ff_avg_rv40_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src, + int stride, int h, int x, int y); +void ff_avg_rv40_chroma_mc4_3dnow(uint8_t *dst, uint8_t *src, + int stride, int h, int x, int y); + +void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp) +{ + av_unused int mm_flags = av_get_cpu_flags(); + +#if HAVE_YASM + if (mm_flags & AV_CPU_FLAG_MMX) { + c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_mmx; + c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_mmx; + } + if (mm_flags & AV_CPU_FLAG_MMX2) { + c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_mmx2; + c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_mmx2; + } else if (mm_flags & AV_CPU_FLAG_3DNOW) { + c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_3dnow; + c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_3dnow; + } +#endif +} From e5ec68699e418b5c6044195fb998ee5287f2bb6f Mon Sep 17 00:00:00 2001 From: Kostya Shishkov Date: Wed, 10 Aug 2011 11:26:24 +0200 Subject: [PATCH 04/20] RV3/4: calculate B-frame motion weights once per frame Signed-off-by: Ronald S. Bultje --- libavcodec/rv34.c | 17 ++++++++++++----- libavcodec/rv34.h | 1 + 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c index f8192a8f8f..58e4552bd8 100644 --- a/libavcodec/rv34.c +++ b/libavcodec/rv34.c @@ -568,12 +568,8 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int */ static int calc_add_mv(RV34DecContext *r, int dir, int val) { - int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts); - int dist = dir ? -GET_PTS_DIFF(r->next_pts, r->cur_pts) : GET_PTS_DIFF(r->cur_pts, r->last_pts); - int mul; + int mul = dir ? 
-r->weight2 : r->weight1; - if(!refdist) return 0; - mul = (dist << 14) / refdist; return (val * mul + 0x2000) >> 14; } @@ -1273,6 +1269,17 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int if(s->pict_type != AV_PICTURE_TYPE_B){ r->last_pts = r->next_pts; r->next_pts = r->cur_pts; + }else{ + int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts); + int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts); + int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts); + + if(!refdist){ + r->weight1 = r->weight2 = 8192; + }else{ + r->weight1 = (dist0 << 14) / refdist; + r->weight2 = (dist1 << 14) / refdist; + } } s->mb_x = s->mb_y = 0; } diff --git a/libavcodec/rv34.h b/libavcodec/rv34.h index 811afb4a80..ef19813cf8 100644 --- a/libavcodec/rv34.h +++ b/libavcodec/rv34.h @@ -107,6 +107,7 @@ typedef struct RV34DecContext{ int rpr; ///< one field size in RV30 slice header int cur_pts, last_pts, next_pts; + int weight1, weight2; ///< B frame distance fractions (0.14) used in motion compensation uint16_t *cbp_luma; ///< CBP values for luma subblocks uint8_t *cbp_chroma; ///< CBP values for chroma subblocks From b86ab38137be34376c90d45d08d49dbd28f2a72f Mon Sep 17 00:00:00 2001 From: Kostya Shishkov Date: Wed, 10 Aug 2011 11:26:39 +0200 Subject: [PATCH 05/20] Add weighted motion compensation for RV40 B-frames Signed-off-by: Ronald S. Bultje --- libavcodec/rv34.c | 76 +++++++++++++--- libavcodec/rv34.h | 5 ++ libavcodec/rv34dsp.h | 6 ++ libavcodec/rv40dsp.c | 20 +++++ tests/ref/fate/real-rv40 | 186 +++++++++++++++++++-------------------- 5 files changed, 189 insertions(+), 104 deletions(-) diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c index 58e4552bd8..cdc559fa07 100644 --- a/libavcodec/rv34.c +++ b/libavcodec/rv34.c @@ -717,7 +717,7 @@ static const int chroma_coeffs[3] = { 0, 3, 5 }; static inline void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, - const int thirdpel, + const int thirdpel, int weighted, qpel_mc_func (*qpel_mc)[16], h264_chroma_mc_func (*chroma_mc)) { @@ -781,9 +781,15 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type, srcU = uvbuf; srcV = uvbuf + 16; } - Y = s->dest[0] + xoff + yoff *s->linesize; - U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize; - V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize; + if(!weighted){ + Y = s->dest[0] + xoff + yoff *s->linesize; + U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize; + V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize; + }else{ + Y = r->tmp_b_block_y [dir] + xoff + yoff *s->linesize; + U = r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*s->uvlinesize; + V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize; + } if(block_type == RV34_MB_P_16x8){ qpel_mc[1][dxy](Y, srcY, s->linesize); @@ -804,33 +810,70 @@ static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir) { - rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, + rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, 0, r->rdsp.put_pixels_tab, r->rdsp.put_chroma_pixels_tab); } +static void rv4_weight(RV34DecContext *r) +{ + r->rdsp.rv40_weight_pixels_tab[0](r->s.dest[0], + r->tmp_b_block_y[0], + r->tmp_b_block_y[1], + r->weight1, + r->weight2, + r->s.linesize); + r->rdsp.rv40_weight_pixels_tab[1](r->s.dest[1], + r->tmp_b_block_uv[0], + r->tmp_b_block_uv[2], + r->weight1, + r->weight2, + 
r->s.uvlinesize); + r->rdsp.rv40_weight_pixels_tab[1](r->s.dest[2], + r->tmp_b_block_uv[1], + r->tmp_b_block_uv[3], + r->weight1, + r->weight2, + r->s.uvlinesize); +} + static void rv34_mc_2mv(RV34DecContext *r, const int block_type) { - rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, + int weighted = !r->rv30 && block_type != RV34_MB_B_BIDIR && r->weight1 != 8192; + + rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, weighted, r->rdsp.put_pixels_tab, r->rdsp.put_chroma_pixels_tab); - rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, - r->rdsp.avg_pixels_tab, - r->rdsp.avg_chroma_pixels_tab); + if(!weighted){ + rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 0, + r->rdsp.avg_pixels_tab, + r->rdsp.avg_chroma_pixels_tab); + }else{ + rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 1, + r->rdsp.put_pixels_tab, + r->rdsp.put_chroma_pixels_tab); + rv4_weight(r); + } } static void rv34_mc_2mv_skip(RV34DecContext *r) { int i, j; + int weighted = !r->rv30 && r->weight1 != 8192; + for(j = 0; j < 2; j++) for(i = 0; i < 2; i++){ rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30, + weighted, r->rdsp.put_pixels_tab, r->rdsp.put_chroma_pixels_tab); rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30, - r->rdsp.avg_pixels_tab, - r->rdsp.avg_chroma_pixels_tab); + weighted, + weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab, + weighted ? r->rdsp.put_chroma_pixels_tab : r->rdsp.avg_chroma_pixels_tab); } + if(weighted) + rv4_weight(r); } /** number of motion vectors in each macroblock type */ @@ -1265,6 +1308,16 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int if(MPV_frame_start(s, s->avctx) < 0) return -1; ff_er_frame_start(s); + if (!r->tmp_b_block_base || s->width != r->si.width || s->height != r->si.height) { + int i; + + r->tmp_b_block_base = av_realloc(r->tmp_b_block_base, s->linesize * 48); + for (i = 0; i < 2; i++) + r->tmp_b_block_y[i] = r->tmp_b_block_base + i * 16 * s->linesize; + for (i = 0; i < 4; i++) + r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize + + (i >> 1) * 8 * s->uvlinesize + (i & 1) * 16; + } r->cur_pts = r->si.pts; if(s->pict_type != AV_PICTURE_TYPE_B){ r->last_pts = r->next_pts; @@ -1500,6 +1553,7 @@ av_cold int ff_rv34_decode_end(AVCodecContext *avctx) av_freep(&r->intra_types_hist); r->intra_types = NULL; + av_freep(&r->tmp_b_block_base); av_freep(&r->mb_type); av_freep(&r->cbp_luma); av_freep(&r->cbp_chroma); diff --git a/libavcodec/rv34.h b/libavcodec/rv34.h index ef19813cf8..12607fb806 100644 --- a/libavcodec/rv34.h +++ b/libavcodec/rv34.h @@ -116,6 +116,11 @@ typedef struct RV34DecContext{ /** 8x8 block available flags (for MV prediction) */ DECLARE_ALIGNED(8, uint32_t, avail_cache)[3*4]; + /** temporary blocks for RV4 weighted MC */ + uint8_t *tmp_b_block_y[2]; + uint8_t *tmp_b_block_uv[4]; + uint8_t *tmp_b_block_base; + int (*parse_slice_header)(struct RV34DecContext *r, GetBitContext *gb, SliceInfo *si); int (*decode_mb_info)(struct RV34DecContext *r); int (*decode_intra_types)(struct RV34DecContext *r, GetBitContext *gb, int8_t *dst); diff --git a/libavcodec/rv34dsp.h b/libavcodec/rv34dsp.h index 771a6c0f08..e1def7dc26 100644 --- a/libavcodec/rv34dsp.h +++ b/libavcodec/rv34dsp.h @@ -29,11 +29,17 @@ #include "dsputil.h" +typedef void (*rv40_weight_func)(uint8_t *dst/*align width (8 or 16)*/, + uint8_t *src1/*align width (8 or 16)*/, + uint8_t *src2/*align width (8 or 16)*/, + int w1, int w2, int stride); + typedef struct RV34DSPContext { qpel_mc_func 
put_pixels_tab[4][16]; qpel_mc_func avg_pixels_tab[4][16]; h264_chroma_mc_func put_chroma_pixels_tab[3]; h264_chroma_mc_func avg_chroma_pixels_tab[3]; + rv40_weight_func rv40_weight_pixels_tab[2]; } RV34DSPContext; void ff_rv30dsp_init(RV34DSPContext *c, DSPContext* dsp); diff --git a/libavcodec/rv40dsp.c b/libavcodec/rv40dsp.c index 132f063dfa..ca620ab7d7 100644 --- a/libavcodec/rv40dsp.c +++ b/libavcodec/rv40dsp.c @@ -285,6 +285,23 @@ static void OPNAME ## rv40_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*a RV40_CHROMA_MC(put_, op_put) RV40_CHROMA_MC(avg_, op_avg) +#define RV40_WEIGHT_FUNC(size) \ +static void rv40_weight_func_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, int stride)\ +{\ + int i, j;\ +\ + for (j = 0; j < size; j++) {\ + for (i = 0; i < size; i++)\ + dst[i] = (((w2 * src1[i]) >> 9) + ((w1 * src2[i]) >> 9) + 0x10) >> 5;\ + src1 += stride;\ + src2 += stride;\ + dst += stride;\ + }\ +} + +RV40_WEIGHT_FUNC(16) +RV40_WEIGHT_FUNC(8) + av_cold void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp) { c->put_pixels_tab[0][ 0] = dsp->put_h264_qpel_pixels_tab[0][0]; c->put_pixels_tab[0][ 1] = put_rv40_qpel16_mc10_c; @@ -356,6 +373,9 @@ av_cold void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp) { c->avg_chroma_pixels_tab[0] = avg_rv40_chroma_mc8_c; c->avg_chroma_pixels_tab[1] = avg_rv40_chroma_mc4_c; + c->rv40_weight_pixels_tab[0] = rv40_weight_func_16; + c->rv40_weight_pixels_tab[1] = rv40_weight_func_8; + if (HAVE_MMX) ff_rv40dsp_init_x86(c, dsp); } diff --git a/tests/ref/fate/real-rv40 b/tests/ref/fate/real-rv40 index 2a445d0ccb..990a530436 100644 --- a/tests/ref/fate/real-rv40 +++ b/tests/ref/fate/real-rv40 @@ -16,106 +16,106 @@ 0, 112500, 276480, 0x5f7a0d4f 0, 120000, 276480, 0x5f7a0d4f 0, 127500, 276480, 0x5f7a0d4f -0, 135000, 276480, 0x2d722f8a -0, 142500, 276480, 0xebbb3c8f -0, 150000, 276480, 0x8574c868 +0, 135000, 276480, 0x75641594 +0, 142500, 276480, 0x32ee3526 +0, 150000, 276480, 0x5ce39368 0, 157500, 276480, 0x4ec1e418 -0, 165000, 276480, 0x95f22651 -0, 172500, 276480, 0x071d897e -0, 180000, 276480, 0x9f7623f9 -0, 187500, 276480, 0x86d4dedf -0, 195000, 276480, 0xc0a0be22 -0, 202500, 276480, 0xc5902aec -0, 210000, 276480, 0xe000f066 -0, 217500, 276480, 0x0b2a48d5 -0, 225000, 276480, 0xa1565256 -0, 232500, 276480, 0x8de3ceb3 -0, 240000, 276480, 0x654b564a +0, 165000, 276480, 0x85cbc3b5 +0, 172500, 276480, 0x377c7b46 +0, 180000, 276480, 0x756a4a2e +0, 187500, 276480, 0xcb379547 +0, 195000, 276480, 0x99c085be +0, 202500, 276480, 0xe479ffed +0, 210000, 276480, 0x1e4fae19 +0, 217500, 276480, 0x776412ef +0, 225000, 276480, 0x58ce0f38 +0, 232500, 276480, 0x5ab69b27 +0, 240000, 276480, 0xc3db9706 0, 247500, 276480, 0xc9c57884 -0, 255000, 276480, 0x89cdcdd4 -0, 262500, 276480, 0x3594fe61 -0, 270000, 276480, 0x9d082a81 -0, 277500, 276480, 0x4e6cd0c3 -0, 285000, 276480, 0xc129765f -0, 292500, 276480, 0x92a04c99 -0, 300000, 276480, 0x5ca62953 -0, 307500, 276480, 0xb7e478aa -0, 315000, 276480, 0x932735d5 -0, 322500, 276480, 0xaaa2d7aa -0, 330000, 276480, 0xd1329996 +0, 255000, 276480, 0x000b5269 +0, 262500, 276480, 0x27ff7a5d +0, 270000, 276480, 0x70647530 +0, 277500, 276480, 0x97612c4b +0, 285000, 276480, 0xdf4e04d7 +0, 292500, 276480, 0xbd98f57c +0, 300000, 276480, 0x5163b29b +0, 307500, 276480, 0x99170e64 +0, 315000, 276480, 0x8a4e991f +0, 322500, 276480, 0x6a45425f +0, 330000, 276480, 0x7bf6b1ef 0, 337500, 276480, 0x6de1e34b -0, 345000, 276480, 0x8c963c9b -0, 352500, 276480, 0xce6eff29 -0, 360000, 276480, 0x25412f7e -0, 367500, 276480, 
0x11a5ad85 -0, 375000, 276480, 0x26ea3248 -0, 382500, 276480, 0x86c35fa4 -0, 390000, 276480, 0xa98a2d38 -0, 397500, 276480, 0xed827333 -0, 405000, 276480, 0x5d44a824 -0, 412500, 276480, 0x46d54d04 -0, 420000, 276480, 0x413fd26a +0, 345000, 276480, 0xdcaaa99a +0, 352500, 276480, 0xd1e98808 +0, 360000, 276480, 0x6e2d524e +0, 367500, 276480, 0x22c50a3d +0, 375000, 276480, 0x62b76407 +0, 382500, 276480, 0x51e9b3eb +0, 390000, 276480, 0x441f7afd +0, 397500, 276480, 0xfb01efc6 +0, 405000, 276480, 0x294bb441 +0, 412500, 276480, 0xe04ac45e +0, 420000, 276480, 0x58f275ea 0, 427500, 276480, 0xf0b3b71b -0, 435000, 276480, 0x459bc06d -0, 442500, 276480, 0x4199cd45 -0, 450000, 276480, 0xa8d35683 -0, 457500, 276480, 0x9a3e7de0 -0, 465000, 276480, 0x5a30f666 -0, 472500, 276480, 0x40152668 -0, 480000, 276480, 0x90c4d22c -0, 487500, 276480, 0x5cbaacc9 -0, 495000, 276480, 0x72b658f1 -0, 502500, 276480, 0x0ba3dcc9 -0, 510000, 276480, 0x259ed5c1 +0, 435000, 276480, 0x674e34e4 +0, 442500, 276480, 0x41dda2d9 +0, 450000, 276480, 0xf46ba7fb +0, 457500, 276480, 0x28b54815 +0, 465000, 276480, 0xaf2b5d89 +0, 472500, 276480, 0x8facba58 +0, 480000, 276480, 0x28a63236 +0, 487500, 276480, 0x1ad43fd7 +0, 495000, 276480, 0x71507bd2 +0, 502500, 276480, 0x35626022 +0, 510000, 276480, 0x7c1139b3 0, 517500, 276480, 0x7fd73a99 -0, 525000, 276480, 0x488980c5 -0, 532500, 276480, 0x1d4c96a5 -0, 540000, 276480, 0x41ced7f2 -0, 547500, 276480, 0xd62d1837 -0, 555000, 276480, 0xf5fd9d20 -0, 562500, 276480, 0x2af91fda -0, 570000, 276480, 0x38ce229d -0, 577500, 276480, 0xf3a712c0 -0, 585000, 276480, 0x57b111d2 -0, 592500, 276480, 0x8556b792 -0, 600000, 276480, 0xb32d0896 +0, 525000, 276480, 0xb52e1aa2 +0, 532500, 276480, 0xd6f82cae +0, 540000, 276480, 0xf88f75d4 +0, 547500, 276480, 0x04a8e3ee +0, 555000, 276480, 0xa29f5b01 +0, 562500, 276480, 0x754ceaf5 +0, 570000, 276480, 0x5a38b4af +0, 577500, 276480, 0xfcebc261 +0, 585000, 276480, 0x3d3ca985 +0, 592500, 276480, 0x94a03c75 +0, 600000, 276480, 0x2f98911c 0, 607500, 276480, 0x923b9937 -0, 615000, 276480, 0x0da1e7e3 -0, 622500, 276480, 0x7f172382 -0, 630000, 276480, 0x93622b88 -0, 637500, 276480, 0x2599d540 -0, 645000, 276480, 0xed20c105 -0, 652500, 276480, 0x62ce256e -0, 660000, 276480, 0x286a04bb -0, 667500, 276480, 0x423f7e7c -0, 675000, 276480, 0x21fc252a -0, 682500, 276480, 0xf8a8e8ee -0, 690000, 276480, 0x770d4a8d +0, 615000, 276480, 0xefab7ffd +0, 622500, 276480, 0x6b9fbc80 +0, 630000, 276480, 0xe4bdbd1e +0, 637500, 276480, 0x225a56c0 +0, 645000, 276480, 0xf58b1b7c +0, 652500, 276480, 0xbaffcdcc +0, 660000, 276480, 0xeb6eb88f +0, 667500, 276480, 0xdb753d35 +0, 675000, 276480, 0xea80a82e +0, 682500, 276480, 0x2aae902a +0, 690000, 276480, 0x9b9ee961 0, 697500, 276480, 0xaa12b6fd -0, 705000, 276480, 0xdc7221a8 -0, 712500, 276480, 0x487eeb30 -0, 720000, 276480, 0x1e74f2db -0, 727500, 276480, 0x40ae2bc3 -0, 735000, 276480, 0x9ca9b930 -0, 742500, 276480, 0x9fb19b0f -0, 750000, 276480, 0x7bdf836c -0, 757500, 276480, 0x1e607ba7 -0, 765000, 276480, 0xbd96578b -0, 772500, 276480, 0x2124bf07 -0, 780000, 276480, 0x4895e27a +0, 705000, 276480, 0x50c31e73 +0, 712500, 276480, 0xdd9fb89f +0, 720000, 276480, 0xaf82399a +0, 727500, 276480, 0x7ce5f23c +0, 735000, 276480, 0x5aaa7519 +0, 742500, 276480, 0xe45a5599 +0, 750000, 276480, 0x704411fb +0, 757500, 276480, 0x9d7430a1 +0, 765000, 276480, 0x2c230702 +0, 772500, 276480, 0x4a4f76cd +0, 780000, 276480, 0x27f54854 0, 787500, 276480, 0x694d76e3 -0, 795000, 276480, 0xe70df513 -0, 802500, 276480, 0xcacafe6b -0, 810000, 276480, 0x64087748 -0, 817500, 
276480, 0x571fda23 -0, 825000, 276480, 0x8c86cbe9 -0, 832500, 276480, 0xc8ea4671 -0, 840000, 276480, 0xbfb74300 -0, 847500, 276480, 0xbe1e3770 -0, 855000, 276480, 0x757a0232 -0, 862500, 276480, 0xa5f50c84 -0, 870000, 276480, 0x6d95f808 +0, 795000, 276480, 0x525463e2 +0, 802500, 276480, 0x819898f9 +0, 810000, 276480, 0xeeed00fc +0, 817500, 276480, 0xb6f99ee3 +0, 825000, 276480, 0xefc83107 +0, 832500, 276480, 0xbb22e024 +0, 840000, 276480, 0x300f922a +0, 847500, 276480, 0x826fc3bd +0, 855000, 276480, 0x679a53f8 +0, 862500, 276480, 0x976c9e93 +0, 870000, 276480, 0xb194656e 0, 877500, 276480, 0xf002c5ca -0, 885000, 276480, 0x1a2abb26 -0, 892500, 276480, 0x6cf69bf2 +0, 885000, 276480, 0xb243dda5 +0, 892500, 276480, 0x1700efbb 0, 900000, 276480, 0x8f316c66 From 791a86c37a03b94207bc2d0ad4cbe7f39d7e495a Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Wed, 27 Jul 2011 21:04:26 +0200 Subject: [PATCH 06/20] ffmpeg: add a warning stating that the program is deprecated. --- ffmpeg.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ffmpeg.c b/ffmpeg.c index c155a73b6d..1a6ba51b42 100644 --- a/ffmpeg.c +++ b/ffmpeg.c @@ -4394,6 +4394,10 @@ int main(int argc, char **argv) show_banner(); + av_log(NULL, AV_LOG_WARNING, "This program is not developed anymore and is only " + "provided for compatibility. Use avconv instead " + "(see Changelog for the list of incompatible changes).\n"); + /* parse options */ parse_options(argc, argv, options, opt_output_file); From 6291d7e41605c0b1e9debfae8a2b1d4cf7b0e0b3 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Wed, 27 Jul 2011 20:56:59 +0200 Subject: [PATCH 07/20] Make a copy of ffmpeg under a new name -- avconv. It will be further developed with a few incompatible changes. ffmpeg.c will stay as is for some time, so any scripts using it won't be broken. --- .gitignore | 1 + Makefile | 3 +- avconv.c | 4428 +++++++++++++++++++++++++++++++++++++ cmdutils.c | 8 +- configure | 11 +- doc/avconv.texi | 1065 +++++++++ doc/avplay.texi | 2 +- doc/avprobe.texi | 2 +- doc/avserver.texi | 2 +- tests/Makefile | 6 +- tests/codec-regression.sh | 12 +- tests/fate-run.sh | 14 +- tests/lavf-regression.sh | 36 +- tests/lavfi-regression.sh | 6 +- tests/regression-funcs.sh | 32 +- 15 files changed, 5564 insertions(+), 64 deletions(-) create mode 100644 avconv.c create mode 100644 doc/avconv.texi diff --git a/.gitignore b/.gitignore index 734e31373c..2118a5115e 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ doc/*.html doc/*.pod doxy ffmpeg +avconv avplay avprobe avserver diff --git a/Makefile b/Makefile index 7f88de5552..21639ca263 100644 --- a/Makefile +++ b/Makefile @@ -53,6 +53,7 @@ COMPILE_S = $(call COMPILE,AS) %.c %.h: TAG = GEN PROGS-$(CONFIG_FFMPEG) += ffmpeg +PROGS-$(CONFIG_AVCONV) += avconv PROGS-$(CONFIG_AVPLAY) += avplay PROGS-$(CONFIG_AVPROBE) += avprobe PROGS-$(CONFIG_AVSERVER) += avserver @@ -64,7 +65,7 @@ HOSTPROGS := $(TESTTOOLS:%=tests/%) TOOLS = qt-faststart trasher TOOLS-$(CONFIG_ZLIB) += cws2fws -BASENAMES = ffmpeg avplay avprobe avserver +BASENAMES = ffmpeg avconv avplay avprobe avserver ALLPROGS = $(BASENAMES:%=%$(EXESUF)) ALLMANPAGES = $(BASENAMES:%=%.1) diff --git a/avconv.c b/avconv.c new file mode 100644 index 0000000000..956de7b9ef --- /dev/null +++ b/avconv.c @@ -0,0 +1,4428 @@ +/* + * avconv main + * Copyright (c) 2000-2011 The libav developers. + * + * This file is part of Libav. 
+ * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "config.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include "libavformat/avformat.h" +#include "libavdevice/avdevice.h" +#include "libswscale/swscale.h" +#include "libavutil/opt.h" +#include "libavcodec/audioconvert.h" +#include "libavutil/audioconvert.h" +#include "libavutil/parseutils.h" +#include "libavutil/samplefmt.h" +#include "libavutil/colorspace.h" +#include "libavutil/fifo.h" +#include "libavutil/intreadwrite.h" +#include "libavutil/dict.h" +#include "libavutil/mathematics.h" +#include "libavutil/pixdesc.h" +#include "libavutil/avstring.h" +#include "libavutil/libm.h" +#include "libavformat/os_support.h" + +#if CONFIG_AVFILTER +# include "libavfilter/avfilter.h" +# include "libavfilter/avfiltergraph.h" +# include "libavfilter/vsrc_buffer.h" +#endif + +#if HAVE_SYS_RESOURCE_H +#include +#include +#include +#elif HAVE_GETPROCESSTIMES +#include +#endif +#if HAVE_GETPROCESSMEMORYINFO +#include +#include +#endif + +#if HAVE_SYS_SELECT_H +#include +#endif + +#include + +#include "cmdutils.h" + +#include "libavutil/avassert.h" + +const char program_name[] = "avconv"; +const int program_birth_year = 2000; + +/* select an input stream for an output stream */ +typedef struct StreamMap { + int file_index; + int stream_index; + int sync_file_index; + int sync_stream_index; +} StreamMap; + +/** + * select an input file for an output file + */ +typedef struct MetadataMap { + int file; //< file index + char type; //< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram + int index; //< stream/chapter/program number +} MetadataMap; + +typedef struct ChapterMap { + int in_file; + int out_file; +} ChapterMap; + +static const OptionDef options[]; + +#define MAX_FILES 100 + +static const char *last_asked_format = NULL; +static double *ts_scale; +static int nb_ts_scale; + +static AVFormatContext *output_files[MAX_FILES]; +static AVDictionary *output_opts[MAX_FILES]; +static int nb_output_files = 0; + +static StreamMap *stream_maps = NULL; +static int nb_stream_maps; + +/* first item specifies output metadata, second is input */ +static MetadataMap (*meta_data_maps)[2] = NULL; +static int nb_meta_data_maps; +static int metadata_global_autocopy = 1; +static int metadata_streams_autocopy = 1; +static int metadata_chapters_autocopy = 1; + +static ChapterMap *chapter_maps = NULL; +static int nb_chapter_maps; + +/* indexed by output file stream index */ +static int *streamid_map = NULL; +static int nb_streamid_map = 0; + +static int frame_width = 0; +static int frame_height = 0; +static float frame_aspect_ratio = 0; +static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE; +static enum AVSampleFormat audio_sample_fmt = AV_SAMPLE_FMT_NONE; +static int max_frames[4] = {INT_MAX, INT_MAX, INT_MAX, INT_MAX}; +static 
AVRational frame_rate; +static float video_qscale = 0; +static uint16_t *intra_matrix = NULL; +static uint16_t *inter_matrix = NULL; +static const char *video_rc_override_string=NULL; +static int video_disable = 0; +static int video_discard = 0; +static char *video_codec_name = NULL; +static unsigned int video_codec_tag = 0; +static char *video_language = NULL; +static int same_quality = 0; +static int do_deinterlace = 0; +static int top_field_first = -1; +static int me_threshold = 0; +static int intra_dc_precision = 8; +static int loop_input = 0; +static int loop_output = AVFMT_NOOUTPUTLOOP; +static int qp_hist = 0; +#if CONFIG_AVFILTER +static char *vfilters = NULL; +#endif + +static int intra_only = 0; +static int audio_sample_rate = 0; +#define QSCALE_NONE -99999 +static float audio_qscale = QSCALE_NONE; +static int audio_disable = 0; +static int audio_channels = 0; +static char *audio_codec_name = NULL; +static unsigned int audio_codec_tag = 0; +static char *audio_language = NULL; + +static int subtitle_disable = 0; +static char *subtitle_codec_name = NULL; +static char *subtitle_language = NULL; +static unsigned int subtitle_codec_tag = 0; + +static int data_disable = 0; +static char *data_codec_name = NULL; +static unsigned int data_codec_tag = 0; + +static float mux_preload= 0.5; +static float mux_max_delay= 0.7; + +static int64_t recording_time = INT64_MAX; +static int64_t start_time = 0; +static int64_t input_ts_offset = 0; +static int file_overwrite = 0; +static AVDictionary *metadata; +static int do_benchmark = 0; +static int do_hex_dump = 0; +static int do_pkt_dump = 0; +static int do_psnr = 0; +static int do_pass = 0; +static char *pass_logfilename_prefix = NULL; +static int audio_stream_copy = 0; +static int video_stream_copy = 0; +static int subtitle_stream_copy = 0; +static int data_stream_copy = 0; +static int video_sync_method= -1; +static int audio_sync_method= 0; +static float audio_drift_threshold= 0.1; +static int copy_ts= 0; +static int copy_tb; +static int opt_shortest = 0; +static char *vstats_filename; +static FILE *vstats_file; +static int opt_programid = 0; +static int copy_initial_nonkeyframes = 0; + +static int rate_emu = 0; + +static int audio_volume = 256; + +static int exit_on_error = 0; +static int using_stdin = 0; +static int verbose = 1; +static int thread_count= 1; +static int64_t video_size = 0; +static int64_t audio_size = 0; +static int64_t extra_size = 0; +static int nb_frames_dup = 0; +static int nb_frames_drop = 0; +static int input_sync; +static uint64_t limit_filesize = 0; +static int force_fps = 0; +static char *forced_key_frames = NULL; + +static float dts_delta_threshold = 10; + +static int64_t timer_start; + +static uint8_t *audio_buf; +static uint8_t *audio_out; +static unsigned int allocated_audio_out_size, allocated_audio_buf_size; + +static short *samples; + +static AVBitStreamFilterContext *video_bitstream_filters=NULL; +static AVBitStreamFilterContext *audio_bitstream_filters=NULL; +static AVBitStreamFilterContext *subtitle_bitstream_filters=NULL; + +#define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass" + +struct InputStream; + +typedef struct OutputStream { + int file_index; /* file index */ + int index; /* stream index in the output file */ + int source_index; /* InputStream index */ + AVStream *st; /* stream in the output file */ + int encoding_needed; /* true if encoding needed for this stream */ + int frame_number; + /* input pts and corresponding output pts + for A/V sync */ + //double sync_ipts; /* dts from the AVPacket of the 
demuxer in second units */ + struct InputStream *sync_ist; /* input stream to sync against */ + int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number + AVBitStreamFilterContext *bitstream_filters; + AVCodec *enc; + + /* video only */ + int video_resample; + AVFrame pict_tmp; /* temporary image for resampling */ + struct SwsContext *img_resample_ctx; /* for image resampling */ + int resample_height; + int resample_width; + int resample_pix_fmt; + AVRational frame_rate; + + float frame_aspect_ratio; + + /* forced key frames */ + int64_t *forced_kf_pts; + int forced_kf_count; + int forced_kf_index; + + /* audio only */ + int audio_resample; + ReSampleContext *resample; /* for audio resampling */ + int resample_sample_fmt; + int resample_channels; + int resample_sample_rate; + int reformat_pair; + AVAudioConvert *reformat_ctx; + AVFifoBuffer *fifo; /* for compression: one audio fifo per codec */ + FILE *logfile; + +#if CONFIG_AVFILTER + AVFilterContext *output_video_filter; + AVFilterContext *input_video_filter; + AVFilterBufferRef *picref; + char *avfilter; + AVFilterGraph *graph; +#endif + + int sws_flags; + AVDictionary *opts; +} OutputStream; + +static OutputStream **output_streams_for_file[MAX_FILES] = { NULL }; +static int nb_output_streams_for_file[MAX_FILES] = { 0 }; + +typedef struct InputStream { + int file_index; + AVStream *st; + int discard; /* true if stream data should be discarded */ + int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */ + AVCodec *dec; + + int64_t start; /* time when read started */ + int64_t next_pts; /* synthetic pts for cases where pkt.pts + is not defined */ + int64_t pts; /* current pts */ + PtsCorrectionContext pts_ctx; + double ts_scale; + int is_start; /* is 1 at the start and after a discontinuity */ + int showed_multi_packet_warning; + int is_past_recording_time; + AVDictionary *opts; +} InputStream; + +typedef struct InputFile { + AVFormatContext *ctx; + int eof_reached; /* true if eof reached */ + int ist_index; /* index of first stream in ist_table */ + int buffer_size; /* current total buffer size */ + int64_t ts_offset; +} InputFile; + +static InputStream *input_streams = NULL; +static int nb_input_streams = 0; +static InputFile *input_files = NULL; +static int nb_input_files = 0; + +#if CONFIG_AVFILTER + +static int configure_video_filters(InputStream *ist, OutputStream *ost) +{ + AVFilterContext *last_filter, *filter; + /** filter graph containing all filters including input & output */ + AVCodecContext *codec = ost->st->codec; + AVCodecContext *icodec = ist->st->codec; + FFSinkContext ffsink_ctx = { .pix_fmt = codec->pix_fmt }; + AVRational sample_aspect_ratio; + char args[255]; + int ret; + + ost->graph = avfilter_graph_alloc(); + + if (ist->st->sample_aspect_ratio.num){ + sample_aspect_ratio = ist->st->sample_aspect_ratio; + }else + sample_aspect_ratio = ist->st->codec->sample_aspect_ratio; + + snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width, + ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE, + sample_aspect_ratio.num, sample_aspect_ratio.den); + + ret = avfilter_graph_create_filter(&ost->input_video_filter, avfilter_get_by_name("buffer"), + "src", args, NULL, ost->graph); + if (ret < 0) + return ret; + ret = avfilter_graph_create_filter(&ost->output_video_filter, &ffsink, + "out", NULL, &ffsink_ctx, ost->graph); + if (ret < 0) + return ret; + last_filter = ost->input_video_filter; + + if (codec->width != icodec->width || 
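The "buffer" source above is configured with a single colon-separated argument string (width:height:pix_fmt:time_base_num:time_base_den:sar_num:sar_den, following the snprintf call in configure_video_filters). A minimal standalone sketch of composing such a string with only the C standard library; build_buffer_args and the numbers in main are illustrative, not part of the patch:

    /* Sketch of how the "buffer" source arguments above are composed. */
    #include <stdio.h>

    static int build_buffer_args(char *buf, size_t size,
                                 int w, int h, int pix_fmt,
                                 int tb_num, int tb_den,
                                 int sar_num, int sar_den)
    {
        /* snprintf() returns the length that would have been written;
         * treat >= size as truncation. */
        int n = snprintf(buf, size, "%d:%d:%d:%d:%d:%d:%d",
                         w, h, pix_fmt, tb_num, tb_den, sar_num, sar_den);
        return (n < 0 || (size_t)n >= size) ? -1 : 0;
    }

    int main(void)
    {
        char args[255];
        /* 1/1000000 mirrors the 1:AV_TIME_BASE time base used above. */
        if (!build_buffer_args(args, sizeof(args), 1280, 720, 0, 1, 1000000, 1, 1))
            printf("buffer args: %s\n", args);
        return 0;
    }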
codec->height != icodec->height) { + snprintf(args, 255, "%d:%d:flags=0x%X", + codec->width, + codec->height, + ost->sws_flags); + if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"), + NULL, args, NULL, ost->graph)) < 0) + return ret; + if ((ret = avfilter_link(last_filter, 0, filter, 0)) < 0) + return ret; + last_filter = filter; + } + + snprintf(args, sizeof(args), "flags=0x%X", ost->sws_flags); + ost->graph->scale_sws_opts = av_strdup(args); + + if (ost->avfilter) { + AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut)); + AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut)); + + outputs->name = av_strdup("in"); + outputs->filter_ctx = last_filter; + outputs->pad_idx = 0; + outputs->next = NULL; + + inputs->name = av_strdup("out"); + inputs->filter_ctx = ost->output_video_filter; + inputs->pad_idx = 0; + inputs->next = NULL; + + if ((ret = avfilter_graph_parse(ost->graph, ost->avfilter, inputs, outputs, NULL)) < 0) + return ret; + av_freep(&ost->avfilter); + } else { + if ((ret = avfilter_link(last_filter, 0, ost->output_video_filter, 0)) < 0) + return ret; + } + + if ((ret = avfilter_graph_config(ost->graph, NULL)) < 0) + return ret; + + codec->width = ost->output_video_filter->inputs[0]->w; + codec->height = ost->output_video_filter->inputs[0]->h; + codec->sample_aspect_ratio = ost->st->sample_aspect_ratio = + ost->frame_aspect_ratio ? // overriden by the -aspect cli option + av_d2q(ost->frame_aspect_ratio*codec->height/codec->width, 255) : + ost->output_video_filter->inputs[0]->sample_aspect_ratio; + + return 0; +} +#endif /* CONFIG_AVFILTER */ + +static void term_exit(void) +{ + av_log(NULL, AV_LOG_QUIET, ""); +} + +static volatile int received_sigterm = 0; +static volatile int received_nb_signals = 0; + +static void +sigterm_handler(int sig) +{ + received_sigterm = sig; + received_nb_signals++; + term_exit(); +} + +static void term_init(void) +{ + signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */ + signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */ +#ifdef SIGXCPU + signal(SIGXCPU, sigterm_handler); +#endif +} + +static int decode_interrupt_cb(void) +{ + return received_nb_signals > 1; +} + +static int exit_program(int ret) +{ + int i; + + /* close files */ + for(i=0;ioformat->flags & AVFMT_NOFILE) && s->pb) + avio_close(s->pb); + avformat_free_context(s); + av_free(output_streams_for_file[i]); + av_dict_free(&output_opts[i]); + } + for(i=0;ikey); + exit_program(1); + } +} + +static void assert_codec_experimental(AVCodecContext *c, int encoder) +{ + const char *codec_string = encoder ? "encoder" : "decoder"; + AVCodec *codec; + if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL && + c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { + av_log(NULL, AV_LOG_ERROR, "%s '%s' is experimental and might produce bad " + "results.\nAdd '-strict experimental' if you want to use it.\n", + codec_string, c->codec->name); + codec = encoder ? 
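The terminal and signal handling above follows a common pattern: the handlers only record which signal arrived and how many times, and a separate predicate (decode_interrupt_cb) tells blocking library calls to bail out once a second signal is seen. A self-contained sketch of that pattern under POSIX signal() semantics; it uses sig_atomic_t where the patch uses plain volatile ints, and the names are illustrative:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t received_signal = 0;
    static volatile sig_atomic_t nb_signals      = 0;

    static void on_signal(int sig)
    {
        received_signal = sig;
        nb_signals++;            /* a second Ctrl-C means "abort now" */
    }

    static int interrupt_requested(void)
    {
        return nb_signals > 1;   /* mirrors decode_interrupt_cb() above */
    }

    int main(void)
    {
        signal(SIGINT,  on_signal);
        signal(SIGTERM, on_signal);

        for (int i = 0; i < 5 && !interrupt_requested(); i++)
            sleep(1);            /* stand-in for blocking demux/decode work */

        printf("last signal: %d, count: %d\n",
               (int)received_signal, (int)nb_signals);
        return 0;
    }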
avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id); + if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL)) + av_log(NULL, AV_LOG_ERROR, "Or use the non experimental %s '%s'.\n", + codec_string, codec->name); + exit_program(1); + } +} + +/* similar to ff_dynarray_add() and av_fast_realloc() */ +static void *grow_array(void *array, int elem_size, int *size, int new_size) +{ + if (new_size >= INT_MAX / elem_size) { + fprintf(stderr, "Array too big.\n"); + exit_program(1); + } + if (*size < new_size) { + uint8_t *tmp = av_realloc(array, new_size*elem_size); + if (!tmp) { + fprintf(stderr, "Could not alloc buffer.\n"); + exit_program(1); + } + memset(tmp + *size*elem_size, 0, (new_size-*size) * elem_size); + *size = new_size; + return tmp; + } + return array; +} + +static void choose_sample_fmt(AVStream *st, AVCodec *codec) +{ + if(codec && codec->sample_fmts){ + const enum AVSampleFormat *p= codec->sample_fmts; + for(; *p!=-1; p++){ + if(*p == st->codec->sample_fmt) + break; + } + if (*p == -1) { + av_log(NULL, AV_LOG_WARNING, + "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n", + av_get_sample_fmt_name(st->codec->sample_fmt), + codec->name, + av_get_sample_fmt_name(codec->sample_fmts[0])); + st->codec->sample_fmt = codec->sample_fmts[0]; + } + } +} + +/** + * Update the requested input sample format based on the output sample format. + * This is currently only used to request float output from decoders which + * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT. + * Ideally this will be removed in the future when decoders do not do format + * conversion and only output in their native format. + */ +static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec, + AVCodecContext *enc) +{ + /* if sample formats match or a decoder sample format has already been + requested, just return */ + if (enc->sample_fmt == dec->sample_fmt || + dec->request_sample_fmt > AV_SAMPLE_FMT_NONE) + return; + + /* if decoder supports more than one output format */ + if (dec_codec && dec_codec->sample_fmts && + dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE && + dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) { + const enum AVSampleFormat *p; + int min_dec = -1, min_inc = -1; + + /* find a matching sample format in the encoder */ + for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) { + if (*p == enc->sample_fmt) { + dec->request_sample_fmt = *p; + return; + } else if (*p > enc->sample_fmt) { + min_inc = FFMIN(min_inc, *p - enc->sample_fmt); + } else + min_dec = FFMIN(min_dec, enc->sample_fmt - *p); + } + + /* if none match, provide the one that matches quality closest */ + dec->request_sample_fmt = min_inc > 0 ? 
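grow_array() above enlarges an array to a requested element count, zero-fills the newly added tail and refuses sizes that would overflow the byte count. A standalone sketch of the same contract that reports failure instead of calling exit_program(), purely for illustration:

    #include <stdlib.h>
    #include <string.h>
    #include <limits.h>
    #include <stdio.h>

    static void *grow_array_sketch(void *array, size_t elem_size,
                                   int *size, int new_size)
    {
        if (new_size >= INT_MAX / (int)elem_size)
            return NULL;                          /* byte count would overflow */
        if (*size < new_size) {
            char *tmp = realloc(array, (size_t)new_size * elem_size);
            if (!tmp)
                return NULL;
            memset(tmp + (size_t)*size * elem_size, 0,
                   (size_t)(new_size - *size) * elem_size);   /* zero the tail */
            *size = new_size;
            return tmp;
        }
        return array;                             /* already large enough */
    }

    int main(void)
    {
        int n = 0;
        int *v = grow_array_sketch(NULL, sizeof(*v), &n, 8);
        if (v) { v[7] = 42; printf("n=%d v[7]=%d v[0]=%d\n", n, v[7], v[0]); }
        free(v);
        return 0;
    }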
enc->sample_fmt + min_inc : + enc->sample_fmt - min_dec; + } +} + +static void choose_sample_rate(AVStream *st, AVCodec *codec) +{ + if(codec && codec->supported_samplerates){ + const int *p= codec->supported_samplerates; + int best=0; + int best_dist=INT_MAX; + for(; *p; p++){ + int dist= abs(st->codec->sample_rate - *p); + if(dist < best_dist){ + best_dist= dist; + best= *p; + } + } + if(best_dist){ + av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best); + } + st->codec->sample_rate= best; + } +} + +static void choose_pixel_fmt(AVStream *st, AVCodec *codec) +{ + if(codec && codec->pix_fmts){ + const enum PixelFormat *p= codec->pix_fmts; + if(st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL){ + if(st->codec->codec_id==CODEC_ID_MJPEG){ + p= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE}; + }else if(st->codec->codec_id==CODEC_ID_LJPEG){ + p= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE}; + } + } + for(; *p!=-1; p++){ + if(*p == st->codec->pix_fmt) + break; + } + if (*p == -1) { + if(st->codec->pix_fmt != PIX_FMT_NONE) + av_log(NULL, AV_LOG_WARNING, + "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n", + av_pix_fmt_descriptors[st->codec->pix_fmt].name, + codec->name, + av_pix_fmt_descriptors[codec->pix_fmts[0]].name); + st->codec->pix_fmt = codec->pix_fmts[0]; + } + } +} + +static OutputStream *new_output_stream(AVFormatContext *oc, int file_idx, AVCodec *codec) +{ + OutputStream *ost; + AVStream *st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0); + int idx = oc->nb_streams - 1; + + if (!st) { + av_log(NULL, AV_LOG_ERROR, "Could not alloc stream.\n"); + exit_program(1); + } + + output_streams_for_file[file_idx] = + grow_array(output_streams_for_file[file_idx], + sizeof(*output_streams_for_file[file_idx]), + &nb_output_streams_for_file[file_idx], + oc->nb_streams); + ost = output_streams_for_file[file_idx][idx] = + av_mallocz(sizeof(OutputStream)); + if (!ost) { + fprintf(stderr, "Could not alloc output stream\n"); + exit_program(1); + } + ost->file_index = file_idx; + ost->index = idx; + ost->st = st; + ost->enc = codec; + if (codec) + ost->opts = filter_codec_opts(codec_opts, codec->id, 1); + + avcodec_get_context_defaults3(st->codec, codec); + + ost->sws_flags = av_get_int(sws_opts, "sws_flags", NULL); + return ost; +} + +static int read_avserver_streams(AVFormatContext *s, const char *filename) +{ + int i, err; + AVFormatContext *ic = NULL; + int nopts = 0; + + err = avformat_open_input(&ic, filename, NULL, NULL); + if (err < 0) + return err; + /* copy stream format */ + for(i=0;inb_streams;i++) { + AVStream *st; + OutputStream *ost; + AVCodec *codec; + + codec = avcodec_find_encoder(ic->streams[i]->codec->codec_id); + ost = new_output_stream(s, nb_output_files, codec); + st = ost->st; + + // FIXME: a more elegant solution is needed + memcpy(st, ic->streams[i], sizeof(AVStream)); + st->info = NULL; + avcodec_copy_context(st->codec, ic->streams[i]->codec); + + if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { + if (audio_stream_copy) { + st->stream_copy = 1; + } else + choose_sample_fmt(st, codec); + } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { + if (video_stream_copy) { + st->stream_copy = 1; + } else + choose_pixel_fmt(st, codec); + } + + if(st->codec->flags & 
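choose_sample_rate() above walks the codec's zero-terminated supported_samplerates list and keeps the entry closest to the requested rate. A minimal sketch of that selection over a plain int array (the rate table is example data, not taken from any particular codec):

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    static int nearest_rate(int requested, const int *supported)
    {
        int best = 0, best_dist = INT_MAX;
        for (; *supported; supported++) {
            int dist = abs(requested - *supported);
            if (dist < best_dist) {
                best_dist = dist;
                best = *supported;
            }
        }
        return best;   /* 0 if the list was empty */
    }

    int main(void)
    {
        static const int rates[] = { 8000, 16000, 32000, 44100, 48000, 0 };
        printf("47999 -> %d\n", nearest_rate(47999, rates));  /* 48000 */
        printf("11000 -> %d\n", nearest_rate(11000, rates));  /* 8000  */
        return 0;
    }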
CODEC_FLAG_BITEXACT) + nopts = 1; + } + + av_close_input_file(ic); + return 0; +} + +static double +get_sync_ipts(const OutputStream *ost) +{ + const InputStream *ist = ost->sync_ist; + return (double)(ist->pts - start_time)/AV_TIME_BASE; +} + +static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){ + int ret; + + while(bsfc){ + AVPacket new_pkt= *pkt; + int a= av_bitstream_filter_filter(bsfc, avctx, NULL, + &new_pkt.data, &new_pkt.size, + pkt->data, pkt->size, + pkt->flags & AV_PKT_FLAG_KEY); + if(a>0){ + av_free_packet(pkt); + new_pkt.destruct= av_destruct_packet; + } else if(a<0){ + fprintf(stderr, "%s failed for stream %d, codec %s", + bsfc->filter->name, pkt->stream_index, + avctx->codec ? avctx->codec->name : "copy"); + print_error("", a); + if (exit_on_error) + exit_program(1); + } + *pkt= new_pkt; + + bsfc= bsfc->next; + } + + ret= av_interleaved_write_frame(s, pkt); + if(ret < 0){ + print_error("av_interleaved_write_frame()", ret); + exit_program(1); + } +} + +#define MAX_AUDIO_PACKET_SIZE (128 * 1024) + +static void do_audio_out(AVFormatContext *s, + OutputStream *ost, + InputStream *ist, + unsigned char *buf, int size) +{ + uint8_t *buftmp; + int64_t audio_out_size, audio_buf_size; + int64_t allocated_for_size= size; + + int size_out, frame_bytes, ret, resample_changed; + AVCodecContext *enc= ost->st->codec; + AVCodecContext *dec= ist->st->codec; + int osize = av_get_bytes_per_sample(enc->sample_fmt); + int isize = av_get_bytes_per_sample(dec->sample_fmt); + const int coded_bps = av_get_bits_per_sample(enc->codec->id); + +need_realloc: + audio_buf_size= (allocated_for_size + isize*dec->channels - 1) / (isize*dec->channels); + audio_buf_size= (audio_buf_size*enc->sample_rate + dec->sample_rate) / dec->sample_rate; + audio_buf_size= audio_buf_size*2 + 10000; //safety factors for the deprecated resampling API + audio_buf_size= FFMAX(audio_buf_size, enc->frame_size); + audio_buf_size*= osize*enc->channels; + + audio_out_size= FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels); + if(coded_bps > 8*osize) + audio_out_size= audio_out_size * coded_bps / (8*osize); + audio_out_size += FF_MIN_BUFFER_SIZE; + + if(audio_out_size > INT_MAX || audio_buf_size > INT_MAX){ + fprintf(stderr, "Buffer sizes too large\n"); + exit_program(1); + } + + av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size); + av_fast_malloc(&audio_out, &allocated_audio_out_size, audio_out_size); + if (!audio_buf || !audio_out){ + fprintf(stderr, "Out of memory in do_audio_out\n"); + exit_program(1); + } + + if (enc->channels != dec->channels || enc->sample_rate != dec->sample_rate) + ost->audio_resample = 1; + + resample_changed = ost->resample_sample_fmt != dec->sample_fmt || + ost->resample_channels != dec->channels || + ost->resample_sample_rate != dec->sample_rate; + + if ((ost->audio_resample && !ost->resample) || resample_changed) { + if (resample_changed) { + av_log(NULL, AV_LOG_INFO, "Input stream #%d.%d frame changed from rate:%d fmt:%s ch:%d to rate:%d fmt:%s ch:%d\n", + ist->file_index, ist->st->index, + ost->resample_sample_rate, av_get_sample_fmt_name(ost->resample_sample_fmt), ost->resample_channels, + dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt), dec->channels); + ost->resample_sample_fmt = dec->sample_fmt; + ost->resample_channels = dec->channels; + ost->resample_sample_rate = dec->sample_rate; + if (ost->resample) + audio_resample_close(ost->resample); + } + /* if audio_sync_method is >1 the resampler is 
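The audio_buf_size arithmetic in do_audio_out() above rounds the input byte count up to whole input samples, rescales to the output rate, then applies a 2x-plus-10000-sample safety margin and a one-frame minimum before converting back to output bytes. A worked, standalone version of that formula with plain integers; no libavcodec types are involved and the example numbers in main are arbitrary:

    #include <stdio.h>
    #include <stdint.h>

    static int64_t max64(int64_t a, int64_t b) { return a > b ? a : b; }

    static int64_t audio_buf_bytes(int64_t in_bytes,
                                   int isize, int in_channels, int in_rate,
                                   int osize, int out_channels, int out_rate,
                                   int out_frame_size)
    {
        int64_t samples = (in_bytes + (int64_t)isize * in_channels - 1) /
                          ((int64_t)isize * in_channels);  /* round up to samples */
        samples = (samples * out_rate + in_rate) / in_rate; /* to the output rate  */
        samples = samples * 2 + 10000;                      /* resampler margin    */
        samples = max64(samples, out_frame_size);           /* >= one frame        */
        return samples * osize * out_channels;              /* back to bytes       */
    }

    int main(void)
    {
        /* 4096 bytes of s16 stereo 44.1 kHz, encoded as s16 stereo 48 kHz,
         * 1152-sample encoder frames */
        printf("%lld bytes\n",
               (long long)audio_buf_bytes(4096, 2, 2, 44100, 2, 2, 48000, 1152));
        return 0;
    }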
needed for audio drift compensation */ + if (audio_sync_method <= 1 && + ost->resample_sample_fmt == enc->sample_fmt && + ost->resample_channels == enc->channels && + ost->resample_sample_rate == enc->sample_rate) { + ost->resample = NULL; + ost->audio_resample = 0; + } else if (ost->audio_resample) { + if (dec->sample_fmt != AV_SAMPLE_FMT_S16) + fprintf(stderr, "Warning, using s16 intermediate sample format for resampling\n"); + ost->resample = av_audio_resample_init(enc->channels, dec->channels, + enc->sample_rate, dec->sample_rate, + enc->sample_fmt, dec->sample_fmt, + 16, 10, 0, 0.8); + if (!ost->resample) { + fprintf(stderr, "Can not resample %d channels @ %d Hz to %d channels @ %d Hz\n", + dec->channels, dec->sample_rate, + enc->channels, enc->sample_rate); + exit_program(1); + } + } + } + +#define MAKE_SFMT_PAIR(a,b) ((a)+AV_SAMPLE_FMT_NB*(b)) + if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt && + MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt)!=ost->reformat_pair) { + if (ost->reformat_ctx) + av_audio_convert_free(ost->reformat_ctx); + ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1, + dec->sample_fmt, 1, NULL, 0); + if (!ost->reformat_ctx) { + fprintf(stderr, "Cannot convert %s sample format to %s sample format\n", + av_get_sample_fmt_name(dec->sample_fmt), + av_get_sample_fmt_name(enc->sample_fmt)); + exit_program(1); + } + ost->reformat_pair=MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt); + } + + if(audio_sync_method){ + double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts + - av_fifo_size(ost->fifo)/(enc->channels * 2); + double idelta= delta*dec->sample_rate / enc->sample_rate; + int byte_delta= ((int)idelta)*2*dec->channels; + + //FIXME resample delay + if(fabs(delta) > 50){ + if(ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate){ + if(byte_delta < 0){ + byte_delta= FFMAX(byte_delta, -size); + size += byte_delta; + buf -= byte_delta; + if(verbose > 2) + fprintf(stderr, "discarding %d audio samples\n", (int)-delta); + if(!size) + return; + ist->is_start=0; + }else{ + static uint8_t *input_tmp= NULL; + input_tmp= av_realloc(input_tmp, byte_delta + size); + + if(byte_delta > allocated_for_size - size){ + allocated_for_size= byte_delta + (int64_t)size; + goto need_realloc; + } + ist->is_start=0; + + memset(input_tmp, 0, byte_delta); + memcpy(input_tmp + byte_delta, buf, size); + buf= input_tmp; + size += byte_delta; + if(verbose > 2) + fprintf(stderr, "adding %d audio samples of silence\n", (int)delta); + } + }else if(audio_sync_method>1){ + int comp= av_clip(delta, -audio_sync_method, audio_sync_method); + av_assert0(ost->audio_resample); + if(verbose > 2) + fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate); +// fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2)); + av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate); + } + } + }else + ost->sync_opts= lrintf(get_sync_ipts(ost) * enc->sample_rate) + - av_fifo_size(ost->fifo)/(enc->channels * 2); //FIXME wrong + + if (ost->audio_resample) { + buftmp = audio_buf; + size_out = audio_resample(ost->resample, + (short *)buftmp, (short *)buf, + size / (dec->channels * isize)); + size_out = size_out * enc->channels * osize; + } else { + buftmp = buf; + size_out = size; + } + + if (!ost->audio_resample && 
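When audio_sync_method is enabled, the code above measures drift as the difference between the position implied by the input timestamp and the number of output samples already accounted for (sync_opts plus whatever still sits in the FIFO), then converts that into a count of 16-bit input bytes to discard or pad with silence. A sketch of just that arithmetic, assuming the same s16 intermediate format; function and parameter names are illustrative:

    #include <stdio.h>

    static int drift_to_byte_delta(double ipts_seconds, long long sync_opts,
                                   int fifo_bytes, int enc_rate, int enc_channels,
                                   int dec_rate, int dec_channels)
    {
        double delta  = ipts_seconds * enc_rate - (double)sync_opts
                        - fifo_bytes / (enc_channels * 2.0);  /* output samples */
        double idelta = delta * dec_rate / enc_rate;          /* input samples  */
        return (int)idelta * 2 * dec_channels;                /* s16 input bytes */
    }

    int main(void)
    {
        /* input claims 1.010 s, encoder has consumed 48000 samples, FIFO empty */
        int bytes = drift_to_byte_delta(1.010, 48000, 0, 48000, 2, 44100, 2);
        printf("pad/drop %d input bytes\n", bytes);  /* positive: pad silence */
        return 0;
    }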
dec->sample_fmt!=enc->sample_fmt) { + const void *ibuf[6]= {buftmp}; + void *obuf[6]= {audio_buf}; + int istride[6]= {isize}; + int ostride[6]= {osize}; + int len= size_out/istride[0]; + if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) { + printf("av_audio_convert() failed\n"); + if (exit_on_error) + exit_program(1); + return; + } + buftmp = audio_buf; + size_out = len*osize; + } + + /* now encode as many frames as possible */ + if (enc->frame_size > 1) { + /* output resampled raw samples */ + if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) { + fprintf(stderr, "av_fifo_realloc2() failed\n"); + exit_program(1); + } + av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL); + + frame_bytes = enc->frame_size * osize * enc->channels; + + while (av_fifo_size(ost->fifo) >= frame_bytes) { + AVPacket pkt; + av_init_packet(&pkt); + + av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL); + + //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio() + + ret = avcodec_encode_audio(enc, audio_out, audio_out_size, + (short *)audio_buf); + if (ret < 0) { + fprintf(stderr, "Audio encoding failed\n"); + exit_program(1); + } + audio_size += ret; + pkt.stream_index= ost->index; + pkt.data= audio_out; + pkt.size= ret; + if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE) + pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); + pkt.flags |= AV_PKT_FLAG_KEY; + write_frame(s, &pkt, enc, ost->bitstream_filters); + + ost->sync_opts += enc->frame_size; + } + } else { + AVPacket pkt; + av_init_packet(&pkt); + + ost->sync_opts += size_out / (osize * enc->channels); + + /* output a pcm frame */ + /* determine the size of the coded buffer */ + size_out /= osize; + if (coded_bps) + size_out = size_out*coded_bps/8; + + if(size_out > audio_out_size){ + fprintf(stderr, "Internal error, buffer size too small\n"); + exit_program(1); + } + + //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio() + ret = avcodec_encode_audio(enc, audio_out, size_out, + (short *)buftmp); + if (ret < 0) { + fprintf(stderr, "Audio encoding failed\n"); + exit_program(1); + } + audio_size += ret; + pkt.stream_index= ost->index; + pkt.data= audio_out; + pkt.size= ret; + if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE) + pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); + pkt.flags |= AV_PKT_FLAG_KEY; + write_frame(s, &pkt, enc, ost->bitstream_filters); + } +} + +static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp) +{ + AVCodecContext *dec; + AVPicture *picture2; + AVPicture picture_tmp; + uint8_t *buf = 0; + + dec = ist->st->codec; + + /* deinterlace : must be done before any resize */ + if (do_deinterlace) { + int size; + + /* create temporary picture */ + size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height); + buf = av_malloc(size); + if (!buf) + return; + + picture2 = &picture_tmp; + avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height); + + if(avpicture_deinterlace(picture2, picture, + dec->pix_fmt, dec->width, dec->height) < 0) { + /* if error, do not deinterlace */ + fprintf(stderr, "Deinterlacing failed\n"); + av_free(buf); + buf = NULL; + picture2 = picture; + } + } else { + picture2 = picture; + } + + if (picture != picture2) + *picture = *picture2; + *bufp = buf; +} + +/* we begin to correct av delay at this threshold */ +#define AV_DELAY_MAX 0.100 + +static void do_subtitle_out(AVFormatContext *s, + 
OutputStream *ost, + InputStream *ist, + AVSubtitle *sub, + int64_t pts) +{ + static uint8_t *subtitle_out = NULL; + int subtitle_out_max_size = 1024 * 1024; + int subtitle_out_size, nb, i; + AVCodecContext *enc; + AVPacket pkt; + + if (pts == AV_NOPTS_VALUE) { + fprintf(stderr, "Subtitle packets must have a pts\n"); + if (exit_on_error) + exit_program(1); + return; + } + + enc = ost->st->codec; + + if (!subtitle_out) { + subtitle_out = av_malloc(subtitle_out_max_size); + } + + /* Note: DVB subtitle need one packet to draw them and one other + packet to clear them */ + /* XXX: signal it in the codec context ? */ + if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) + nb = 2; + else + nb = 1; + + for(i = 0; i < nb; i++) { + sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q); + // start_display_time is required to be 0 + sub->pts += av_rescale_q(sub->start_display_time, (AVRational){1, 1000}, AV_TIME_BASE_Q); + sub->end_display_time -= sub->start_display_time; + sub->start_display_time = 0; + subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out, + subtitle_out_max_size, sub); + if (subtitle_out_size < 0) { + fprintf(stderr, "Subtitle encoding failed\n"); + exit_program(1); + } + + av_init_packet(&pkt); + pkt.stream_index = ost->index; + pkt.data = subtitle_out; + pkt.size = subtitle_out_size; + pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base); + if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) { + /* XXX: the pts correction is handled here. Maybe handling + it in the codec would be better */ + if (i == 0) + pkt.pts += 90 * sub->start_display_time; + else + pkt.pts += 90 * sub->end_display_time; + } + write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters); + } +} + +static int bit_buffer_size= 1024*256; +static uint8_t *bit_buffer= NULL; + +static void do_video_out(AVFormatContext *s, + OutputStream *ost, + InputStream *ist, + AVFrame *in_picture, + int *frame_size, float quality) +{ + int nb_frames, i, ret, resample_changed; + AVFrame *final_picture, *formatted_picture; + AVCodecContext *enc, *dec; + double sync_ipts; + + enc = ost->st->codec; + dec = ist->st->codec; + + sync_ipts = get_sync_ipts(ost) / av_q2d(enc->time_base); + + /* by default, we output a single frame */ + nb_frames = 1; + + *frame_size = 0; + + if(video_sync_method){ + double vdelta = sync_ipts - ost->sync_opts; + //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c + if (vdelta < -1.1) + nb_frames = 0; + else if (video_sync_method == 2 || (video_sync_method<0 && (s->oformat->flags & AVFMT_VARIABLE_FPS))){ + if(vdelta<=-0.6){ + nb_frames=0; + }else if(vdelta>0.6) + ost->sync_opts= lrintf(sync_ipts); + }else if (vdelta > 1.1) + nb_frames = lrintf(vdelta); +//fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, get_sync_ipts(ost), nb_frames); + if (nb_frames == 0){ + ++nb_frames_drop; + if (verbose>2) + fprintf(stderr, "*** drop!\n"); + }else if (nb_frames > 1) { + nb_frames_dup += nb_frames - 1; + if (verbose>2) + fprintf(stderr, "*** %d dup!\n", nb_frames-1); + } + }else + ost->sync_opts= lrintf(sync_ipts); + + nb_frames= FFMIN(nb_frames, max_frames[AVMEDIA_TYPE_VIDEO] - ost->frame_number); + if (nb_frames <= 0) + return; + + formatted_picture = in_picture; + final_picture = formatted_picture; + + resample_changed = ost->resample_width != dec->width || + ost->resample_height != dec->height || + ost->resample_pix_fmt != dec->pix_fmt; + + if (resample_changed) { + av_log(NULL, AV_LOG_INFO, + "Input stream #%d.%d 
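The video sync block above decides, per decoded frame, whether to drop it, emit it once, or duplicate it, by comparing the frame's ideal position (sync_ipts, measured in output frame intervals) against the frames already written. A standalone sketch of that decision; vfr_output stands in for the AVFMT_VARIABLE_FPS check and the function name is illustrative:

    #include <stdio.h>
    #include <math.h>

    static int frames_to_emit(double sync_ipts, long long *sync_opts,
                              int sync_method, int vfr_output)
    {
        int nb_frames = 1;
        if (sync_method) {
            double vdelta = sync_ipts - (double)*sync_opts;
            if (vdelta < -1.1)
                nb_frames = 0;                          /* far too late: drop   */
            else if (sync_method == 2 || (sync_method < 0 && vfr_output)) {
                if (vdelta <= -0.6)
                    nb_frames = 0;                      /* drop                 */
                else if (vdelta > 0.6)
                    *sync_opts = llrint(sync_ipts);     /* resync, keep 1 frame */
            } else if (vdelta > 1.1)
                nb_frames = lrint(vdelta);              /* duplicate to catch up */
        } else
            *sync_opts = llrint(sync_ipts);
        return nb_frames;
    }

    int main(void)
    {
        long long opts = 100;
        printf("%d frames\n", frames_to_emit(103.2, &opts, 1, 0));  /* 3 */
        return 0;
    }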
frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n", + ist->file_index, ist->st->index, + ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt), + dec->width , dec->height , av_get_pix_fmt_name(dec->pix_fmt)); + if(!ost->video_resample) + exit_program(1); + } + +#if !CONFIG_AVFILTER + if (ost->video_resample) { + final_picture = &ost->pict_tmp; + if (resample_changed) { + /* initialize a new scaler context */ + sws_freeContext(ost->img_resample_ctx); + ost->img_resample_ctx = sws_getContext( + ist->st->codec->width, + ist->st->codec->height, + ist->st->codec->pix_fmt, + ost->st->codec->width, + ost->st->codec->height, + ost->st->codec->pix_fmt, + ost->sws_flags, NULL, NULL, NULL); + if (ost->img_resample_ctx == NULL) { + fprintf(stderr, "Cannot get resampling context\n"); + exit_program(1); + } + } + sws_scale(ost->img_resample_ctx, formatted_picture->data, formatted_picture->linesize, + 0, ost->resample_height, final_picture->data, final_picture->linesize); + } +#endif + + /* duplicates frame if needed */ + for(i=0;iindex; + + if (s->oformat->flags & AVFMT_RAWPICTURE) { + /* raw pictures are written as AVPicture structure to + avoid any copies. We support temorarily the older + method. */ + AVFrame* old_frame = enc->coded_frame; + enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack + pkt.data= (uint8_t *)final_picture; + pkt.size= sizeof(AVPicture); + pkt.pts= av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base); + pkt.flags |= AV_PKT_FLAG_KEY; + + write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters); + enc->coded_frame = old_frame; + } else { + AVFrame big_picture; + + big_picture= *final_picture; + /* better than nothing: use input picture interlaced + settings */ + big_picture.interlaced_frame = in_picture->interlaced_frame; + if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) { + if(top_field_first == -1) + big_picture.top_field_first = in_picture->top_field_first; + else + big_picture.top_field_first = top_field_first; + } + + /* handles sameq here. This is not correct because it may + not be a global option */ + big_picture.quality = quality; + if(!me_threshold) + big_picture.pict_type = 0; +// big_picture.pts = AV_NOPTS_VALUE; + big_picture.pts= ost->sync_opts; +// big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den); +//av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts); + if (ost->forced_kf_index < ost->forced_kf_count && + big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) { + big_picture.pict_type = AV_PICTURE_TYPE_I; + ost->forced_kf_index++; + } + ret = avcodec_encode_video(enc, + bit_buffer, bit_buffer_size, + &big_picture); + if (ret < 0) { + fprintf(stderr, "Video encoding failed\n"); + exit_program(1); + } + + if(ret>0){ + pkt.data= bit_buffer; + pkt.size= ret; + if(enc->coded_frame->pts != AV_NOPTS_VALUE) + pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); +/*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n", + pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1, + pkt.dts != AV_NOPTS_VALUE ? 
av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/ + + if(enc->coded_frame->key_frame) + pkt.flags |= AV_PKT_FLAG_KEY; + write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters); + *frame_size = ret; + video_size += ret; + //fprintf(stderr,"\nFrame: %3d size: %5d type: %d", + // enc->frame_number-1, ret, enc->pict_type); + /* if two pass, output log */ + if (ost->logfile && enc->stats_out) { + fprintf(ost->logfile, "%s", enc->stats_out); + } + } + } + ost->sync_opts++; + ost->frame_number++; + } +} + +static double psnr(double d){ + return -10.0*log(d)/log(10.0); +} + +static void do_video_stats(AVFormatContext *os, OutputStream *ost, + int frame_size) +{ + AVCodecContext *enc; + int frame_number; + double ti1, bitrate, avg_bitrate; + + /* this is executed just the first time do_video_stats is called */ + if (!vstats_file) { + vstats_file = fopen(vstats_filename, "w"); + if (!vstats_file) { + perror("fopen"); + exit_program(1); + } + } + + enc = ost->st->codec; + if (enc->codec_type == AVMEDIA_TYPE_VIDEO) { + frame_number = ost->frame_number; + fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA); + if (enc->flags&CODEC_FLAG_PSNR) + fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0))); + + fprintf(vstats_file,"f_size= %6d ", frame_size); + /* compute pts value */ + ti1 = ost->sync_opts * av_q2d(enc->time_base); + if (ti1 < 0.01) + ti1 = 0.01; + + bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0; + avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0; + fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ", + (double)video_size / 1024, ti1, bitrate, avg_bitrate); + fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type)); + } +} + +static void print_report(AVFormatContext **output_files, + OutputStream **ost_table, int nb_ostreams, + int is_last_report) +{ + char buf[1024]; + OutputStream *ost; + AVFormatContext *oc; + int64_t total_size; + AVCodecContext *enc; + int frame_number, vid, i; + double bitrate, ti1, pts; + static int64_t last_time = -1; + static int qp_histogram[52]; + + if (!is_last_report) { + int64_t cur_time; + /* display the report every 0.5 seconds */ + cur_time = av_gettime(); + if (last_time == -1) { + last_time = cur_time; + return; + } + if ((cur_time - last_time) < 500000) + return; + last_time = cur_time; + } + + + oc = output_files[0]; + + total_size = avio_size(oc->pb); + if(total_size<0) // FIXME improve avio_size() so it works with non seekable output too + total_size= avio_tell(oc->pb); + + buf[0] = '\0'; + ti1 = 1e10; + vid = 0; + for(i=0;ist->codec; + if (!ost->st->stream_copy && enc->coded_frame) + q = enc->coded_frame->quality/(float)FF_QP2LAMBDA; + if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q); + } + if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) { + float t = (av_gettime()-timer_start) / 1000000.0; + + frame_number = ost->frame_number; + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ", + frame_number, (t>1)?(int)(frame_number/t+0.5) : 0, q); + if(is_last_report) + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L"); + if(qp_hist){ + int j; + int qp = lrintf(q); + if(qp>=0 && qpflags&CODEC_FLAG_PSNR){ + int j; + double error, error_sum=0; + double scale, scale_sum=0; + char type[3]= {'Y','U','V'}; + 
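The PSNR figures above are derived from the encoder's accumulated squared error: each plane's error is divided by width*height*255^2 (quartered for chroma in the per-plane report) and converted to dB with -10*log10(), which is exactly what the psnr() helper computes. A worked example with made-up numbers:

    #include <stdio.h>
    #include <math.h>

    static double psnr_from_normalised_error(double d)
    {
        return -10.0 * log(d) / log(10.0);   /* same as -10*log10(d) */
    }

    int main(void)
    {
        int    w = 1280, h = 720;
        double sse_luma = 4.2e6;                       /* made-up squared error */
        double scale    = (double)w * h * 255.0 * 255.0;
        printf("Y PSNR = %.2f dB\n",
               psnr_from_normalised_error(sse_luma / scale));
        return 0;
    }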
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR="); + for(j=0; j<3; j++){ + if(is_last_report){ + error= enc->error[j]; + scale= enc->width*enc->height*255.0*255.0*frame_number; + }else{ + error= enc->coded_frame->error[j]; + scale= enc->width*enc->height*255.0*255.0; + } + if(j) scale/=4; + error_sum += error; + scale_sum += scale; + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale)); + } + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum)); + } + vid = 1; + } + /* compute min output value */ + pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base); + if ((pts < ti1) && (pts > 0)) + ti1 = pts; + } + if (ti1 < 0.01) + ti1 = 0.01; + + if (verbose > 0 || is_last_report) { + bitrate = (double)(total_size * 8) / ti1 / 1000.0; + + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), + "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s", + (double)total_size / 1024, ti1, bitrate); + + if (nb_frames_dup || nb_frames_drop) + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d", + nb_frames_dup, nb_frames_drop); + + if (verbose >= 0) + fprintf(stderr, "%s \r", buf); + + fflush(stderr); + } + + if (is_last_report && verbose >= 0){ + int64_t raw= audio_size + video_size + extra_size; + fprintf(stderr, "\n"); + fprintf(stderr, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n", + video_size/1024.0, + audio_size/1024.0, + extra_size/1024.0, + 100.0*(total_size - raw)/raw + ); + } +} + +static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size) +{ + int fill_char = 0x00; + if (sample_fmt == AV_SAMPLE_FMT_U8) + fill_char = 0x80; + memset(buf, fill_char, size); +} + +/* pkt = NULL means EOF (needed to flush decoder buffers) */ +static int output_packet(InputStream *ist, int ist_index, + OutputStream **ost_table, int nb_ostreams, + const AVPacket *pkt) +{ + AVFormatContext *os; + OutputStream *ost; + int ret, i; + int got_output; + AVFrame picture; + void *buffer_to_free = NULL; + static unsigned int samples_size= 0; + AVSubtitle subtitle, *subtitle_to_free; + int64_t pkt_pts = AV_NOPTS_VALUE; +#if CONFIG_AVFILTER + int frame_available; +#endif + float quality; + + AVPacket avpkt; + int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt); + + if(ist->next_pts == AV_NOPTS_VALUE) + ist->next_pts= ist->pts; + + if (pkt == NULL) { + /* EOF handling */ + av_init_packet(&avpkt); + avpkt.data = NULL; + avpkt.size = 0; + goto handle_eof; + } else { + avpkt = *pkt; + } + + if(pkt->dts != AV_NOPTS_VALUE) + ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); + if(pkt->pts != AV_NOPTS_VALUE) + pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q); + + //while we have more to decode or while the decoder did output something on EOF + while (avpkt.size > 0 || (!pkt && got_output)) { + uint8_t *data_buf, *decoded_data_buf; + int data_size, decoded_data_size; + handle_eof: + ist->pts= ist->next_pts; + + if(avpkt.size && avpkt.size != pkt->size && + ((!ist->showed_multi_packet_warning && verbose>0) || verbose>1)){ + fprintf(stderr, "Multiple frames in a packet from stream %d\n", pkt->stream_index); + ist->showed_multi_packet_warning=1; + } + + /* decode the packet if needed */ + decoded_data_buf = NULL; /* fail safe */ + decoded_data_size= 0; + data_buf = avpkt.data; + data_size = avpkt.size; + subtitle_to_free = NULL; + if (ist->decoding_needed) { + switch(ist->st->codec->codec_type) { + case 
AVMEDIA_TYPE_AUDIO:{ + if(pkt && samples_size < FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE)) { + samples_size = FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE); + av_free(samples); + samples= av_malloc(samples_size); + } + decoded_data_size= samples_size; + /* XXX: could avoid copy if PCM 16 bits with same + endianness as CPU */ + ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size, + &avpkt); + if (ret < 0) + return ret; + avpkt.data += ret; + avpkt.size -= ret; + data_size = ret; + got_output = decoded_data_size > 0; + /* Some bug in mpeg audio decoder gives */ + /* decoded_data_size < 0, it seems they are overflows */ + if (!got_output) { + /* no audio frame */ + continue; + } + decoded_data_buf = (uint8_t *)samples; + ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) / + (ist->st->codec->sample_rate * ist->st->codec->channels); + break;} + case AVMEDIA_TYPE_VIDEO: + decoded_data_size = (ist->st->codec->width * ist->st->codec->height * 3) / 2; + /* XXX: allocate picture correctly */ + avcodec_get_frame_defaults(&picture); + avpkt.pts = pkt_pts; + avpkt.dts = ist->pts; + pkt_pts = AV_NOPTS_VALUE; + + ret = avcodec_decode_video2(ist->st->codec, + &picture, &got_output, &avpkt); + quality = same_quality ? picture.quality : 0; + if (ret < 0) + return ret; + if (!got_output) { + /* no picture yet */ + goto discard_packet; + } + ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, picture.pkt_pts, picture.pkt_dts); + if (ist->st->codec->time_base.num != 0) { + int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame; + ist->next_pts += ((int64_t)AV_TIME_BASE * + ist->st->codec->time_base.num * ticks) / + ist->st->codec->time_base.den; + } + avpkt.size = 0; + buffer_to_free = NULL; + pre_process_video_frame(ist, (AVPicture *)&picture, &buffer_to_free); + break; + case AVMEDIA_TYPE_SUBTITLE: + ret = avcodec_decode_subtitle2(ist->st->codec, + &subtitle, &got_output, &avpkt); + if (ret < 0) + return ret; + if (!got_output) { + goto discard_packet; + } + subtitle_to_free = &subtitle; + avpkt.size = 0; + break; + default: + return -1; + } + } else { + switch(ist->st->codec->codec_type) { + case AVMEDIA_TYPE_AUDIO: + ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) / + ist->st->codec->sample_rate; + break; + case AVMEDIA_TYPE_VIDEO: + if (ist->st->codec->time_base.num != 0) { + int ticks= ist->st->parser ? 
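For decoded video, next_pts above advances by one frame duration expressed in AV_TIME_BASE (microsecond) units, computed from the codec time base and ticks_per_frame (or the parser's repeat_pict + 1). A worked, standalone version of that arithmetic; TIME_BASE stands in for AV_TIME_BASE and the sample values are only an example:

    #include <stdio.h>
    #include <stdint.h>

    #define TIME_BASE 1000000   /* stands in for AV_TIME_BASE */

    static int64_t frame_duration_us(int tb_num, int tb_den, int ticks)
    {
        return ((int64_t)TIME_BASE * tb_num * ticks) / tb_den;
    }

    int main(void)
    {
        int64_t next_pts = 0;
        /* a codec time base of 1001/60000 with ticks_per_frame = 2
         * corresponds to one 29.97 fps frame */
        next_pts += frame_duration_us(1001, 60000, 2);
        printf("next_pts advanced to %lld us\n", (long long)next_pts);  /* 33366 */
        return 0;
    }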
ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame; + ist->next_pts += ((int64_t)AV_TIME_BASE * + ist->st->codec->time_base.num * ticks) / + ist->st->codec->time_base.den; + } + break; + } + ret = avpkt.size; + avpkt.size = 0; + } + +#if CONFIG_AVFILTER + if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { + for (i = 0; i < nb_ostreams; i++) { + ost = ost_table[i]; + if (ost->input_video_filter && ost->source_index == ist_index) { + AVRational sar; + if (ist->st->sample_aspect_ratio.num) + sar = ist->st->sample_aspect_ratio; + else + sar = ist->st->codec->sample_aspect_ratio; + // add it to be filtered + av_vsrc_buffer_add_frame(ost->input_video_filter, &picture, + ist->pts, + sar); + } + } + } +#endif + + // preprocess audio (volume) + if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { + if (audio_volume != 256) { + short *volp; + volp = samples; + for(i=0;i<(decoded_data_size / sizeof(short));i++) { + int v = ((*volp) * audio_volume + 128) >> 8; + if (v < -32768) v = -32768; + if (v > 32767) v = 32767; + *volp++ = v; + } + } + } + + /* frame rate emulation */ + if (rate_emu) { + int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE); + int64_t now = av_gettime() - ist->start; + if (pts > now) + usleep(pts - now); + } + /* if output time reached then transcode raw format, + encode packets and output them */ + if (start_time == 0 || ist->pts >= start_time) + for(i=0;isource_index == ist_index) { +#if CONFIG_AVFILTER + frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || + !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]); + while (frame_available) { + AVRational ist_pts_tb; + if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter) + get_filtered_video_frame(ost->output_video_filter, &picture, &ost->picref, &ist_pts_tb); + if (ost->picref) + ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q); +#endif + os = output_files[ost->file_index]; + + /* set the input output pts pairs */ + //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE; + + if (ost->encoding_needed) { + av_assert0(ist->decoding_needed); + switch(ost->st->codec->codec_type) { + case AVMEDIA_TYPE_AUDIO: + do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size); + break; + case AVMEDIA_TYPE_VIDEO: +#if CONFIG_AVFILTER + if (ost->picref->video && !ost->frame_aspect_ratio) + ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect; +#endif + do_video_out(os, ost, ist, &picture, &frame_size, + same_quality ? 
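The -vol handling above treats audio_volume as a Q8 fixed-point gain (256 means unity), applied to s16 samples with rounding and clipping. A self-contained sketch of the same loop:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    static void apply_volume_s16(int16_t *samples, size_t n, int volume_q8)
    {
        for (size_t i = 0; i < n; i++) {
            int v = (samples[i] * volume_q8 + 128) >> 8;  /* scale + round */
            if (v < -32768) v = -32768;                   /* clip          */
            if (v >  32767) v =  32767;
            samples[i] = (int16_t)v;
        }
    }

    int main(void)
    {
        int16_t buf[3] = { 1000, -20000, 30000 };
        apply_volume_s16(buf, 3, 384);                    /* 384/256 = 1.5x gain */
        printf("%d %d %d\n", buf[0], buf[1], buf[2]);     /* 1500 -30000 32767   */
        return 0;
    }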
quality : ost->st->codec->global_quality); + if (vstats_filename && frame_size) + do_video_stats(os, ost, frame_size); + break; + case AVMEDIA_TYPE_SUBTITLE: + do_subtitle_out(os, ost, ist, &subtitle, + pkt->pts); + break; + default: + abort(); + } + } else { + AVFrame avframe; //FIXME/XXX remove this + AVPacket opkt; + int64_t ost_tb_start_time= av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base); + + av_init_packet(&opkt); + + if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes) +#if !CONFIG_AVFILTER + continue; +#else + goto cont; +#endif + + /* no reencoding needed : output the packet directly */ + /* force the input stream PTS */ + + avcodec_get_frame_defaults(&avframe); + ost->st->codec->coded_frame= &avframe; + avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY; + + if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) + audio_size += data_size; + else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { + video_size += data_size; + ost->sync_opts++; + } + + opkt.stream_index= ost->index; + if(pkt->pts != AV_NOPTS_VALUE) + opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time; + else + opkt.pts= AV_NOPTS_VALUE; + + if (pkt->dts == AV_NOPTS_VALUE) + opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base); + else + opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base); + opkt.dts -= ost_tb_start_time; + + opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base); + opkt.flags= pkt->flags; + + //FIXME remove the following 2 lines they shall be replaced by the bitstream filters + if( ost->st->codec->codec_id != CODEC_ID_H264 + && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO + && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO + ) { + if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY)) + opkt.destruct= av_destruct_packet; + } else { + opkt.data = data_buf; + opkt.size = data_size; + } + + write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters); + ost->st->codec->frame_number++; + ost->frame_number++; + av_free_packet(&opkt); + } +#if CONFIG_AVFILTER + cont: + frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) && + ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]); + if (ost->picref) + avfilter_unref_buffer(ost->picref); + } +#endif + } + } + + av_free(buffer_to_free); + /* XXX: allocate the subtitles in the codec ? 
*/ + if (subtitle_to_free) { + avsubtitle_free(subtitle_to_free); + subtitle_to_free = NULL; + } + } + discard_packet: + if (pkt == NULL) { + /* EOF handling */ + + for(i=0;isource_index == ist_index) { + AVCodecContext *enc= ost->st->codec; + os = output_files[ost->file_index]; + + if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1) + continue; + if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE)) + continue; + + if (ost->encoding_needed) { + for(;;) { + AVPacket pkt; + int fifo_bytes; + av_init_packet(&pkt); + pkt.stream_index= ost->index; + + switch(ost->st->codec->codec_type) { + case AVMEDIA_TYPE_AUDIO: + fifo_bytes = av_fifo_size(ost->fifo); + ret = 0; + /* encode any samples remaining in fifo */ + if (fifo_bytes > 0) { + int osize = av_get_bytes_per_sample(enc->sample_fmt); + int fs_tmp = enc->frame_size; + + av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL); + if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) { + enc->frame_size = fifo_bytes / (osize * enc->channels); + } else { /* pad */ + int frame_bytes = enc->frame_size*osize*enc->channels; + if (allocated_audio_buf_size < frame_bytes) + exit_program(1); + generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes); + } + + ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf); + pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den, + ost->st->time_base.num, enc->sample_rate); + enc->frame_size = fs_tmp; + } + if(ret <= 0) { + ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL); + } + if (ret < 0) { + fprintf(stderr, "Audio encoding failed\n"); + exit_program(1); + } + audio_size += ret; + pkt.flags |= AV_PKT_FLAG_KEY; + break; + case AVMEDIA_TYPE_VIDEO: + ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL); + if (ret < 0) { + fprintf(stderr, "Video encoding failed\n"); + exit_program(1); + } + video_size += ret; + if(enc->coded_frame && enc->coded_frame->key_frame) + pkt.flags |= AV_PKT_FLAG_KEY; + if (ost->logfile && enc->stats_out) { + fprintf(ost->logfile, "%s", enc->stats_out); + } + break; + default: + ret=-1; + } + + if(ret<=0) + break; + pkt.data= bit_buffer; + pkt.size= ret; + if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE) + pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); + write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters); + } + } + } + } + } + + return 0; +} + +static void print_sdp(AVFormatContext **avc, int n) +{ + char sdp[2048]; + + av_sdp_create(avc, n, sdp, sizeof(sdp)); + printf("SDP:\n%s\n", sdp); + fflush(stdout); +} + +static int copy_chapters(int infile, int outfile) +{ + AVFormatContext *is = input_files[infile].ctx; + AVFormatContext *os = output_files[outfile]; + int i; + + for (i = 0; i < is->nb_chapters; i++) { + AVChapter *in_ch = is->chapters[i], *out_ch; + int64_t ts_off = av_rescale_q(start_time - input_files[infile].ts_offset, + AV_TIME_BASE_Q, in_ch->time_base); + int64_t rt = (recording_time == INT64_MAX) ? 
INT64_MAX : + av_rescale_q(recording_time, AV_TIME_BASE_Q, in_ch->time_base); + + + if (in_ch->end < ts_off) + continue; + if (rt != INT64_MAX && in_ch->start > rt + ts_off) + break; + + out_ch = av_mallocz(sizeof(AVChapter)); + if (!out_ch) + return AVERROR(ENOMEM); + + out_ch->id = in_ch->id; + out_ch->time_base = in_ch->time_base; + out_ch->start = FFMAX(0, in_ch->start - ts_off); + out_ch->end = FFMIN(rt, in_ch->end - ts_off); + + if (metadata_chapters_autocopy) + av_dict_copy(&out_ch->metadata, in_ch->metadata, 0); + + os->nb_chapters++; + os->chapters = av_realloc(os->chapters, sizeof(AVChapter)*os->nb_chapters); + if (!os->chapters) + return AVERROR(ENOMEM); + os->chapters[os->nb_chapters - 1] = out_ch; + } + return 0; +} + +static void parse_forced_key_frames(char *kf, OutputStream *ost, + AVCodecContext *avctx) +{ + char *p; + int n = 1, i; + int64_t t; + + for (p = kf; *p; p++) + if (*p == ',') + n++; + ost->forced_kf_count = n; + ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n); + if (!ost->forced_kf_pts) { + av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n"); + exit_program(1); + } + for (i = 0; i < n; i++) { + p = i ? strchr(p, ',') + 1 : kf; + t = parse_time_or_die("force_key_frames", p, 1); + ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base); + } +} + +/* + * The following code is the main loop of the file converter + */ +static int transcode(AVFormatContext **output_files, + int nb_output_files, + InputFile *input_files, + int nb_input_files, + StreamMap *stream_maps, int nb_stream_maps) +{ + int ret = 0, i, j, k, n, nb_ostreams = 0; + AVFormatContext *is, *os; + AVCodecContext *codec, *icodec; + OutputStream *ost, **ost_table = NULL; + InputStream *ist; + char error[1024]; + int want_sdp = 1; + uint8_t no_packet[MAX_FILES]={0}; + int no_packet_count=0; + + if (rate_emu) + for (i = 0; i < nb_input_streams; i++) + input_streams[i].start = av_gettime(); + + /* output stream init */ + nb_ostreams = 0; + for(i=0;inb_streams && !(os->oformat->flags & AVFMT_NOSTREAMS)) { + av_dump_format(output_files[i], i, output_files[i]->filename, 1); + fprintf(stderr, "Output file #%d does not contain any stream\n", i); + ret = AVERROR(EINVAL); + goto fail; + } + nb_ostreams += os->nb_streams; + } + if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) { + fprintf(stderr, "Number of stream maps must match number of output streams\n"); + ret = AVERROR(EINVAL); + goto fail; + } + + /* Sanity check the mapping args -- do the input files & streams exist? 
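parse_forced_key_frames() above sizes its array by counting commas and then walks the list with strchr(). A standalone sketch of the same parsing; it reads plain seconds with strtod(), whereas the patch uses parse_time_or_die() and rescales the result into the encoder time base:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static double *parse_kf_list(const char *kf, int *count)
    {
        int n = 1;
        for (const char *q = kf; *q; q++)
            if (*q == ',')
                n++;                        /* one entry per comma, plus one */
        double *pts = malloc(sizeof(*pts) * n);
        if (!pts)
            return NULL;
        const char *p = kf;
        for (int i = 0; i < n; i++) {
            pts[i] = strtod(p, NULL);
            p = strchr(p, ',');
            if (p) p++;                     /* skip the comma for the next entry */
        }
        *count = n;
        return pts;
    }

    int main(void)
    {
        int n;
        double *pts = parse_kf_list("1.5,10,30.25", &n);
        for (int i = 0; pts && i < n; i++)
            printf("keyframe at %.2f s\n", pts[i]);
        free(pts);
        return 0;
    }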
*/ + for(i=0;i nb_input_files - 1 || + si < 0 || si > input_files[fi].ctx->nb_streams - 1) { + fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si); + ret = AVERROR(EINVAL); + goto fail; + } + fi = stream_maps[i].sync_file_index; + si = stream_maps[i].sync_stream_index; + if (fi < 0 || fi > nb_input_files - 1 || + si < 0 || si > input_files[fi].ctx->nb_streams - 1) { + fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si); + ret = AVERROR(EINVAL); + goto fail; + } + } + + ost_table = av_mallocz(sizeof(OutputStream *) * nb_ostreams); + if (!ost_table) + goto fail; + n = 0; + for(k=0;knb_streams;i++,n++) { + int found; + ost = ost_table[n] = output_streams_for_file[k][i]; + if (nb_stream_maps > 0) { + ost->source_index = input_files[stream_maps[n].file_index].ist_index + + stream_maps[n].stream_index; + + /* Sanity check that the stream types match */ + if (input_streams[ost->source_index].st->codec->codec_type != ost->st->codec->codec_type) { + int i= ost->file_index; + av_dump_format(output_files[i], i, output_files[i]->filename, 1); + fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n", + stream_maps[n].file_index, stream_maps[n].stream_index, + ost->file_index, ost->index); + exit_program(1); + } + + } else { + int best_nb_frames=-1; + /* get corresponding input stream index : we select the first one with the right type */ + found = 0; + for (j = 0; j < nb_input_streams; j++) { + int skip=0; + ist = &input_streams[j]; + if(opt_programid){ + int pi,si; + AVFormatContext *f = input_files[ist->file_index].ctx; + skip=1; + for(pi=0; pinb_programs; pi++){ + AVProgram *p= f->programs[pi]; + if(p->id == opt_programid) + for(si=0; sinb_stream_indexes; si++){ + if(f->streams[ p->stream_index[si] ] == ist->st) + skip=0; + } + } + } + if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && + ist->st->codec->codec_type == ost->st->codec->codec_type) { + if(best_nb_frames < ist->st->codec_info_nb_frames){ + best_nb_frames= ist->st->codec_info_nb_frames; + ost->source_index = j; + found = 1; + } + } + } + + if (!found) { + if(! opt_programid) { + /* try again and reuse existing stream */ + for (j = 0; j < nb_input_streams; j++) { + ist = &input_streams[j]; + if ( ist->st->codec->codec_type == ost->st->codec->codec_type + && ist->st->discard != AVDISCARD_ALL) { + ost->source_index = j; + found = 1; + } + } + } + if (!found) { + int i= ost->file_index; + av_dump_format(output_files[i], i, output_files[i]->filename, 1); + fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n", + ost->file_index, ost->index); + exit_program(1); + } + } + } + ist = &input_streams[ost->source_index]; + ist->discard = 0; + ost->sync_ist = (nb_stream_maps > 0) ? 
+ &input_streams[input_files[stream_maps[n].sync_file_index].ist_index + + stream_maps[n].sync_stream_index] : ist; + } + } + + /* for each output stream, we compute the right encoding parameters */ + for(i=0;ifile_index]; + ist = &input_streams[ost->source_index]; + + codec = ost->st->codec; + icodec = ist->st->codec; + + if (metadata_streams_autocopy) + av_dict_copy(&ost->st->metadata, ist->st->metadata, + AV_DICT_DONT_OVERWRITE); + + ost->st->disposition = ist->st->disposition; + codec->bits_per_raw_sample= icodec->bits_per_raw_sample; + codec->chroma_sample_location = icodec->chroma_sample_location; + + if (ost->st->stream_copy) { + uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE; + + if (extra_size > INT_MAX) + goto fail; + + /* if stream_copy is selected, no need to decode or encode */ + codec->codec_id = icodec->codec_id; + codec->codec_type = icodec->codec_type; + + if(!codec->codec_tag){ + if( !os->oformat->codec_tag + || av_codec_get_id (os->oformat->codec_tag, icodec->codec_tag) == codec->codec_id + || av_codec_get_tag(os->oformat->codec_tag, icodec->codec_id) <= 0) + codec->codec_tag = icodec->codec_tag; + } + + codec->bit_rate = icodec->bit_rate; + codec->rc_max_rate = icodec->rc_max_rate; + codec->rc_buffer_size = icodec->rc_buffer_size; + codec->extradata= av_mallocz(extra_size); + if (!codec->extradata) + goto fail; + memcpy(codec->extradata, icodec->extradata, icodec->extradata_size); + codec->extradata_size= icodec->extradata_size; + if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ + codec->time_base = icodec->time_base; + codec->time_base.num *= icodec->ticks_per_frame; + av_reduce(&codec->time_base.num, &codec->time_base.den, + codec->time_base.num, codec->time_base.den, INT_MAX); + }else + codec->time_base = ist->st->time_base; + switch(codec->codec_type) { + case AVMEDIA_TYPE_AUDIO: + if(audio_volume != 256) { + fprintf(stderr,"-acodec copy and -vol are incompatible (frames are not decoded)\n"); + exit_program(1); + } + codec->channel_layout = icodec->channel_layout; + codec->sample_rate = icodec->sample_rate; + codec->channels = icodec->channels; + codec->frame_size = icodec->frame_size; + codec->audio_service_type = icodec->audio_service_type; + codec->block_align= icodec->block_align; + if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3) + codec->block_align= 0; + if(codec->codec_id == CODEC_ID_AC3) + codec->block_align= 0; + break; + case AVMEDIA_TYPE_VIDEO: + codec->pix_fmt = icodec->pix_fmt; + codec->width = icodec->width; + codec->height = icodec->height; + codec->has_b_frames = icodec->has_b_frames; + if (!codec->sample_aspect_ratio.num) { + codec->sample_aspect_ratio = + ost->st->sample_aspect_ratio = + ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio : + ist->st->codec->sample_aspect_ratio.num ? 
+ ist->st->codec->sample_aspect_ratio : (AVRational){0, 1}; + } + break; + case AVMEDIA_TYPE_SUBTITLE: + codec->width = icodec->width; + codec->height = icodec->height; + break; + case AVMEDIA_TYPE_DATA: + break; + default: + abort(); + } + } else { + if (!ost->enc) + ost->enc = avcodec_find_encoder(ost->st->codec->codec_id); + switch(codec->codec_type) { + case AVMEDIA_TYPE_AUDIO: + ost->fifo= av_fifo_alloc(1024); + if(!ost->fifo) + goto fail; + ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE); + if (!codec->sample_rate) { + codec->sample_rate = icodec->sample_rate; + if (icodec->lowres) + codec->sample_rate >>= icodec->lowres; + } + choose_sample_rate(ost->st, ost->enc); + codec->time_base = (AVRational){1, codec->sample_rate}; + if (codec->sample_fmt == AV_SAMPLE_FMT_NONE) + codec->sample_fmt = icodec->sample_fmt; + choose_sample_fmt(ost->st, ost->enc); + if (!codec->channels) + codec->channels = icodec->channels; + codec->channel_layout = icodec->channel_layout; + if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels) + codec->channel_layout = 0; + ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1; + icodec->request_channels = codec->channels; + ist->decoding_needed = 1; + ost->encoding_needed = 1; + ost->resample_sample_fmt = icodec->sample_fmt; + ost->resample_sample_rate = icodec->sample_rate; + ost->resample_channels = icodec->channels; + break; + case AVMEDIA_TYPE_VIDEO: + if (codec->pix_fmt == PIX_FMT_NONE) + codec->pix_fmt = icodec->pix_fmt; + choose_pixel_fmt(ost->st, ost->enc); + + if (ost->st->codec->pix_fmt == PIX_FMT_NONE) { + fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\n"); + exit_program(1); + } + + if (!codec->width || !codec->height) { + codec->width = icodec->width; + codec->height = icodec->height; + } + + ost->video_resample = codec->width != icodec->width || + codec->height != icodec->height || + codec->pix_fmt != icodec->pix_fmt; + if (ost->video_resample) { +#if !CONFIG_AVFILTER + avcodec_get_frame_defaults(&ost->pict_tmp); + if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt, + codec->width, codec->height)) { + fprintf(stderr, "Cannot allocate temp picture, check pix fmt\n"); + exit_program(1); + } + ost->img_resample_ctx = sws_getContext( + icodec->width, + icodec->height, + icodec->pix_fmt, + codec->width, + codec->height, + codec->pix_fmt, + ost->sws_flags, NULL, NULL, NULL); + if (ost->img_resample_ctx == NULL) { + fprintf(stderr, "Cannot get resampling context\n"); + exit_program(1); + } +#endif + codec->bits_per_raw_sample= 0; + } + + ost->resample_height = icodec->height; + ost->resample_width = icodec->width; + ost->resample_pix_fmt= icodec->pix_fmt; + ost->encoding_needed = 1; + ist->decoding_needed = 1; + + if (!ost->frame_rate.num) + ost->frame_rate = ist->st->r_frame_rate.num ? 
ist->st->r_frame_rate : (AVRational){25,1};
+            if (ost->enc && ost->enc->supported_framerates && !force_fps) {
+                int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
+                ost->frame_rate = ost->enc->supported_framerates[idx];
+            }
+            codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
+
+#if CONFIG_AVFILTER
+            if (configure_video_filters(ist, ost)) {
+                fprintf(stderr, "Error opening filters!\n");
+                exit(1);
+            }
+#endif
+            break;
+        case AVMEDIA_TYPE_SUBTITLE:
+            ost->encoding_needed = 1;
+            ist->decoding_needed = 1;
+            break;
+        default:
+            abort();
+            break;
+        }
+        /* two pass mode */
+        if (ost->encoding_needed &&
+            (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
+            char logfilename[1024];
+            FILE *f;
+
+            snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
+                     pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
+                     i);
+            if (codec->flags & CODEC_FLAG_PASS1) {
+                f = fopen(logfilename, "wb");
+                if (!f) {
+                    fprintf(stderr, "Cannot write log file '%s' for pass-1 encoding: %s\n", logfilename, strerror(errno));
+                    exit_program(1);
+                }
+                ost->logfile = f;
+            } else {
+                char *logbuffer;
+                size_t logbuffer_size;
+                if (read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
+                    fprintf(stderr, "Error reading log file '%s' for pass-2 encoding\n", logfilename);
+                    exit_program(1);
+                }
+                codec->stats_in = logbuffer;
+            }
+        }
+        }
+        if(codec->codec_type == AVMEDIA_TYPE_VIDEO){
+            int size= codec->width * codec->height;
+            bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 200);
+        }
+    }
+
+    if (!bit_buffer)
+        bit_buffer = av_malloc(bit_buffer_size);
+    if (!bit_buffer) {
+        fprintf(stderr, "Cannot allocate %d bytes output buffer\n",
+                bit_buffer_size);
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    /* open each encoder */
+    for(i=0;i<nb_ostreams;i++) {
+        ost = ost_table[i];
+        if (ost->encoding_needed) {
+            AVCodec *codec = ost->enc;
+            AVCodecContext *dec = input_streams[ost->source_index].st->codec;
+            if (!codec) {
+                snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d.%d",
+                         ost->st->codec->codec_id, ost->file_index, ost->index);
+                ret = AVERROR(EINVAL);
+                goto dump_format;
+            }
+            if (dec->subtitle_header) {
+                ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
+                if (!ost->st->codec->subtitle_header) {
+                    ret = AVERROR(ENOMEM);
+                    goto dump_format;
+                }
+                memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
+                ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
+            }
+            if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
+                snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height",
+                         ost->file_index, ost->index);
+                ret = AVERROR(EINVAL);
+                goto dump_format;
+            }
+            assert_codec_experimental(ost->st->codec, 1);
+            assert_avoptions(ost->opts);
+            if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
+                av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
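/* Two-pass rate control, as wired up above: pass 1 opens "<prefix>-<i>.log"
   for writing, pass 2 loads the same file into codec->stats_in before the
   encoder is opened.  What fills the log on pass 1 is, roughly, the
   encoder's stats_out string (sketch, error handling omitted):

       if ((enc->flags & CODEC_FLAG_PASS1) && enc->stats_out)
           fprintf(ost->logfile, "%s", enc->stats_out);   // once per encoded frame

   and on pass 2 the whole file is handed back verbatim through stats_in,
   which is what the read_file() call above does. */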
+ "It takes bits/s as argument, not kbits/s\n"); + extra_size += ost->st->codec->extradata_size; + } + } + + /* open each decoder */ + for (i = 0; i < nb_input_streams; i++) { + ist = &input_streams[i]; + if (ist->decoding_needed) { + AVCodec *codec = ist->dec; + if (!codec) + codec = avcodec_find_decoder(ist->st->codec->codec_id); + if (!codec) { + snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d", + ist->st->codec->codec_id, ist->file_index, ist->st->index); + ret = AVERROR(EINVAL); + goto dump_format; + } + + /* update requested sample format for the decoder based on the + corresponding encoder sample format */ + for (j = 0; j < nb_ostreams; j++) { + ost = ost_table[j]; + if (ost->source_index == i) { + update_sample_fmt(ist->st->codec, codec, ost->st->codec); + break; + } + } + + if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) { + snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d", + ist->file_index, ist->st->index); + ret = AVERROR(EINVAL); + goto dump_format; + } + assert_codec_experimental(ist->st->codec, 0); + assert_avoptions(ost->opts); + //if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) + // ist->st->codec->flags |= CODEC_FLAG_REPEAT_FIELD; + } + } + + /* init pts */ + for (i = 0; i < nb_input_streams; i++) { + AVStream *st; + ist = &input_streams[i]; + st= ist->st; + ist->pts = st->avg_frame_rate.num ? - st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0; + ist->next_pts = AV_NOPTS_VALUE; + init_pts_correction(&ist->pts_ctx); + ist->is_start = 1; + } + + /* set meta data information from input file if required */ + for (i=0;i= (nb_elems)) {\ + snprintf(error, sizeof(error), "Invalid %s index %d while processing metadata maps\n",\ + (desc), (index));\ + ret = AVERROR(EINVAL);\ + goto dump_format;\ + } + + int out_file_index = meta_data_maps[i][0].file; + int in_file_index = meta_data_maps[i][1].file; + if (in_file_index < 0 || out_file_index < 0) + continue; + METADATA_CHECK_INDEX(out_file_index, nb_output_files, "output file") + METADATA_CHECK_INDEX(in_file_index, nb_input_files, "input file") + + files[0] = output_files[out_file_index]; + files[1] = input_files[in_file_index].ctx; + + for (j = 0; j < 2; j++) { + MetadataMap *map = &meta_data_maps[i][j]; + + switch (map->type) { + case 'g': + meta[j] = &files[j]->metadata; + break; + case 's': + METADATA_CHECK_INDEX(map->index, files[j]->nb_streams, "stream") + meta[j] = &files[j]->streams[map->index]->metadata; + break; + case 'c': + METADATA_CHECK_INDEX(map->index, files[j]->nb_chapters, "chapter") + meta[j] = &files[j]->chapters[map->index]->metadata; + break; + case 'p': + METADATA_CHECK_INDEX(map->index, files[j]->nb_programs, "program") + meta[j] = &files[j]->programs[map->index]->metadata; + break; + } + } + + av_dict_copy(meta[0], *meta[1], AV_DICT_DONT_OVERWRITE); + } + + /* copy global metadata by default */ + if (metadata_global_autocopy) { + + for (i = 0; i < nb_output_files; i++) + av_dict_copy(&output_files[i]->metadata, input_files[0].ctx->metadata, + AV_DICT_DONT_OVERWRITE); + } + + /* copy chapters according to chapter maps */ + for (i = 0; i < nb_chapter_maps; i++) { + int infile = chapter_maps[i].in_file; + int outfile = chapter_maps[i].out_file; + + if (infile < 0 || outfile < 0) + continue; + if (infile >= nb_input_files) { + snprintf(error, sizeof(error), "Invalid input file index %d in chapter mapping.\n", infile); + ret = AVERROR(EINVAL); + goto dump_format; + } + if (outfile >= nb_output_files) { 
+ snprintf(error, sizeof(error), "Invalid output file index %d in chapter mapping.\n",outfile); + ret = AVERROR(EINVAL); + goto dump_format; + } + copy_chapters(infile, outfile); + } + + /* copy chapters from the first input file that has them*/ + if (!nb_chapter_maps) + for (i = 0; i < nb_input_files; i++) { + if (!input_files[i].ctx->nb_chapters) + continue; + + for (j = 0; j < nb_output_files; j++) + if ((ret = copy_chapters(i, j)) < 0) + goto dump_format; + break; + } + + /* open files and write file headers */ + for(i=0;ioformat->name, "rtp")) { + want_sdp = 0; + } + } + + dump_format: + /* dump the file output parameters - cannot be done before in case + of stream copy */ + for(i=0;ifilename, 1); + } + + /* dump the stream mapping */ + if (verbose >= 0) { + fprintf(stderr, "Stream mapping:\n"); + for(i=0;i #%d.%d", + input_streams[ost->source_index].file_index, + input_streams[ost->source_index].st->index, + ost->file_index, + ost->index); + if (ost->sync_ist != &input_streams[ost->source_index]) + fprintf(stderr, " [sync #%d.%d]", + ost->sync_ist->file_index, + ost->sync_ist->st->index); + fprintf(stderr, "\n"); + } + } + + if (ret) { + fprintf(stderr, "%s\n", error); + goto fail; + } + + if (want_sdp) { + print_sdp(output_files, nb_output_files); + } + + if (verbose >= 0) + fprintf(stderr, "Press ctrl-c to stop encoding\n"); + term_init(); + + timer_start = av_gettime(); + + for(; received_sigterm == 0;) { + int file_index, ist_index; + AVPacket pkt; + double ipts_min; + double opts_min; + + redo: + ipts_min= 1e100; + opts_min= 1e100; + + /* select the stream that we must read now by looking at the + smallest output pts */ + file_index = -1; + for(i=0;ifile_index]; + ist = &input_streams[ost->source_index]; + if(ist->is_past_recording_time || no_packet[ist->file_index]) + continue; + opts = ost->st->pts.val * av_q2d(ost->st->time_base); + ipts = (double)ist->pts; + if (!input_files[ist->file_index].eof_reached){ + if(ipts < ipts_min) { + ipts_min = ipts; + if(input_sync ) file_index = ist->file_index; + } + if(opts < opts_min) { + opts_min = opts; + if(!input_sync) file_index = ist->file_index; + } + } + if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){ + file_index= -1; + break; + } + } + /* if none, if is finished */ + if (file_index < 0) { + if(no_packet_count){ + no_packet_count=0; + memset(no_packet, 0, sizeof(no_packet)); + usleep(10000); + continue; + } + break; + } + + /* finish if limit size exhausted */ + if (limit_filesize != 0 && limit_filesize <= avio_tell(output_files[0]->pb)) + break; + + /* read a frame from it and output it in the fifo */ + is = input_files[file_index].ctx; + ret= av_read_frame(is, &pkt); + if(ret == AVERROR(EAGAIN)){ + no_packet[file_index]=1; + no_packet_count++; + continue; + } + if (ret < 0) { + input_files[file_index].eof_reached = 1; + if (opt_shortest) + break; + else + continue; + } + + no_packet_count=0; + memset(no_packet, 0, sizeof(no_packet)); + + if (do_pkt_dump) { + av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump, + is->streams[pkt.stream_index]); + } + /* the following test is needed in case new streams appear + dynamically in stream : we ignore them */ + if (pkt.stream_index >= input_files[file_index].ctx->nb_streams) + goto discard_packet; + ist_index = input_files[file_index].ist_index + pkt.stream_index; + ist = &input_streams[ist_index]; + if (ist->discard) + goto discard_packet; + + if (pkt.dts != AV_NOPTS_VALUE) + pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, 
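/* The input file's ts_offset is kept in AV_TIME_BASE (microsecond) units, so
   it has to be rescaled into each stream's own time base before it can be
   added to packet timestamps.  The same operation in isolation (st and
   ts_offset are hypothetical locals):

       int64_t off = av_rescale_q(ts_offset, AV_TIME_BASE_Q, st->time_base);
       if (pkt.dts != AV_NOPTS_VALUE) pkt.dts += off;
       if (pkt.pts != AV_NOPTS_VALUE) pkt.pts += off;

   av_rescale_q(a, bq, cq) computes a * bq / cq with rounding and without the
   intermediate overflow a direct 64-bit multiplication could hit. */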
ist->st->time_base); + if (pkt.pts != AV_NOPTS_VALUE) + pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); + + if (ist->ts_scale) { + if(pkt.pts != AV_NOPTS_VALUE) + pkt.pts *= ist->ts_scale; + if(pkt.dts != AV_NOPTS_VALUE) + pkt.dts *= ist->ts_scale; + } + +// fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files[ist->file_index].ts_offset, ist->st->codec->codec_type); + if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE + && (is->iformat->flags & AVFMT_TS_DISCONT)) { + int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q); + int64_t delta= pkt_dts - ist->next_pts; + if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1pts)&& !copy_ts){ + input_files[ist->file_index].ts_offset -= delta; + if (verbose > 2) + fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", + delta, input_files[ist->file_index].ts_offset); + pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); + if(pkt.pts != AV_NOPTS_VALUE) + pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); + } + } + + /* finish if recording time exhausted */ + if (recording_time != INT64_MAX && + av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) >= 0) { + ist->is_past_recording_time = 1; + goto discard_packet; + } + + //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size); + if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) { + + if (verbose >= 0) + fprintf(stderr, "Error while decoding stream #%d.%d\n", + ist->file_index, ist->st->index); + if (exit_on_error) + exit_program(1); + av_free_packet(&pkt); + goto redo; + } + + discard_packet: + av_free_packet(&pkt); + + /* dump report by using the output first video and audio streams */ + print_report(output_files, ost_table, nb_ostreams, 0); + } + + /* at the end of stream, we must flush the decoder buffers */ + for (i = 0; i < nb_input_streams; i++) { + ist = &input_streams[i]; + if (ist->decoding_needed) { + output_packet(ist, i, ost_table, nb_ostreams, NULL); + } + } + + term_exit(); + + /* write the trailer if needed and close file */ + for(i=0;iencoding_needed) { + av_freep(&ost->st->codec->stats_in); + avcodec_close(ost->st->codec); + } +#if CONFIG_AVFILTER + avfilter_graph_free(&ost->graph); +#endif + } + + /* close each decoder */ + for (i = 0; i < nb_input_streams; i++) { + ist = &input_streams[i]; + if (ist->decoding_needed) { + avcodec_close(ist->st->codec); + } + } + + /* finished ! 
*/ + ret = 0; + + fail: + av_freep(&bit_buffer); + + if (ost_table) { + for(i=0;ist->stream_copy) + av_freep(&ost->st->codec->extradata); + if (ost->logfile) { + fclose(ost->logfile); + ost->logfile = NULL; + } + av_fifo_free(ost->fifo); /* works even if fifo is not + initialized but set to zero */ + av_freep(&ost->st->codec->subtitle_header); + av_free(ost->pict_tmp.data[0]); + av_free(ost->forced_kf_pts); + if (ost->video_resample) + sws_freeContext(ost->img_resample_ctx); + if (ost->resample) + audio_resample_close(ost->resample); + if (ost->reformat_ctx) + av_audio_convert_free(ost->reformat_ctx); + av_dict_free(&ost->opts); + av_free(ost); + } + } + av_free(ost_table); + } + return ret; +} + +static int opt_format(const char *opt, const char *arg) +{ + last_asked_format = arg; + return 0; +} + +static int opt_video_rc_override_string(const char *opt, const char *arg) +{ + video_rc_override_string = arg; + return 0; +} + +static int opt_me_threshold(const char *opt, const char *arg) +{ + me_threshold = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX); + return 0; +} + +static int opt_verbose(const char *opt, const char *arg) +{ + verbose = parse_number_or_die(opt, arg, OPT_INT64, -10, 10); + return 0; +} + +static int opt_frame_rate(const char *opt, const char *arg) +{ + if (av_parse_video_rate(&frame_rate, arg) < 0) { + fprintf(stderr, "Incorrect value for %s: %s\n", opt, arg); + exit_program(1); + } + return 0; +} + +static int opt_frame_crop(const char *opt, const char *arg) +{ + fprintf(stderr, "Option '%s' has been removed, use the crop filter instead\n", opt); + return AVERROR(EINVAL); +} + +static int opt_frame_size(const char *opt, const char *arg) +{ + if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) { + fprintf(stderr, "Incorrect frame size\n"); + return AVERROR(EINVAL); + } + return 0; +} + +static int opt_pad(const char *opt, const char *arg) { + fprintf(stderr, "Option '%s' has been removed, use the pad filter instead\n", opt); + return -1; +} + +static int opt_frame_pix_fmt(const char *opt, const char *arg) +{ + if (strcmp(arg, "list")) { + frame_pix_fmt = av_get_pix_fmt(arg); + if (frame_pix_fmt == PIX_FMT_NONE) { + fprintf(stderr, "Unknown pixel format requested: %s\n", arg); + return AVERROR(EINVAL); + } + } else { + show_pix_fmts(); + exit_program(0); + } + return 0; +} + +static int opt_frame_aspect_ratio(const char *opt, const char *arg) +{ + int x = 0, y = 0; + double ar = 0; + const char *p; + char *end; + + p = strchr(arg, ':'); + if (p) { + x = strtol(arg, &end, 10); + if (end == p) + y = strtol(end+1, &end, 10); + if (x > 0 && y > 0) + ar = (double)x / (double)y; + } else + ar = strtod(arg, NULL); + + if (!ar) { + fprintf(stderr, "Incorrect aspect ratio specification.\n"); + return AVERROR(EINVAL); + } + frame_aspect_ratio = ar; + return 0; +} + +static int opt_metadata(const char *opt, const char *arg) +{ + char *mid= strchr(arg, '='); + + if(!mid){ + fprintf(stderr, "Missing =\n"); + exit_program(1); + } + *mid++= 0; + + av_dict_set(&metadata, arg, mid, 0); + + return 0; +} + +static int opt_qscale(const char *opt, const char *arg) +{ + video_qscale = parse_number_or_die(opt, arg, OPT_FLOAT, 0, 255); + if (video_qscale == 0) { + fprintf(stderr, "qscale must be > 0.0 and <= 255\n"); + return AVERROR(EINVAL); + } + return 0; +} + +static int opt_top_field_first(const char *opt, const char *arg) +{ + top_field_first = parse_number_or_die(opt, arg, OPT_INT, 0, 1); + return 0; +} + +static int opt_thread_count(const char *opt, const char 
*arg) +{ + thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX); +#if !HAVE_THREADS + if (verbose >= 0) + fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n"); +#endif + return 0; +} + +static int opt_audio_sample_fmt(const char *opt, const char *arg) +{ + if (strcmp(arg, "list")) { + audio_sample_fmt = av_get_sample_fmt(arg); + if (audio_sample_fmt == AV_SAMPLE_FMT_NONE) { + av_log(NULL, AV_LOG_ERROR, "Invalid sample format '%s'\n", arg); + return AVERROR(EINVAL); + } + } else { + int i; + char fmt_str[128]; + for (i = -1; i < AV_SAMPLE_FMT_NB; i++) + printf("%s\n", av_get_sample_fmt_string(fmt_str, sizeof(fmt_str), i)); + exit_program(0); + } + return 0; +} + +static int opt_audio_rate(const char *opt, const char *arg) +{ + audio_sample_rate = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX); + return 0; +} + +static int opt_audio_channels(const char *opt, const char *arg) +{ + audio_channels = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX); + return 0; +} + +static int opt_video_channel(const char *opt, const char *arg) +{ + av_log(NULL, AV_LOG_WARNING, "This option is deprecated, use -channel.\n"); + opt_default("channel", arg); + return 0; +} + +static int opt_video_standard(const char *opt, const char *arg) +{ + av_log(NULL, AV_LOG_WARNING, "This option is deprecated, use -standard.\n"); + opt_default("standard", arg); + return 0; +} + +static int opt_codec(int *pstream_copy, char **pcodec_name, + int codec_type, const char *arg) +{ + av_freep(pcodec_name); + if (!strcmp(arg, "copy")) { + *pstream_copy = 1; + } else { + *pcodec_name = av_strdup(arg); + } + return 0; +} + +static int opt_audio_codec(const char *opt, const char *arg) +{ + return opt_codec(&audio_stream_copy, &audio_codec_name, AVMEDIA_TYPE_AUDIO, arg); +} + +static int opt_video_codec(const char *opt, const char *arg) +{ + return opt_codec(&video_stream_copy, &video_codec_name, AVMEDIA_TYPE_VIDEO, arg); +} + +static int opt_subtitle_codec(const char *opt, const char *arg) +{ + return opt_codec(&subtitle_stream_copy, &subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, arg); +} + +static int opt_data_codec(const char *opt, const char *arg) +{ + return opt_codec(&data_stream_copy, &data_codec_name, AVMEDIA_TYPE_DATA, arg); +} + +static int opt_codec_tag(const char *opt, const char *arg) +{ + char *tail; + uint32_t *codec_tag; + + codec_tag = !strcmp(opt, "atag") ? &audio_codec_tag : + !strcmp(opt, "vtag") ? &video_codec_tag : + !strcmp(opt, "stag") ? 
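/* opt_codec_tag, continued just below, accepts either a number or a fourcc:
   strtol() is tried first, and if anything non-numeric is left over the
   string is reinterpreted as a little-endian fourcc.  Worked example
   (assumes the argument holds at least four bytes, as the code below does):

       char *tail;
       uint32_t tag = strtol("avc1", &tail, 0);   // 0, tail still points at 'a'
       if (*tail)
           tag = AV_RL32("avc1");                 // 0x31637661 ('a','v','c','1')

   so "-vtag avc1" and "-vtag 0x31637661" produce the same codec_tag. */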
&subtitle_codec_tag : NULL; + if (!codec_tag) + return -1; + + *codec_tag = strtol(arg, &tail, 0); + if (!tail || *tail) + *codec_tag = AV_RL32(arg); + + return 0; +} + +static int opt_map(const char *opt, const char *arg) +{ + StreamMap *m; + char *p; + + stream_maps = grow_array(stream_maps, sizeof(*stream_maps), &nb_stream_maps, nb_stream_maps + 1); + m = &stream_maps[nb_stream_maps-1]; + + m->file_index = strtol(arg, &p, 0); + if (*p) + p++; + + m->stream_index = strtol(p, &p, 0); + if (*p) { + p++; + m->sync_file_index = strtol(p, &p, 0); + if (*p) + p++; + m->sync_stream_index = strtol(p, &p, 0); + } else { + m->sync_file_index = m->file_index; + m->sync_stream_index = m->stream_index; + } + return 0; +} + +static void parse_meta_type(char *arg, char *type, int *index, char **endptr) +{ + *endptr = arg; + if (*arg == ',') { + *type = *(++arg); + switch (*arg) { + case 'g': + break; + case 's': + case 'c': + case 'p': + *index = strtol(++arg, endptr, 0); + break; + default: + fprintf(stderr, "Invalid metadata type %c.\n", *arg); + exit_program(1); + } + } else + *type = 'g'; +} + +static int opt_map_metadata(const char *opt, const char *arg) +{ + MetadataMap *m, *m1; + char *p; + + meta_data_maps = grow_array(meta_data_maps, sizeof(*meta_data_maps), + &nb_meta_data_maps, nb_meta_data_maps + 1); + + m = &meta_data_maps[nb_meta_data_maps - 1][0]; + m->file = strtol(arg, &p, 0); + parse_meta_type(p, &m->type, &m->index, &p); + if (*p) + p++; + + m1 = &meta_data_maps[nb_meta_data_maps - 1][1]; + m1->file = strtol(p, &p, 0); + parse_meta_type(p, &m1->type, &m1->index, &p); + + if (m->type == 'g' || m1->type == 'g') + metadata_global_autocopy = 0; + if (m->type == 's' || m1->type == 's') + metadata_streams_autocopy = 0; + if (m->type == 'c' || m1->type == 'c') + metadata_chapters_autocopy = 0; + + return 0; +} + +static int opt_map_meta_data(const char *opt, const char *arg) +{ + fprintf(stderr, "-map_meta_data is deprecated and will be removed soon. 
" + "Use -map_metadata instead.\n"); + return opt_map_metadata(opt, arg); +} + +static int opt_map_chapters(const char *opt, const char *arg) +{ + ChapterMap *c; + char *p; + + chapter_maps = grow_array(chapter_maps, sizeof(*chapter_maps), &nb_chapter_maps, + nb_chapter_maps + 1); + c = &chapter_maps[nb_chapter_maps - 1]; + c->out_file = strtol(arg, &p, 0); + if (*p) + p++; + + c->in_file = strtol(p, &p, 0); + return 0; +} + +static int opt_input_ts_scale(const char *opt, const char *arg) +{ + unsigned int stream; + double scale; + char *p; + + stream = strtol(arg, &p, 0); + if (*p) + p++; + scale= strtod(p, &p); + + ts_scale = grow_array(ts_scale, sizeof(*ts_scale), &nb_ts_scale, stream + 1); + ts_scale[stream] = scale; + return 0; +} + +static int opt_recording_time(const char *opt, const char *arg) +{ + recording_time = parse_time_or_die(opt, arg, 1); + return 0; +} + +static int opt_start_time(const char *opt, const char *arg) +{ + start_time = parse_time_or_die(opt, arg, 1); + return 0; +} + +static int opt_recording_timestamp(const char *opt, const char *arg) +{ + char buf[128]; + int64_t recording_timestamp = parse_time_or_die(opt, arg, 0) / 1E6; + struct tm time = *gmtime((time_t*)&recording_timestamp); + strftime(buf, sizeof(buf), "creation_time=%FT%T%z", &time); + opt_metadata("metadata", buf); + + av_log(NULL, AV_LOG_WARNING, "%s is deprecated, set the 'creation_time' metadata " + "tag instead.\n", opt); + return 0; +} + +static int opt_input_ts_offset(const char *opt, const char *arg) +{ + input_ts_offset = parse_time_or_die(opt, arg, 1); + return 0; +} + +static enum CodecID find_codec_or_die(const char *name, int type, int encoder) +{ + const char *codec_string = encoder ? "encoder" : "decoder"; + AVCodec *codec; + + if(!name) + return CODEC_ID_NONE; + codec = encoder ? 
+ avcodec_find_encoder_by_name(name) : + avcodec_find_decoder_by_name(name); + if(!codec) { + fprintf(stderr, "Unknown %s '%s'\n", codec_string, name); + exit_program(1); + } + if(codec->type != type) { + fprintf(stderr, "Invalid %s type '%s'\n", codec_string, name); + exit_program(1); + } + return codec->id; +} + +static int opt_input_file(const char *opt, const char *filename) +{ + AVFormatContext *ic; + AVInputFormat *file_iformat = NULL; + int err, i, ret, rfps, rfps_base; + int64_t timestamp; + uint8_t buf[128]; + AVDictionary **opts; + int orig_nb_streams; // number of streams before avformat_find_stream_info + + if (last_asked_format) { + if (!(file_iformat = av_find_input_format(last_asked_format))) { + fprintf(stderr, "Unknown input format: '%s'\n", last_asked_format); + exit_program(1); + } + last_asked_format = NULL; + } + + if (!strcmp(filename, "-")) + filename = "pipe:"; + + using_stdin |= !strncmp(filename, "pipe:", 5) || + !strcmp(filename, "/dev/stdin"); + + /* get default parameters from command line */ + ic = avformat_alloc_context(); + if (!ic) { + print_error(filename, AVERROR(ENOMEM)); + exit_program(1); + } + if (audio_sample_rate) { + snprintf(buf, sizeof(buf), "%d", audio_sample_rate); + av_dict_set(&format_opts, "sample_rate", buf, 0); + } + if (audio_channels) { + snprintf(buf, sizeof(buf), "%d", audio_channels); + av_dict_set(&format_opts, "channels", buf, 0); + } + if (frame_rate.num) { + snprintf(buf, sizeof(buf), "%d/%d", frame_rate.num, frame_rate.den); + av_dict_set(&format_opts, "framerate", buf, 0); + } + if (frame_width && frame_height) { + snprintf(buf, sizeof(buf), "%dx%d", frame_width, frame_height); + av_dict_set(&format_opts, "video_size", buf, 0); + } + if (frame_pix_fmt != PIX_FMT_NONE) + av_dict_set(&format_opts, "pixel_format", av_get_pix_fmt_name(frame_pix_fmt), 0); + + ic->video_codec_id = + find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0); + ic->audio_codec_id = + find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0); + ic->subtitle_codec_id= + find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0); + ic->flags |= AVFMT_FLAG_NONBLOCK; + + /* open the input file with generic libav function */ + err = avformat_open_input(&ic, filename, file_iformat, &format_opts); + if (err < 0) { + print_error(filename, err); + exit_program(1); + } + assert_avoptions(format_opts); + + if(opt_programid) { + int i, j; + int found=0; + for(i=0; inb_streams; i++){ + ic->streams[i]->discard= AVDISCARD_ALL; + } + for(i=0; inb_programs; i++){ + AVProgram *p= ic->programs[i]; + if(p->id != opt_programid){ + p->discard = AVDISCARD_ALL; + }else{ + found=1; + for(j=0; jnb_stream_indexes; j++){ + ic->streams[p->stream_index[j]]->discard= AVDISCARD_DEFAULT; + } + } + } + if(!found){ + fprintf(stderr, "Specified program id not found\n"); + exit_program(1); + } + opt_programid=0; + } + + if (loop_input) { + av_log(NULL, AV_LOG_WARNING, "-loop_input is deprecated, use -loop 1\n"); + ic->loop_input = loop_input; + } + + /* Set AVCodecContext options for avformat_find_stream_info */ + opts = setup_find_stream_info_opts(ic, codec_opts); + orig_nb_streams = ic->nb_streams; + + /* If not enough info to get the stream parameters, we decode the + first frames to get it. 
(used in mpeg case for example) */ + ret = avformat_find_stream_info(ic, opts); + if (ret < 0 && verbose >= 0) { + fprintf(stderr, "%s: could not find codec parameters\n", filename); + av_close_input_file(ic); + exit_program(1); + } + + timestamp = start_time; + /* add the stream start time */ + if (ic->start_time != AV_NOPTS_VALUE) + timestamp += ic->start_time; + + /* if seeking requested, we execute it */ + if (start_time != 0) { + ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD); + if (ret < 0) { + fprintf(stderr, "%s: could not seek to position %0.3f\n", + filename, (double)timestamp / AV_TIME_BASE); + } + /* reset seek info */ + start_time = 0; + } + + /* update the current parameters so that they match the one of the input stream */ + for(i=0;inb_streams;i++) { + AVStream *st = ic->streams[i]; + AVCodecContext *dec = st->codec; + InputStream *ist; + + dec->thread_count = thread_count; + + input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1); + ist = &input_streams[nb_input_streams - 1]; + ist->st = st; + ist->file_index = nb_input_files; + ist->discard = 1; + ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, 0); + + if (i < nb_ts_scale) + ist->ts_scale = ts_scale[i]; + + switch (dec->codec_type) { + case AVMEDIA_TYPE_AUDIO: + ist->dec = avcodec_find_decoder_by_name(audio_codec_name); + if(audio_disable) + st->discard= AVDISCARD_ALL; + break; + case AVMEDIA_TYPE_VIDEO: + ist->dec = avcodec_find_decoder_by_name(video_codec_name); + rfps = ic->streams[i]->r_frame_rate.num; + rfps_base = ic->streams[i]->r_frame_rate.den; + if (dec->lowres) { + dec->flags |= CODEC_FLAG_EMU_EDGE; + dec->height >>= dec->lowres; + dec->width >>= dec->lowres; + } + if(me_threshold) + dec->debug |= FF_DEBUG_MV; + + if (dec->time_base.den != rfps*dec->ticks_per_frame || dec->time_base.num != rfps_base) { + + if (verbose >= 0) + fprintf(stderr,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n", + i, (float)dec->time_base.den / dec->time_base.num, dec->time_base.den, dec->time_base.num, + + (float)rfps / rfps_base, rfps, rfps_base); + } + + if(video_disable) + st->discard= AVDISCARD_ALL; + else if(video_discard) + st->discard= video_discard; + break; + case AVMEDIA_TYPE_DATA: + break; + case AVMEDIA_TYPE_SUBTITLE: + ist->dec = avcodec_find_decoder_by_name(subtitle_codec_name); + if(subtitle_disable) + st->discard = AVDISCARD_ALL; + break; + case AVMEDIA_TYPE_ATTACHMENT: + case AVMEDIA_TYPE_UNKNOWN: + break; + default: + abort(); + } + } + + /* dump the file content */ + if (verbose >= 0) + av_dump_format(ic, nb_input_files, filename, 0); + + input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1); + input_files[nb_input_files - 1].ctx = ic; + input_files[nb_input_files - 1].ist_index = nb_input_streams - ic->nb_streams; + input_files[nb_input_files - 1].ts_offset = input_ts_offset - (copy_ts ? 
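/* ts_offset for the file being opened: without -copyts the demuxer start
   time (plus any -ss seek target) is subtracted so output timestamps start
   near zero; with -copyts the source timestamps are preserved.  For an input
   that starts at 1.4s and no -itsoffset:

       copy_ts == 0:  ts_offset = 0 - 1.4s   (shift everything back to 0)
       copy_ts == 1:  ts_offset = 0          (keep the original timestamps)

   input_ts_offset itself comes from -itsoffset and is simply added on top. */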
0 : timestamp); + + frame_rate = (AVRational){0, 0}; + frame_pix_fmt = PIX_FMT_NONE; + frame_height = 0; + frame_width = 0; + audio_sample_rate = 0; + audio_channels = 0; + audio_sample_fmt = AV_SAMPLE_FMT_NONE; + av_freep(&ts_scale); + nb_ts_scale = 0; + + for (i = 0; i < orig_nb_streams; i++) + av_dict_free(&opts[i]); + av_freep(&opts); + av_freep(&video_codec_name); + av_freep(&audio_codec_name); + av_freep(&subtitle_codec_name); + uninit_opts(); + init_opts(); + return 0; +} + +static void check_inputs(int *has_video_ptr, + int *has_audio_ptr, + int *has_subtitle_ptr, + int *has_data_ptr) +{ + int has_video, has_audio, has_subtitle, has_data, i, j; + AVFormatContext *ic; + + has_video = 0; + has_audio = 0; + has_subtitle = 0; + has_data = 0; + + for(j=0;jnb_streams;i++) { + AVCodecContext *enc = ic->streams[i]->codec; + switch(enc->codec_type) { + case AVMEDIA_TYPE_AUDIO: + has_audio = 1; + break; + case AVMEDIA_TYPE_VIDEO: + has_video = 1; + break; + case AVMEDIA_TYPE_SUBTITLE: + has_subtitle = 1; + break; + case AVMEDIA_TYPE_DATA: + case AVMEDIA_TYPE_ATTACHMENT: + case AVMEDIA_TYPE_UNKNOWN: + has_data = 1; + break; + default: + abort(); + } + } + } + *has_video_ptr = has_video; + *has_audio_ptr = has_audio; + *has_subtitle_ptr = has_subtitle; + *has_data_ptr = has_data; +} + +static void new_video_stream(AVFormatContext *oc, int file_idx) +{ + AVStream *st; + OutputStream *ost; + AVCodecContext *video_enc; + enum CodecID codec_id = CODEC_ID_NONE; + AVCodec *codec= NULL; + + if(!video_stream_copy){ + if (video_codec_name) { + codec_id = find_codec_or_die(video_codec_name, AVMEDIA_TYPE_VIDEO, 1); + codec = avcodec_find_encoder_by_name(video_codec_name); + } else { + codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_VIDEO); + codec = avcodec_find_encoder(codec_id); + } + } + + ost = new_output_stream(oc, file_idx, codec); + st = ost->st; + if (!video_stream_copy) { + ost->frame_aspect_ratio = frame_aspect_ratio; + frame_aspect_ratio = 0; +#if CONFIG_AVFILTER + ost->avfilter= vfilters; + vfilters = NULL; +#endif + } + + ost->bitstream_filters = video_bitstream_filters; + video_bitstream_filters= NULL; + + st->codec->thread_count= thread_count; + + video_enc = st->codec; + + if(video_codec_tag) + video_enc->codec_tag= video_codec_tag; + + if(oc->oformat->flags & AVFMT_GLOBALHEADER) { + video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER; + } + + video_enc->codec_type = AVMEDIA_TYPE_VIDEO; + if (video_stream_copy) { + st->stream_copy = 1; + video_enc->sample_aspect_ratio = + st->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255); + } else { + const char *p; + int i; + + if (frame_rate.num) + ost->frame_rate = frame_rate; + video_enc->codec_id = codec_id; + + video_enc->width = frame_width; + video_enc->height = frame_height; + video_enc->pix_fmt = frame_pix_fmt; + st->sample_aspect_ratio = video_enc->sample_aspect_ratio; + + if (intra_only) + video_enc->gop_size = 0; + if (video_qscale || same_quality) { + video_enc->flags |= CODEC_FLAG_QSCALE; + video_enc->global_quality = FF_QP2LAMBDA * video_qscale; + } + + if(intra_matrix) + video_enc->intra_matrix = intra_matrix; + if(inter_matrix) + video_enc->inter_matrix = inter_matrix; + + p= video_rc_override_string; + for(i=0; p; i++){ + int start, end, q; + int e=sscanf(p, "%d,%d,%d", &start, &end, &q); + if(e!=3){ + fprintf(stderr, "error parsing rc_override\n"); + exit_program(1); + } + video_enc->rc_override= + av_realloc(video_enc->rc_override, + sizeof(RcOverride)*(i+1)); + 
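/* The -rc_override string parsed above is a '/'-separated list of
   "start,end,q" triples; a positive q is a fixed quantizer for that frame
   range, a negative q is a quality factor in percent (see the assignments
   below).  Parsing sketch with a hypothetical argument value:

       const char *p = "0,100,5/101,200,-50";
       int start, end, q;
       while (p && sscanf(p, "%d,%d,%d", &start, &end, &q) == 3) {
           // ... append one RcOverride entry covering frames start..end ...
           p = strchr(p, '/');
           if (p)
               p++;
       }

   e.g. frames 0-100 get qscale 5, frames 101-200 get quality_factor 0.5. */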
video_enc->rc_override[i].start_frame= start; + video_enc->rc_override[i].end_frame = end; + if(q>0){ + video_enc->rc_override[i].qscale= q; + video_enc->rc_override[i].quality_factor= 1.0; + } + else{ + video_enc->rc_override[i].qscale= 0; + video_enc->rc_override[i].quality_factor= -q/100.0; + } + p= strchr(p, '/'); + if(p) p++; + } + video_enc->rc_override_count=i; + if (!video_enc->rc_initial_buffer_occupancy) + video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size*3/4; + video_enc->me_threshold= me_threshold; + video_enc->intra_dc_precision= intra_dc_precision - 8; + + if (do_psnr) + video_enc->flags|= CODEC_FLAG_PSNR; + + /* two pass mode */ + if (do_pass) { + if (do_pass == 1) { + video_enc->flags |= CODEC_FLAG_PASS1; + } else { + video_enc->flags |= CODEC_FLAG_PASS2; + } + } + + if (forced_key_frames) + parse_forced_key_frames(forced_key_frames, ost, video_enc); + } + if (video_language) { + av_dict_set(&st->metadata, "language", video_language, 0); + av_freep(&video_language); + } + + /* reset some key parameters */ + video_disable = 0; + av_freep(&video_codec_name); + av_freep(&forced_key_frames); + video_stream_copy = 0; + frame_pix_fmt = PIX_FMT_NONE; +} + +static void new_audio_stream(AVFormatContext *oc, int file_idx) +{ + AVStream *st; + OutputStream *ost; + AVCodec *codec= NULL; + AVCodecContext *audio_enc; + enum CodecID codec_id = CODEC_ID_NONE; + + if(!audio_stream_copy){ + if (audio_codec_name) { + codec_id = find_codec_or_die(audio_codec_name, AVMEDIA_TYPE_AUDIO, 1); + codec = avcodec_find_encoder_by_name(audio_codec_name); + } else { + codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_AUDIO); + codec = avcodec_find_encoder(codec_id); + } + } + ost = new_output_stream(oc, file_idx, codec); + st = ost->st; + + ost->bitstream_filters = audio_bitstream_filters; + audio_bitstream_filters= NULL; + + st->codec->thread_count= thread_count; + + audio_enc = st->codec; + audio_enc->codec_type = AVMEDIA_TYPE_AUDIO; + + if(audio_codec_tag) + audio_enc->codec_tag= audio_codec_tag; + + if (oc->oformat->flags & AVFMT_GLOBALHEADER) { + audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER; + } + if (audio_stream_copy) { + st->stream_copy = 1; + } else { + audio_enc->codec_id = codec_id; + + if (audio_qscale > QSCALE_NONE) { + audio_enc->flags |= CODEC_FLAG_QSCALE; + audio_enc->global_quality = FF_QP2LAMBDA * audio_qscale; + } + if (audio_channels) + audio_enc->channels = audio_channels; + if (audio_sample_fmt != AV_SAMPLE_FMT_NONE) + audio_enc->sample_fmt = audio_sample_fmt; + if (audio_sample_rate) + audio_enc->sample_rate = audio_sample_rate; + } + if (audio_language) { + av_dict_set(&st->metadata, "language", audio_language, 0); + av_freep(&audio_language); + } + + /* reset some key parameters */ + audio_disable = 0; + av_freep(&audio_codec_name); + audio_stream_copy = 0; +} + +static void new_data_stream(AVFormatContext *oc, int file_idx) +{ + AVStream *st; + OutputStream *ost; + AVCodecContext *data_enc; + + ost = new_output_stream(oc, file_idx, NULL); + st = ost->st; + data_enc = st->codec; + if (!data_stream_copy) { + fprintf(stderr, "Data stream encoding not supported yet (only streamcopy)\n"); + exit_program(1); + } + + data_enc->codec_type = AVMEDIA_TYPE_DATA; + + if (data_codec_tag) + data_enc->codec_tag= data_codec_tag; + + if (oc->oformat->flags & AVFMT_GLOBALHEADER) { + data_enc->flags |= CODEC_FLAG_GLOBAL_HEADER; + } + if (data_stream_copy) { + st->stream_copy = 1; + } + + data_disable = 0; + av_freep(&data_codec_name); + data_stream_copy 
= 0; +} + +static void new_subtitle_stream(AVFormatContext *oc, int file_idx) +{ + AVStream *st; + OutputStream *ost; + AVCodec *codec=NULL; + AVCodecContext *subtitle_enc; + enum CodecID codec_id = CODEC_ID_NONE; + + if(!subtitle_stream_copy){ + if (subtitle_codec_name) { + codec_id = find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 1); + codec = avcodec_find_encoder_by_name(subtitle_codec_name); + } else { + codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_SUBTITLE); + codec = avcodec_find_encoder(codec_id); + } + } + ost = new_output_stream(oc, file_idx, codec); + st = ost->st; + subtitle_enc = st->codec; + + ost->bitstream_filters = subtitle_bitstream_filters; + subtitle_bitstream_filters= NULL; + + subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE; + + if(subtitle_codec_tag) + subtitle_enc->codec_tag= subtitle_codec_tag; + + if (oc->oformat->flags & AVFMT_GLOBALHEADER) { + subtitle_enc->flags |= CODEC_FLAG_GLOBAL_HEADER; + } + if (subtitle_stream_copy) { + st->stream_copy = 1; + } else { + subtitle_enc->codec_id = codec_id; + } + + if (subtitle_language) { + av_dict_set(&st->metadata, "language", subtitle_language, 0); + av_freep(&subtitle_language); + } + + subtitle_disable = 0; + av_freep(&subtitle_codec_name); + subtitle_stream_copy = 0; +} + +static int opt_new_stream(const char *opt, const char *arg) +{ + AVFormatContext *oc; + int file_idx = nb_output_files - 1; + if (nb_output_files <= 0) { + fprintf(stderr, "At least one output file must be specified\n"); + exit_program(1); + } + oc = output_files[file_idx]; + + if (!strcmp(opt, "newvideo" )) new_video_stream (oc, file_idx); + else if (!strcmp(opt, "newaudio" )) new_audio_stream (oc, file_idx); + else if (!strcmp(opt, "newsubtitle")) new_subtitle_stream(oc, file_idx); + else if (!strcmp(opt, "newdata" )) new_data_stream (oc, file_idx); + else av_assert0(0); + return 0; +} + +/* arg format is "output-stream-index:streamid-value". 
*/ +static int opt_streamid(const char *opt, const char *arg) +{ + int idx; + char *p; + char idx_str[16]; + + av_strlcpy(idx_str, arg, sizeof(idx_str)); + p = strchr(idx_str, ':'); + if (!p) { + fprintf(stderr, + "Invalid value '%s' for option '%s', required syntax is 'index:value'\n", + arg, opt); + exit_program(1); + } + *p++ = '\0'; + idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX); + streamid_map = grow_array(streamid_map, sizeof(*streamid_map), &nb_streamid_map, idx+1); + streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX); + return 0; +} + +static void opt_output_file(const char *filename) +{ + AVFormatContext *oc; + int err, use_video, use_audio, use_subtitle, use_data; + int input_has_video, input_has_audio, input_has_subtitle, input_has_data; + AVOutputFormat *file_oformat; + + if (!strcmp(filename, "-")) + filename = "pipe:"; + + oc = avformat_alloc_context(); + if (!oc) { + print_error(filename, AVERROR(ENOMEM)); + exit_program(1); + } + + if (last_asked_format) { + file_oformat = av_guess_format(last_asked_format, NULL, NULL); + if (!file_oformat) { + fprintf(stderr, "Requested output format '%s' is not a suitable output format\n", last_asked_format); + exit_program(1); + } + last_asked_format = NULL; + } else { + file_oformat = av_guess_format(NULL, filename, NULL); + if (!file_oformat) { + fprintf(stderr, "Unable to find a suitable output format for '%s'\n", + filename); + exit_program(1); + } + } + + oc->oformat = file_oformat; + av_strlcpy(oc->filename, filename, sizeof(oc->filename)); + + if (!strcmp(file_oformat->name, "ffm") && + av_strstart(filename, "http:", NULL)) { + /* special case for files sent to avserver: we get the stream + parameters from avserver */ + int err = read_avserver_streams(oc, filename); + if (err < 0) { + print_error(filename, err); + exit_program(1); + } + } else { + use_video = file_oformat->video_codec != CODEC_ID_NONE || video_stream_copy || video_codec_name; + use_audio = file_oformat->audio_codec != CODEC_ID_NONE || audio_stream_copy || audio_codec_name; + use_subtitle = file_oformat->subtitle_codec != CODEC_ID_NONE || subtitle_stream_copy || subtitle_codec_name; + use_data = data_stream_copy || data_codec_name; /* XXX once generic data codec will be available add a ->data_codec reference and use it here */ + + /* disable if no corresponding type found */ + check_inputs(&input_has_video, + &input_has_audio, + &input_has_subtitle, + &input_has_data); + + if (!input_has_video) + use_video = 0; + if (!input_has_audio) + use_audio = 0; + if (!input_has_subtitle) + use_subtitle = 0; + if (!input_has_data) + use_data = 0; + + /* manual disable */ + if (audio_disable) use_audio = 0; + if (video_disable) use_video = 0; + if (subtitle_disable) use_subtitle = 0; + if (data_disable) use_data = 0; + + if (use_video) new_video_stream(oc, nb_output_files); + if (use_audio) new_audio_stream(oc, nb_output_files); + if (use_subtitle) new_subtitle_stream(oc, nb_output_files); + if (use_data) new_data_stream(oc, nb_output_files); + + av_dict_copy(&oc->metadata, metadata, 0); + av_dict_free(&metadata); + } + + av_dict_copy(&output_opts[nb_output_files], format_opts, 0); + output_files[nb_output_files++] = oc; + + /* check filename in case of an image number is expected */ + if (oc->oformat->flags & AVFMT_NEEDNUMBER) { + if (!av_filename_number_test(oc->filename)) { + print_error(oc->filename, AVERROR(EINVAL)); + exit_program(1); + } + } + + if (!(oc->oformat->flags & AVFMT_NOFILE)) { + /* test if it already exists to avoid loosing 
precious files */ + if (!file_overwrite && + (strchr(filename, ':') == NULL || + filename[1] == ':' || + av_strstart(filename, "file:", NULL))) { + if (avio_check(filename, 0) == 0) { + if (!using_stdin) { + fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename); + fflush(stderr); + if (!read_yesno()) { + fprintf(stderr, "Not overwriting - exiting\n"); + exit_program(1); + } + } + else { + fprintf(stderr,"File '%s' already exists. Exiting.\n", filename); + exit_program(1); + } + } + } + + /* open the file */ + if ((err = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE)) < 0) { + print_error(filename, err); + exit_program(1); + } + } + + oc->preload= (int)(mux_preload*AV_TIME_BASE); + oc->max_delay= (int)(mux_max_delay*AV_TIME_BASE); + if (loop_output >= 0) { + av_log(NULL, AV_LOG_WARNING, "-loop_output is deprecated, use -loop\n"); + oc->loop_output = loop_output; + } + oc->flags |= AVFMT_FLAG_NONBLOCK; + + frame_rate = (AVRational){0, 0}; + frame_width = 0; + frame_height = 0; + audio_sample_rate = 0; + audio_channels = 0; + audio_sample_fmt = AV_SAMPLE_FMT_NONE; + + av_freep(&forced_key_frames); + uninit_opts(); + init_opts(); +} + +/* same option as mencoder */ +static int opt_pass(const char *opt, const char *arg) +{ + do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2); + return 0; +} + +static int64_t getutime(void) +{ +#if HAVE_GETRUSAGE + struct rusage rusage; + + getrusage(RUSAGE_SELF, &rusage); + return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec; +#elif HAVE_GETPROCESSTIMES + HANDLE proc; + FILETIME c, e, k, u; + proc = GetCurrentProcess(); + GetProcessTimes(proc, &c, &e, &k, &u); + return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10; +#else + return av_gettime(); +#endif +} + +static int64_t getmaxrss(void) +{ +#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS + struct rusage rusage; + getrusage(RUSAGE_SELF, &rusage); + return (int64_t)rusage.ru_maxrss * 1024; +#elif HAVE_GETPROCESSMEMORYINFO + HANDLE proc; + PROCESS_MEMORY_COUNTERS memcounters; + proc = GetCurrentProcess(); + memcounters.cb = sizeof(memcounters); + GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters)); + return memcounters.PeakPagefileUsage; +#else + return 0; +#endif +} + +static void parse_matrix_coeffs(uint16_t *dest, const char *str) +{ + int i; + const char *p = str; + for(i = 0;; i++) { + dest[i] = atoi(p); + if(i == 63) + break; + p = strchr(p, ','); + if(!p) { + fprintf(stderr, "Syntax error in matrix \"%s\" at coeff %d\n", str, i); + exit_program(1); + } + p++; + } +} + +static void opt_inter_matrix(const char *arg) +{ + inter_matrix = av_mallocz(sizeof(uint16_t) * 64); + parse_matrix_coeffs(inter_matrix, arg); +} + +static void opt_intra_matrix(const char *arg) +{ + intra_matrix = av_mallocz(sizeof(uint16_t) * 64); + parse_matrix_coeffs(intra_matrix, arg); +} + +static void show_usage(void) +{ + printf("Hyper fast Audio and Video encoder\n"); + printf("usage: %s [options] [[infile options] -i infile]... 
{[outfile options] outfile}...\n", program_name); + printf("\n"); +} + +static void show_help(void) +{ + AVCodec *c; + AVOutputFormat *oformat = NULL; + AVInputFormat *iformat = NULL; + + av_log_set_callback(log_callback_help); + show_usage(); + show_help_options(options, "Main options:\n", + OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0); + show_help_options(options, "\nAdvanced options:\n", + OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, + OPT_EXPERT); + show_help_options(options, "\nVideo options:\n", + OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB, + OPT_VIDEO); + show_help_options(options, "\nAdvanced Video options:\n", + OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB, + OPT_VIDEO | OPT_EXPERT); + show_help_options(options, "\nAudio options:\n", + OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB, + OPT_AUDIO); + show_help_options(options, "\nAdvanced Audio options:\n", + OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB, + OPT_AUDIO | OPT_EXPERT); + show_help_options(options, "\nSubtitle options:\n", + OPT_SUBTITLE | OPT_GRAB, + OPT_SUBTITLE); + show_help_options(options, "\nAudio/Video grab options:\n", + OPT_GRAB, + OPT_GRAB); + printf("\n"); + av_opt_show2(avcodec_opts[0], NULL, AV_OPT_FLAG_ENCODING_PARAM|AV_OPT_FLAG_DECODING_PARAM, 0); + printf("\n"); + + /* individual codec options */ + c = NULL; + while ((c = av_codec_next(c))) { + if (c->priv_class) { + av_opt_show2(&c->priv_class, NULL, AV_OPT_FLAG_ENCODING_PARAM|AV_OPT_FLAG_DECODING_PARAM, 0); + printf("\n"); + } + } + + av_opt_show2(avformat_opts, NULL, AV_OPT_FLAG_ENCODING_PARAM|AV_OPT_FLAG_DECODING_PARAM, 0); + printf("\n"); + + /* individual muxer options */ + while ((oformat = av_oformat_next(oformat))) { + if (oformat->priv_class) { + av_opt_show2(&oformat->priv_class, NULL, AV_OPT_FLAG_ENCODING_PARAM, 0); + printf("\n"); + } + } + + /* individual demuxer options */ + while ((iformat = av_iformat_next(iformat))) { + if (iformat->priv_class) { + av_opt_show2(&iformat->priv_class, NULL, AV_OPT_FLAG_DECODING_PARAM, 0); + printf("\n"); + } + } + + av_opt_show2(sws_opts, NULL, AV_OPT_FLAG_ENCODING_PARAM|AV_OPT_FLAG_DECODING_PARAM, 0); +} + +static int opt_target(const char *opt, const char *arg) +{ + enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN; + static const char *const frame_rates[] = {"25", "30000/1001", "24000/1001"}; + + if(!strncmp(arg, "pal-", 4)) { + norm = PAL; + arg += 4; + } else if(!strncmp(arg, "ntsc-", 5)) { + norm = NTSC; + arg += 5; + } else if(!strncmp(arg, "film-", 5)) { + norm = FILM; + arg += 5; + } else { + int fr; + /* Calculate FR via float to avoid int overflow */ + fr = (int)(frame_rate.num * 1000.0 / frame_rate.den); + if(fr == 25000) { + norm = PAL; + } else if((fr == 29970) || (fr == 23976)) { + norm = NTSC; + } else { + /* Try to determine PAL/NTSC by peeking in the input files */ + if(nb_input_files) { + int i, j; + for (j = 0; j < nb_input_files; j++) { + for (i = 0; i < input_files[j].ctx->nb_streams; i++) { + AVCodecContext *c = input_files[j].ctx->streams[i]->codec; + if(c->codec_type != AVMEDIA_TYPE_VIDEO) + continue; + fr = c->time_base.den * 1000 / c->time_base.num; + if(fr == 25000) { + norm = PAL; + break; + } else if((fr == 29970) || (fr == 23976)) { + norm = NTSC; + break; + } + } + if(norm != UNKNOWN) + break; + } + } + } + if(verbose > 0 && norm != UNKNOWN) + fprintf(stderr, "Assuming %s for target.\n", norm == PAL ? 
"PAL" : "NTSC"); + } + + if(norm == UNKNOWN) { + fprintf(stderr, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n"); + fprintf(stderr, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n"); + fprintf(stderr, "or set a framerate with \"-r xxx\".\n"); + exit_program(1); + } + + if(!strcmp(arg, "vcd")) { + opt_video_codec("vcodec", "mpeg1video"); + opt_audio_codec("vcodec", "mp2"); + opt_format("f", "vcd"); + + opt_frame_size("s", norm == PAL ? "352x288" : "352x240"); + opt_frame_rate("r", frame_rates[norm]); + opt_default("g", norm == PAL ? "15" : "18"); + + opt_default("b", "1150000"); + opt_default("maxrate", "1150000"); + opt_default("minrate", "1150000"); + opt_default("bufsize", "327680"); // 40*1024*8; + + opt_default("ab", "224000"); + audio_sample_rate = 44100; + audio_channels = 2; + + opt_default("packetsize", "2324"); + opt_default("muxrate", "1411200"); // 2352 * 75 * 8; + + /* We have to offset the PTS, so that it is consistent with the SCR. + SCR starts at 36000, but the first two packs contain only padding + and the first pack from the other stream, respectively, may also have + been written before. + So the real data starts at SCR 36000+3*1200. */ + mux_preload= (36000+3*1200) / 90000.0; //0.44 + } else if(!strcmp(arg, "svcd")) { + + opt_video_codec("vcodec", "mpeg2video"); + opt_audio_codec("acodec", "mp2"); + opt_format("f", "svcd"); + + opt_frame_size("s", norm == PAL ? "480x576" : "480x480"); + opt_frame_rate("r", frame_rates[norm]); + opt_default("g", norm == PAL ? "15" : "18"); + + opt_default("b", "2040000"); + opt_default("maxrate", "2516000"); + opt_default("minrate", "0"); //1145000; + opt_default("bufsize", "1835008"); //224*1024*8; + opt_default("flags", "+scan_offset"); + + + opt_default("ab", "224000"); + audio_sample_rate = 44100; + + opt_default("packetsize", "2324"); + + } else if(!strcmp(arg, "dvd")) { + + opt_video_codec("vcodec", "mpeg2video"); + opt_audio_codec("vcodec", "ac3"); + opt_format("f", "dvd"); + + opt_frame_size("vcodec", norm == PAL ? "720x576" : "720x480"); + opt_frame_rate("r", frame_rates[norm]); + opt_default("g", norm == PAL ? "15" : "18"); + + opt_default("b", "6000000"); + opt_default("maxrate", "9000000"); + opt_default("minrate", "0"); //1500000; + opt_default("bufsize", "1835008"); //224*1024*8; + + opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack. + opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8 + + opt_default("ab", "448000"); + audio_sample_rate = 48000; + + } else if(!strncmp(arg, "dv", 2)) { + + opt_format("f", "dv"); + + opt_frame_size("s", norm == PAL ? "720x576" : "720x480"); + opt_frame_pix_fmt("pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" : + norm == PAL ? 
"yuv420p" : "yuv411p"); + opt_frame_rate("r", frame_rates[norm]); + + audio_sample_rate = 48000; + audio_channels = 2; + + } else { + fprintf(stderr, "Unknown target: %s\n", arg); + return AVERROR(EINVAL); + } + return 0; +} + +static int opt_vstats_file(const char *opt, const char *arg) +{ + av_free (vstats_filename); + vstats_filename=av_strdup (arg); + return 0; +} + +static int opt_vstats(const char *opt, const char *arg) +{ + char filename[40]; + time_t today2 = time(NULL); + struct tm *today = localtime(&today2); + + snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min, + today->tm_sec); + return opt_vstats_file(opt, filename); +} + +static int opt_bsf(const char *opt, const char *arg) +{ + AVBitStreamFilterContext *bsfc= av_bitstream_filter_init(arg); //FIXME split name and args for filter at '=' + AVBitStreamFilterContext **bsfp; + + if(!bsfc){ + fprintf(stderr, "Unknown bitstream filter %s\n", arg); + exit_program(1); + } + + bsfp= *opt == 'v' ? &video_bitstream_filters : + *opt == 'a' ? &audio_bitstream_filters : + &subtitle_bitstream_filters; + while(*bsfp) + bsfp= &(*bsfp)->next; + + *bsfp= bsfc; + + return 0; +} + +static int opt_preset(const char *opt, const char *arg) +{ + FILE *f=NULL; + char filename[1000], tmp[1000], tmp2[1000], line[1000]; + char *codec_name = *opt == 'v' ? video_codec_name : + *opt == 'a' ? audio_codec_name : + subtitle_codec_name; + + if (!(f = get_preset_file(filename, sizeof(filename), arg, *opt == 'f', codec_name))) { + fprintf(stderr, "File for preset '%s' not found\n", arg); + exit_program(1); + } + + while(!feof(f)){ + int e= fscanf(f, "%999[^\n]\n", line) - 1; + if(line[0] == '#' && !e) + continue; + e|= sscanf(line, "%999[^=]=%999[^\n]\n", tmp, tmp2) - 2; + if(e){ + fprintf(stderr, "%s: Invalid syntax: '%s'\n", filename, line); + exit_program(1); + } + if(!strcmp(tmp, "acodec")){ + opt_audio_codec(tmp, tmp2); + }else if(!strcmp(tmp, "vcodec")){ + opt_video_codec(tmp, tmp2); + }else if(!strcmp(tmp, "scodec")){ + opt_subtitle_codec(tmp, tmp2); + }else if(!strcmp(tmp, "dcodec")){ + opt_data_codec(tmp, tmp2); + }else if(opt_default(tmp, tmp2) < 0){ + fprintf(stderr, "%s: Invalid option or argument: '%s', parsed as '%s' = '%s'\n", filename, line, tmp, tmp2); + exit_program(1); + } + } + + fclose(f); + + return 0; +} + +static const OptionDef options[] = { + /* main options */ +#include "cmdutils_common_opts.h" + { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" }, + { "i", HAS_ARG, {(void*)opt_input_file}, "input file name", "filename" }, + { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" }, + { "map", HAS_ARG | OPT_EXPERT, {(void*)opt_map}, "set input stream mapping", "file.stream[:syncfile.syncstream]" }, + { "map_meta_data", HAS_ARG | OPT_EXPERT, {(void*)opt_map_meta_data}, "DEPRECATED set meta data information of outfile from infile", + "outfile[,metadata]:infile[,metadata]" }, + { "map_metadata", HAS_ARG | OPT_EXPERT, {(void*)opt_map_metadata}, "set metadata information of outfile from infile", + "outfile[,metadata]:infile[,metadata]" }, + { "map_chapters", HAS_ARG | OPT_EXPERT, {(void*)opt_map_chapters}, "set chapters mapping", "outfile:infile" }, + { "t", HAS_ARG, {(void*)opt_recording_time}, "record or transcode \"duration\" seconds of audio/video", "duration" }, + { "fs", HAS_ARG | OPT_INT64, {(void*)&limit_filesize}, "set the limit file size in bytes", "limit_size" }, // + { "ss", HAS_ARG, {(void*)opt_start_time}, "set the start time offset", "time_off" }, + { 
"itsoffset", HAS_ARG, {(void*)opt_input_ts_offset}, "set the input ts offset", "time_off" }, + { "itsscale", HAS_ARG, {(void*)opt_input_ts_scale}, "set the input ts scale", "stream:scale" }, + { "timestamp", HAS_ARG, {(void*)opt_recording_timestamp}, "set the recording timestamp ('now' to set the current time)", "time" }, + { "metadata", HAS_ARG, {(void*)opt_metadata}, "add metadata", "string=string" }, + { "dframes", OPT_INT | HAS_ARG, {(void*)&max_frames[AVMEDIA_TYPE_DATA]}, "set the number of data frames to record", "number" }, + { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark}, + "add timings for benchmarking" }, + { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" }, + { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump}, + "dump each input packet" }, + { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump}, + "when dumping packets, also dump the payload" }, + { "re", OPT_BOOL | OPT_EXPERT, {(void*)&rate_emu}, "read input at native frame rate", "" }, + { "loop_input", OPT_BOOL | OPT_EXPERT, {(void*)&loop_input}, "deprecated, use -loop" }, + { "loop_output", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&loop_output}, "deprecated, use -loop", "" }, + { "v", HAS_ARG, {(void*)opt_verbose}, "set the verbosity level", "number" }, + { "target", HAS_ARG, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" }, + { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" }, + { "vsync", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&video_sync_method}, "video sync method", "" }, + { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" }, + { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" }, + { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)©_ts}, "copy timestamps" }, + { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)©_tb}, "copy input stream time base when stream copying" }, + { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, // + { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" }, + { "programid", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&opt_programid}, "desired program number", "" }, + { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" }, + { "copyinkf", OPT_BOOL | OPT_EXPERT, {(void*)©_initial_nonkeyframes}, "copy initial non-keyframes" }, + + /* video options */ + { "vframes", OPT_INT | HAS_ARG | OPT_VIDEO, {(void*)&max_frames[AVMEDIA_TYPE_VIDEO]}, "set the number of video frames to record", "number" }, + { "r", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_rate}, "set frame rate (Hz value, fraction or abbreviation)", "rate" }, + { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" }, + { "aspect", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_aspect_ratio}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" }, + { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format, 'list' as argument shows all the pixel formats supported", "format" }, + { "croptop", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop}, "Removed, use the crop filter instead", "size" }, + { "cropbottom", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop}, "Removed, use the crop filter instead", "size" }, + { "cropleft", HAS_ARG | OPT_VIDEO, 
{(void*)opt_frame_crop}, "Removed, use the crop filter instead", "size" }, + { "cropright", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop}, "Removed, use the crop filter instead", "size" }, + { "padtop", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "size" }, + { "padbottom", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "size" }, + { "padleft", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "size" }, + { "padright", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "size" }, + { "padcolor", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "color" }, + { "intra", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_only}, "use only intra frames"}, + { "vn", OPT_BOOL | OPT_VIDEO, {(void*)&video_disable}, "disable video" }, + { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" }, + { "qscale", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_qscale}, "use fixed video quantizer scale (VBR)", "q" }, + { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_video_rc_override_string}, "rate control override for specific intervals", "override" }, + { "vcodec", HAS_ARG | OPT_VIDEO, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" }, + { "me_threshold", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_me_threshold}, "motion estimaton threshold", "threshold" }, + { "sameq", OPT_BOOL | OPT_VIDEO, {(void*)&same_quality}, + "use same quantizer as source (implies VBR)" }, + { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" }, + { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" }, + { "deinterlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_deinterlace}, + "deinterlace pictures" }, + { "psnr", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_psnr}, "calculate PSNR of compressed frames" }, + { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" }, + { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" }, +#if CONFIG_AVFILTER + { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" }, +#endif + { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_intra_matrix}, "specify intra matrix coeffs", "matrix" }, + { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_inter_matrix}, "specify inter matrix coeffs", "matrix" }, + { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_top_field_first}, "top=1/bottom=0/auto=-1 field first", "" }, + { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" }, + { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_codec_tag}, "force video tag/fourcc", "fourcc/tag" }, + { "newvideo", OPT_VIDEO, {(void*)opt_new_stream}, "add a new video stream to the current output stream" }, + { "vlang", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void *)&video_language}, "set the ISO 639 language code (3 letters) of the current video stream" , "code" }, + { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" }, + { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&force_fps}, "force the selected framerate, disable the best supported framerate selection" }, + { "streamid", HAS_ARG | 
OPT_EXPERT, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" }, + { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void *)&forced_key_frames}, "force key frames at specified timestamps", "timestamps" }, + + /* audio options */ + { "aframes", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&max_frames[AVMEDIA_TYPE_AUDIO]}, "set the number of audio frames to record", "number" }, + { "aq", OPT_FLOAT | HAS_ARG | OPT_AUDIO, {(void*)&audio_qscale}, "set audio quality (codec-specific)", "quality", }, + { "ar", HAS_ARG | OPT_AUDIO, {(void*)opt_audio_rate}, "set audio sampling rate (in Hz)", "rate" }, + { "ac", HAS_ARG | OPT_AUDIO, {(void*)opt_audio_channels}, "set number of audio channels", "channels" }, + { "an", OPT_BOOL | OPT_AUDIO, {(void*)&audio_disable}, "disable audio" }, + { "acodec", HAS_ARG | OPT_AUDIO, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" }, + { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO, {(void*)opt_codec_tag}, "force audio tag/fourcc", "fourcc/tag" }, + { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, // + { "newaudio", OPT_AUDIO, {(void*)opt_new_stream}, "add a new audio stream to the current output stream" }, + { "alang", HAS_ARG | OPT_STRING | OPT_AUDIO, {(void *)&audio_language}, "set the ISO 639 language code (3 letters) of the current audio stream" , "code" }, + { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO, {(void*)opt_audio_sample_fmt}, "set sample format, 'list' as argument shows all the sample formats supported", "format" }, + + /* subtitle options */ + { "sn", OPT_BOOL | OPT_SUBTITLE, {(void*)&subtitle_disable}, "disable subtitle" }, + { "scodec", HAS_ARG | OPT_SUBTITLE, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" }, + { "newsubtitle", OPT_SUBTITLE, {(void*)opt_new_stream}, "add a new subtitle stream to the current output stream" }, + { "slang", HAS_ARG | OPT_STRING | OPT_SUBTITLE, {(void *)&subtitle_language}, "set the ISO 639 language code (3 letters) of the current subtitle stream" , "code" }, + { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE, {(void*)opt_codec_tag}, "force subtitle tag/fourcc", "fourcc/tag" }, + + /* grab options */ + { "vc", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_channel}, "deprecated, use -channel", "channel" }, + { "tvstd", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_standard}, "deprecated, use -standard", "standard" }, + { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" }, + + /* muxer options */ + { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT, {(void*)&mux_max_delay}, "set the maximum demux-decode delay", "seconds" }, + { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT, {(void*)&mux_preload}, "set the initial demux-decode delay", "seconds" }, + + { "absf", HAS_ARG | OPT_AUDIO | OPT_EXPERT, {(void*)opt_bsf}, "", "bitstream_filter" }, + { "vbsf", HAS_ARG | OPT_VIDEO | OPT_EXPERT, {(void*)opt_bsf}, "", "bitstream_filter" }, + { "sbsf", HAS_ARG | OPT_SUBTITLE | OPT_EXPERT, {(void*)opt_bsf}, "", "bitstream_filter" }, + + { "apre", HAS_ARG | OPT_AUDIO | OPT_EXPERT, {(void*)opt_preset}, "set the audio options to the indicated preset", "preset" }, + { "vpre", HAS_ARG | OPT_VIDEO | OPT_EXPERT, {(void*)opt_preset}, "set the video options to the indicated preset", "preset" }, + { "spre", HAS_ARG | OPT_SUBTITLE | OPT_EXPERT, {(void*)opt_preset}, "set the subtitle options to the indicated 
preset", "preset" }, + { "fpre", HAS_ARG | OPT_EXPERT, {(void*)opt_preset}, "set options from indicated preset file", "filename" }, + /* data codec support */ + { "dcodec", HAS_ARG | OPT_DATA, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" }, + + { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" }, + { NULL, }, +}; + +int main(int argc, char **argv) +{ + int64_t ti; + + av_log_set_flags(AV_LOG_SKIP_REPEATED); + + avcodec_register_all(); +#if CONFIG_AVDEVICE + avdevice_register_all(); +#endif +#if CONFIG_AVFILTER + avfilter_register_all(); +#endif + av_register_all(); + + avio_set_interrupt_cb(decode_interrupt_cb); + + init_opts(); + + show_banner(); + + /* parse options */ + parse_options(argc, argv, options, opt_output_file); + + if(nb_output_files <= 0 && nb_input_files == 0) { + show_usage(); + fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name); + exit_program(1); + } + + /* file converter / grab */ + if (nb_output_files <= 0) { + fprintf(stderr, "At least one output file must be specified\n"); + exit_program(1); + } + + if (nb_input_files == 0) { + fprintf(stderr, "At least one input file must be specified\n"); + exit_program(1); + } + + ti = getutime(); + if (transcode(output_files, nb_output_files, input_files, nb_input_files, + stream_maps, nb_stream_maps) < 0) + exit_program(1); + ti = getutime() - ti; + if (do_benchmark) { + int maxrss = getmaxrss() / 1024; + printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss); + } + + return exit_program(0); +} diff --git a/cmdutils.c b/cmdutils.c index f6e50fa534..d3fdae7600 100644 --- a/cmdutils.c +++ b/cmdutils.c @@ -754,9 +754,9 @@ FILE *get_preset_file(char *filename, size_t filename_size, { FILE *f = NULL; int i; - const char *base[3]= { getenv("FFMPEG_DATADIR"), + const char *base[3]= { getenv("AVCONV_DATADIR"), getenv("HOME"), - FFMPEG_DATADIR, + AVCONV_DATADIR, }; if (is_path) { @@ -766,11 +766,11 @@ FILE *get_preset_file(char *filename, size_t filename_size, for (i = 0; i < 3 && !f; i++) { if (!base[i]) continue; - snprintf(filename, filename_size, "%s%s/%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", preset_name); + snprintf(filename, filename_size, "%s%s/%s.ffpreset", base[i], i != 1 ? "" : "/.avconv", preset_name); f = fopen(filename, "r"); if (!f && codec_name) { snprintf(filename, filename_size, - "%s%s/%s-%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", codec_name, preset_name); + "%s%s/%s-%s.ffpreset", base[i], i != 1 ? 
"" : "/.avconv", codec_name, preset_name); f = fopen(filename, "r"); } } diff --git a/configure b/configure index bcf944f61e..65839df6de 100755 --- a/configure +++ b/configure @@ -65,7 +65,7 @@ Standard options: --disable-logging do not log configure debug information --prefix=PREFIX install in PREFIX [$prefix] --bindir=DIR install binaries in DIR [PREFIX/bin] - --datadir=DIR install data files in DIR [PREFIX/share/ffmpeg] + --datadir=DIR install data files in DIR [PREFIX/share/avconv] --libdir=DIR install libs in DIR [PREFIX/lib] --shlibdir=DIR install shared libs in DIR [PREFIX/lib] --incdir=DIR install includes in DIR [PREFIX/include] @@ -81,6 +81,7 @@ Configuration options: and binaries will be unredistributable [no] --disable-doc do not build documentation --disable-ffmpeg disable ffmpeg build + --disable-avconv disable avconv build --disable-avplay disable avplay build --disable-avprobe disable avprobe build --disable-avserver disable avserver build @@ -913,6 +914,7 @@ CONFIG_LIST=" dxva2 fastdiv ffmpeg + avconv avplay avprobe avserver @@ -1489,6 +1491,8 @@ postproc_deps="gpl" # programs ffmpeg_deps="avcodec avformat swscale" ffmpeg_select="buffer_filter" +av_deps="avcodec avformat swscale" +av_select="buffer_filter" avplay_deps="avcodec avformat swscale sdl" avplay_select="rdft" avprobe_deps="avcodec avformat" @@ -1595,7 +1599,7 @@ logfile="config.log" # installation paths prefix_default="/usr/local" bindir_default='${prefix}/bin' -datadir_default='${prefix}/share/ffmpeg' +datadir_default='${prefix}/share/avconv' incdir_default='${prefix}/include' libdir_default='${prefix}/lib' mandir_default='${prefix}/share/man' @@ -1635,6 +1639,7 @@ enable debug enable doc enable fastdiv enable ffmpeg +enable avconv enable avplay enable avprobe enable avserver @@ -3284,7 +3289,7 @@ cat > $TMPH </dev/null | sed -ne '9,$p' | grep '^\..\.' | cut -d' ' -f2 | sort >$exclude_fmts + $avconv -pix_fmts list 2>/dev/null | sed -ne '9,$p' | grep '^\..\.' | cut -d' ' -f2 | sort >$exclude_fmts $showfiltfmts scale | awk -F '[ \r]' '/^OUTPUT/{ print $3 }' | sort | comm -23 - $exclude_fmts >$out_fmts pix_fmts=$($showfiltfmts $filter | awk -F '[ \r]' '/^INPUT/{ print $3 }' | sort | comm -12 - $out_fmts) @@ -70,7 +70,7 @@ do_lavfi_pixfmts "scale" "200:100" do_lavfi_pixfmts "vflip" "" if [ -n "$do_pixdesc" ]; then - pix_fmts="$($ffmpeg -pix_fmts list 2>/dev/null | sed -ne '9,$p' | grep '^IO' | cut -d' ' -f2 | sort)" + pix_fmts="$($avconv -pix_fmts list 2>/dev/null | sed -ne '9,$p' | grep '^IO' | cut -d' ' -f2 | sort)" for pix_fmt in $pix_fmts; do do_video_filter $pix_fmt "slicify=random,format=$pix_fmt,pixdesctest" -pix_fmt $pix_fmt done diff --git a/tests/regression-funcs.sh b/tests/regression-funcs.sh index 979157bcf9..bba189092b 100755 --- a/tests/regression-funcs.sh +++ b/tests/regression-funcs.sh @@ -1,6 +1,6 @@ #!/bin/sh # -# common regression functions for ffmpeg +# common regression functions for avconv # # @@ -18,7 +18,7 @@ this="$test.$test_ref" outfile="$datadir/$test_ref/" # various files -ffmpeg="$target_exec ${target_path}/ffmpeg" +avconv="$target_exec ${target_path}/avconv" tiny_psnr="tests/tiny_psnr" raw_src="${target_path}/$raw_src_dir/%02d.pgm" raw_dst="$datadir/$this.out.yuv" @@ -43,23 +43,23 @@ echov(){ . 
$(dirname $0)/md5.sh -FFMPEG_OPTS="-v 0 -y" +AVCONV_OPTS="-v 0 -y" COMMON_OPTS="-flags +bitexact -idct simple -sws_flags +accurate_rnd+bitexact" DEC_OPTS="$COMMON_OPTS -threads $threads" ENC_OPTS="$COMMON_OPTS -threads 1 -dct fastint" -run_ffmpeg() +run_avconv() { - $echov $ffmpeg $FFMPEG_OPTS $* - $ffmpeg $FFMPEG_OPTS $* + $echov $avconv $AVCONV_OPTS $* + $avconv $AVCONV_OPTS $* } -do_ffmpeg() +do_avconv() { f="$1" shift set -- $* ${target_path}/$f - run_ffmpeg $* + run_avconv $* do_md5sum $f if [ $f = $raw_dst ] ; then $tiny_psnr $f $raw_ref @@ -70,12 +70,12 @@ do_ffmpeg() fi } -do_ffmpeg_nomd5() +do_avconv_nomd5() { f="$1" shift set -- $* ${target_path}/$f - run_ffmpeg $* + run_avconv $* if [ $f = $raw_dst ] ; then $tiny_psnr $f $raw_ref elif [ $f = $pcm_dst ] ; then @@ -85,32 +85,32 @@ do_ffmpeg_nomd5() fi } -do_ffmpeg_crc() +do_avconv_crc() { f="$1" shift - run_ffmpeg $* -f crc "$target_crcfile" + run_avconv $* -f crc "$target_crcfile" echo "$f $(cat $crcfile)" } do_video_decoding() { - do_ffmpeg $raw_dst $DEC_OPTS $1 -i $target_path/$file -f rawvideo $ENC_OPTS -vsync 0 $2 + do_avconv $raw_dst $DEC_OPTS $1 -i $target_path/$file -f rawvideo $ENC_OPTS -vsync 0 $2 } do_video_encoding() { file=${outfile}$1 - do_ffmpeg $file $DEC_OPTS -f image2 -vcodec pgmyuv -i $raw_src $ENC_OPTS $2 + do_avconv $file $DEC_OPTS -f image2 -vcodec pgmyuv -i $raw_src $ENC_OPTS $2 } do_audio_encoding() { file=${outfile}$1 - do_ffmpeg $file $DEC_OPTS -ac 2 -ar 44100 -f s16le -i $pcm_src -ab 128k $ENC_OPTS $2 + do_avconv $file $DEC_OPTS -ac 2 -ar 44100 -f s16le -i $pcm_src -ab 128k $ENC_OPTS $2 } do_audio_decoding() { - do_ffmpeg $pcm_dst $DEC_OPTS -i $target_path/$file -sample_fmt s16 -f wav + do_avconv $pcm_dst $DEC_OPTS -i $target_path/$file -sample_fmt s16 -f wav } From b9aac90bd875c5cb6f42fcb53e3a8fde3f315a7e Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Wed, 27 Jul 2011 21:39:12 +0200 Subject: [PATCH 08/20] avconv: make -map_chapters accept only the input file index. This is consistent with how all the other options work. 
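As a rough usage sketch (not part of the patch; a.mkv, b.mkv and out.mkv are placeholder file names), copying the chapters of the second input file into the next output file now reads:

    avconv -i a.mkv -i b.mkv -map_chapters 1 out.mkv

and passing a negative index disables chapter copying for that output.
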
--- avconv.c | 80 ++++++++++++++----------------------------------- doc/avconv.texi | 9 +++--- 2 files changed, 28 insertions(+), 61 deletions(-) diff --git a/avconv.c b/avconv.c index 956de7b9ef..9a47ed7567 100644 --- a/avconv.c +++ b/avconv.c @@ -94,11 +94,6 @@ typedef struct MetadataMap { int index; //< stream/chapter/program number } MetadataMap; -typedef struct ChapterMap { - int in_file; - int out_file; -} ChapterMap; - static const OptionDef options[]; #define MAX_FILES 100 @@ -121,8 +116,7 @@ static int metadata_global_autocopy = 1; static int metadata_streams_autocopy = 1; static int metadata_chapters_autocopy = 1; -static ChapterMap *chapter_maps = NULL; -static int nb_chapter_maps; +static int chapters_input_file = INT_MAX; /* indexed by output file stream index */ static int *streamid_map = NULL; @@ -2464,38 +2458,6 @@ static int transcode(AVFormatContext **output_files, AV_DICT_DONT_OVERWRITE); } - /* copy chapters according to chapter maps */ - for (i = 0; i < nb_chapter_maps; i++) { - int infile = chapter_maps[i].in_file; - int outfile = chapter_maps[i].out_file; - - if (infile < 0 || outfile < 0) - continue; - if (infile >= nb_input_files) { - snprintf(error, sizeof(error), "Invalid input file index %d in chapter mapping.\n", infile); - ret = AVERROR(EINVAL); - goto dump_format; - } - if (outfile >= nb_output_files) { - snprintf(error, sizeof(error), "Invalid output file index %d in chapter mapping.\n",outfile); - ret = AVERROR(EINVAL); - goto dump_format; - } - copy_chapters(infile, outfile); - } - - /* copy chapters from the first input file that has them*/ - if (!nb_chapter_maps) - for (i = 0; i < nb_input_files; i++) { - if (!input_files[i].ctx->nb_chapters) - continue; - - for (j = 0; j < nb_output_files; j++) - if ((ret = copy_chapters(i, j)) < 0) - goto dump_format; - break; - } - /* open files and write file headers */ for(i=0;iout_file = strtol(arg, &p, 0); - if (*p) - p++; - - c->in_file = strtol(p, &p, 0); - return 0; -} - static int opt_input_ts_scale(const char *opt, const char *arg) { unsigned int stream; @@ -3732,7 +3678,7 @@ static int opt_streamid(const char *opt, const char *arg) static void opt_output_file(const char *filename) { AVFormatContext *oc; - int err, use_video, use_audio, use_subtitle, use_data; + int i, err, use_video, use_audio, use_subtitle, use_data; int input_has_video, input_has_audio, input_has_subtitle, input_has_data; AVOutputFormat *file_oformat; @@ -3857,12 +3803,32 @@ static void opt_output_file(const char *filename) } oc->flags |= AVFMT_FLAG_NONBLOCK; + /* copy chapters */ + if (chapters_input_file >= nb_input_files) { + if (chapters_input_file == INT_MAX) { + /* copy chapters from the first input file that has them*/ + chapters_input_file = -1; + for (i = 0; i < nb_input_files; i++) + if (input_files[i].ctx->nb_chapters) { + chapters_input_file = i; + break; + } + } else { + av_log(NULL, AV_LOG_ERROR, "Invalid input file index %d in chapter mapping.\n", + chapters_input_file); + exit_program(1); + } + } + if (chapters_input_file >= 0) + copy_chapters(chapters_input_file, nb_output_files - 1); + frame_rate = (AVRational){0, 0}; frame_width = 0; frame_height = 0; audio_sample_rate = 0; audio_channels = 0; audio_sample_fmt = AV_SAMPLE_FMT_NONE; + chapters_input_file = INT_MAX; av_freep(&forced_key_frames); uninit_opts(); @@ -4250,7 +4216,7 @@ static const OptionDef options[] = { "outfile[,metadata]:infile[,metadata]" }, { "map_metadata", HAS_ARG | OPT_EXPERT, {(void*)opt_map_metadata}, "set metadata information of outfile from 
infile", "outfile[,metadata]:infile[,metadata]" }, - { "map_chapters", HAS_ARG | OPT_EXPERT, {(void*)opt_map_chapters}, "set chapters mapping", "outfile:infile" }, + { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&chapters_input_file}, "set chapters mapping", "input_file_index" }, { "t", HAS_ARG, {(void*)opt_recording_time}, "record or transcode \"duration\" seconds of audio/video", "duration" }, { "fs", HAS_ARG | OPT_INT64, {(void*)&limit_filesize}, "set the limit file size in bytes", "limit_size" }, // { "ss", HAS_ARG, {(void*)opt_start_time}, "set the start time offset", "time_off" }, diff --git a/doc/avconv.texi b/doc/avconv.texi index 2a00c58bcb..24be5529d2 100644 --- a/doc/avconv.texi +++ b/doc/avconv.texi @@ -703,10 +703,11 @@ of the output file: @example avconv -i in.ogg -map_metadata 0:0,s0 out.mp3 @end example -@item -map_chapters @var{outfile}:@var{infile} -Copy chapters from @var{infile} to @var{outfile}. If no chapter mapping is specified, -then chapters are copied from the first input file with at least one chapter to all -output files. Use a negative file index to disable any chapter copying. +@item -map_chapters @var{input_file_index} +Copy chapters from input file with index @var{input_file_index} to the next +output file. If no chapter mapping is specified, then chapters are copied from +the first input file with at least one chapter. Use a negative file index to +disable any chapter copying. @item -debug Print specific debug info. @item -benchmark From c9065c29176bade732745425029116b0548a959c Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Thu, 28 Jul 2011 12:57:24 +0200 Subject: [PATCH 09/20] avconv: remove deprecated options. --- avconv.c | 52 ------------------------------------------------- doc/avconv.texi | 15 -------------- 2 files changed, 67 deletions(-) diff --git a/avconv.c b/avconv.c index 9a47ed7567..b72b26d68b 100644 --- a/avconv.c +++ b/avconv.c @@ -143,8 +143,6 @@ static int do_deinterlace = 0; static int top_field_first = -1; static int me_threshold = 0; static int intra_dc_precision = 8; -static int loop_input = 0; -static int loop_output = AVFMT_NOOUTPUTLOOP; static int qp_hist = 0; #if CONFIG_AVFILTER static char *vfilters = NULL; @@ -2889,20 +2887,6 @@ static int opt_audio_channels(const char *opt, const char *arg) return 0; } -static int opt_video_channel(const char *opt, const char *arg) -{ - av_log(NULL, AV_LOG_WARNING, "This option is deprecated, use -channel.\n"); - opt_default("channel", arg); - return 0; -} - -static int opt_video_standard(const char *opt, const char *arg) -{ - av_log(NULL, AV_LOG_WARNING, "This option is deprecated, use -standard.\n"); - opt_default("standard", arg); - return 0; -} - static int opt_codec(int *pstream_copy, char **pcodec_name, int codec_type, const char *arg) { @@ -3028,13 +3012,6 @@ static int opt_map_metadata(const char *opt, const char *arg) return 0; } -static int opt_map_meta_data(const char *opt, const char *arg) -{ - fprintf(stderr, "-map_meta_data is deprecated and will be removed soon. 
" - "Use -map_metadata instead.\n"); - return opt_map_metadata(opt, arg); -} - static int opt_input_ts_scale(const char *opt, const char *arg) { unsigned int stream; @@ -3063,19 +3040,6 @@ static int opt_start_time(const char *opt, const char *arg) return 0; } -static int opt_recording_timestamp(const char *opt, const char *arg) -{ - char buf[128]; - int64_t recording_timestamp = parse_time_or_die(opt, arg, 0) / 1E6; - struct tm time = *gmtime((time_t*)&recording_timestamp); - strftime(buf, sizeof(buf), "creation_time=%FT%T%z", &time); - opt_metadata("metadata", buf); - - av_log(NULL, AV_LOG_WARNING, "%s is deprecated, set the 'creation_time' metadata " - "tag instead.\n", opt); - return 0; -} - static int opt_input_ts_offset(const char *opt, const char *arg) { input_ts_offset = parse_time_or_die(opt, arg, 1); @@ -3192,11 +3156,6 @@ static int opt_input_file(const char *opt, const char *filename) opt_programid=0; } - if (loop_input) { - av_log(NULL, AV_LOG_WARNING, "-loop_input is deprecated, use -loop 1\n"); - ic->loop_input = loop_input; - } - /* Set AVCodecContext options for avformat_find_stream_info */ opts = setup_find_stream_info_opts(ic, codec_opts); orig_nb_streams = ic->nb_streams; @@ -3797,10 +3756,6 @@ static void opt_output_file(const char *filename) oc->preload= (int)(mux_preload*AV_TIME_BASE); oc->max_delay= (int)(mux_max_delay*AV_TIME_BASE); - if (loop_output >= 0) { - av_log(NULL, AV_LOG_WARNING, "-loop_output is deprecated, use -loop\n"); - oc->loop_output = loop_output; - } oc->flags |= AVFMT_FLAG_NONBLOCK; /* copy chapters */ @@ -4212,8 +4167,6 @@ static const OptionDef options[] = { { "i", HAS_ARG, {(void*)opt_input_file}, "input file name", "filename" }, { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" }, { "map", HAS_ARG | OPT_EXPERT, {(void*)opt_map}, "set input stream mapping", "file.stream[:syncfile.syncstream]" }, - { "map_meta_data", HAS_ARG | OPT_EXPERT, {(void*)opt_map_meta_data}, "DEPRECATED set meta data information of outfile from infile", - "outfile[,metadata]:infile[,metadata]" }, { "map_metadata", HAS_ARG | OPT_EXPERT, {(void*)opt_map_metadata}, "set metadata information of outfile from infile", "outfile[,metadata]:infile[,metadata]" }, { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&chapters_input_file}, "set chapters mapping", "input_file_index" }, @@ -4222,7 +4175,6 @@ static const OptionDef options[] = { { "ss", HAS_ARG, {(void*)opt_start_time}, "set the start time offset", "time_off" }, { "itsoffset", HAS_ARG, {(void*)opt_input_ts_offset}, "set the input ts offset", "time_off" }, { "itsscale", HAS_ARG, {(void*)opt_input_ts_scale}, "set the input ts scale", "stream:scale" }, - { "timestamp", HAS_ARG, {(void*)opt_recording_timestamp}, "set the recording timestamp ('now' to set the current time)", "time" }, { "metadata", HAS_ARG, {(void*)opt_metadata}, "add metadata", "string=string" }, { "dframes", OPT_INT | HAS_ARG, {(void*)&max_frames[AVMEDIA_TYPE_DATA]}, "set the number of data frames to record", "number" }, { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark}, @@ -4233,8 +4185,6 @@ static const OptionDef options[] = { { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump}, "when dumping packets, also dump the payload" }, { "re", OPT_BOOL | OPT_EXPERT, {(void*)&rate_emu}, "read input at native frame rate", "" }, - { "loop_input", OPT_BOOL | OPT_EXPERT, {(void*)&loop_input}, "deprecated, use -loop" }, - { "loop_output", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&loop_output}, "deprecated, use -loop", "" }, { "v", 
HAS_ARG, {(void*)opt_verbose}, "set the verbosity level", "number" }, { "target", HAS_ARG, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" }, { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" }, @@ -4316,8 +4266,6 @@ static const OptionDef options[] = { { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE, {(void*)opt_codec_tag}, "force subtitle tag/fourcc", "fourcc/tag" }, /* grab options */ - { "vc", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_channel}, "deprecated, use -channel", "channel" }, - { "tvstd", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_standard}, "deprecated, use -standard", "standard" }, { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" }, /* muxer options */ diff --git a/doc/avconv.texi b/doc/avconv.texi index 24be5529d2..32808fd32c 100644 --- a/doc/avconv.texi +++ b/doc/avconv.texi @@ -633,10 +633,6 @@ avconv -i file.mov -an -vn -sbsf mov2textsub -scodec copy -f rawvideo sub.txt @section Audio/Video grab options @table @option -@item -vc @var{channel} -Set video grab channel (DV1394 only). -@item -tvstd @var{standard} -Set television standard (NTSC, PAL (SECAM)). @item -isync Synchronize read on input. @end table @@ -681,9 +677,6 @@ avconv -i a.mov -i b.mov -vcodec copy -acodec copy out.mov -map 0.2 -map 1.6 To add more streams to the output file, you can use the @code{-newaudio}, @code{-newvideo}, @code{-newsubtitle} options. -@item -map_meta_data @var{outfile}[,@var{metadata}]:@var{infile}[,@var{metadata}] -Deprecated, use @var{-map_metadata} instead. - @item -map_metadata @var{outfile}[,@var{metadata}]:@var{infile}[,@var{metadata}] Set metadata information of @var{outfile} from @var{infile}. Note that those are file indices (zero-based), not filenames. @@ -725,14 +718,6 @@ Only use bit exact algorithms (for codec testing). Set RTP payload size in bytes. @item -re Read input at native frame rate. Mainly used to simulate a grab device. -@item -loop_input -Loop over the input stream. Currently it works only for image -streams. This option is used for automatic AVserver testing. -This option is deprecated, use -loop. -@item -loop_output @var{number_of_times} -Repeatedly loop output for formats that support looping such as animated GIF -(0 will loop the output infinitely). -This option is deprecated, use -loop. @item -threads @var{count} Thread count. @item -vsync @var{parameter} From e6e6060c9be60f5eb6c94556ca4c92f76cba0d1f Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Fri, 29 Jul 2011 13:07:27 +0200 Subject: [PATCH 10/20] avconv: make -map_metadata work consistently with the other options Before, it took an input and output file index, now it only takes an input file and applies to the next output file. Stream/chapter/program specification is now part of the option name and the delimiter was changed from ',' to ':' to be consistent with the similar feature for AVOptions. 
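A brief sketch of the syntax change (in.ogg and out.mp3 are placeholder names taken from the documentation hunk below): copying metadata from the first stream of the input file into the output's global metadata changes from

    avconv -i in.ogg -map_metadata 0:0,s0 out.mp3

to

    avconv -i in.ogg -map_metadata 0:s:0 out.mp3

with the output file specification dropped and ':' separating the metadata type and index.
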
--- avconv.c | 145 ++++++++++++++++++++++++------------------------ doc/avconv.texi | 14 ++--- 2 files changed, 80 insertions(+), 79 deletions(-) diff --git a/avconv.c b/avconv.c index b72b26d68b..f5ca05add5 100644 --- a/avconv.c +++ b/avconv.c @@ -2091,10 +2091,6 @@ static int transcode(AVFormatContext **output_files, codec = ost->st->codec; icodec = ist->st->codec; - if (metadata_streams_autocopy) - av_dict_copy(&ost->st->metadata, ist->st->metadata, - AV_DICT_DONT_OVERWRITE); - ost->st->disposition = ist->st->disposition; codec->bits_per_raw_sample= icodec->bits_per_raw_sample; codec->chroma_sample_location = icodec->chroma_sample_location; @@ -2399,63 +2395,6 @@ static int transcode(AVFormatContext **output_files, ist->is_start = 1; } - /* set meta data information from input file if required */ - for (i=0;i= (nb_elems)) {\ - snprintf(error, sizeof(error), "Invalid %s index %d while processing metadata maps\n",\ - (desc), (index));\ - ret = AVERROR(EINVAL);\ - goto dump_format;\ - } - - int out_file_index = meta_data_maps[i][0].file; - int in_file_index = meta_data_maps[i][1].file; - if (in_file_index < 0 || out_file_index < 0) - continue; - METADATA_CHECK_INDEX(out_file_index, nb_output_files, "output file") - METADATA_CHECK_INDEX(in_file_index, nb_input_files, "input file") - - files[0] = output_files[out_file_index]; - files[1] = input_files[in_file_index].ctx; - - for (j = 0; j < 2; j++) { - MetadataMap *map = &meta_data_maps[i][j]; - - switch (map->type) { - case 'g': - meta[j] = &files[j]->metadata; - break; - case 's': - METADATA_CHECK_INDEX(map->index, files[j]->nb_streams, "stream") - meta[j] = &files[j]->streams[map->index]->metadata; - break; - case 'c': - METADATA_CHECK_INDEX(map->index, files[j]->nb_chapters, "chapter") - meta[j] = &files[j]->chapters[map->index]->metadata; - break; - case 'p': - METADATA_CHECK_INDEX(map->index, files[j]->nb_programs, "program") - meta[j] = &files[j]->programs[map->index]->metadata; - break; - } - } - - av_dict_copy(meta[0], *meta[1], AV_DICT_DONT_OVERWRITE); - } - - /* copy global metadata by default */ - if (metadata_global_autocopy) { - - for (i = 0; i < nb_output_files; i++) - av_dict_copy(&output_files[i]->metadata, input_files[0].ctx->metadata, - AV_DICT_DONT_OVERWRITE); - } - /* open files and write file headers */ for(i=0;ifile = strtol(arg, &p, 0); - parse_meta_type(p, &m->type, &m->index, &p); - if (*p) - p++; + parse_meta_type(p, &m->type, &m->index); - m1 = &meta_data_maps[nb_meta_data_maps - 1][1]; - m1->file = strtol(p, &p, 0); - parse_meta_type(p, &m1->type, &m1->index, &p); + m1 = &meta_data_maps[nb_meta_data_maps - 1][0]; + if (p = strchr(opt, ':')) + parse_meta_type(p, &m1->type, &m1->index); + else + m1->type = 'g'; if (m->type == 'g' || m1->type == 'g') metadata_global_autocopy = 0; @@ -3777,6 +3716,62 @@ static void opt_output_file(const char *filename) if (chapters_input_file >= 0) copy_chapters(chapters_input_file, nb_output_files - 1); + /* copy metadata */ + for (i = 0; i < nb_meta_data_maps; i++) { + AVFormatContext *files[2]; + AVDictionary **meta[2]; + int j; + +#define METADATA_CHECK_INDEX(index, nb_elems, desc)\ + if ((index) < 0 || (index) >= (nb_elems)) {\ + av_log(NULL, AV_LOG_ERROR, "Invalid %s index %d while processing metadata maps\n",\ + (desc), (index));\ + exit_program(1);\ + } + + int in_file_index = meta_data_maps[i][1].file; + if (in_file_index < 0) + continue; + METADATA_CHECK_INDEX(in_file_index, nb_input_files, "input file") + + files[0] = oc; + files[1] = input_files[in_file_index].ctx; + + for 
(j = 0; j < 2; j++) { + MetadataMap *map = &meta_data_maps[i][j]; + + switch (map->type) { + case 'g': + meta[j] = &files[j]->metadata; + break; + case 's': + METADATA_CHECK_INDEX(map->index, files[j]->nb_streams, "stream") + meta[j] = &files[j]->streams[map->index]->metadata; + break; + case 'c': + METADATA_CHECK_INDEX(map->index, files[j]->nb_chapters, "chapter") + meta[j] = &files[j]->chapters[map->index]->metadata; + break; + case 'p': + METADATA_CHECK_INDEX(map->index, files[j]->nb_programs, "program") + meta[j] = &files[j]->programs[map->index]->metadata; + break; + } + } + + av_dict_copy(meta[0], *meta[1], AV_DICT_DONT_OVERWRITE); + } + + /* copy global metadata by default */ + if (metadata_global_autocopy) + av_dict_copy(&oc->metadata, input_files[0].ctx->metadata, + AV_DICT_DONT_OVERWRITE); + if (metadata_streams_autocopy) + for (i = 0; i < oc->nb_streams; i++) { + InputStream *ist = &input_streams[output_streams_for_file[nb_output_files-1][i]->source_index]; + av_dict_copy(&oc->streams[i]->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE); + } + frame_rate = (AVRational){0, 0}; frame_width = 0; frame_height = 0; @@ -3785,6 +3780,12 @@ static void opt_output_file(const char *filename) audio_sample_fmt = AV_SAMPLE_FMT_NONE; chapters_input_file = INT_MAX; + av_freep(&meta_data_maps); + nb_meta_data_maps = 0; + metadata_global_autocopy = 1; + metadata_streams_autocopy = 1; + metadata_chapters_autocopy = 1; + av_freep(&forced_key_frames); uninit_opts(); init_opts(); diff --git a/doc/avconv.texi b/doc/avconv.texi index 32808fd32c..ab1df09fb3 100644 --- a/doc/avconv.texi +++ b/doc/avconv.texi @@ -677,16 +677,16 @@ avconv -i a.mov -i b.mov -vcodec copy -acodec copy out.mov -map 0.2 -map 1.6 To add more streams to the output file, you can use the @code{-newaudio}, @code{-newvideo}, @code{-newsubtitle} options. -@item -map_metadata @var{outfile}[,@var{metadata}]:@var{infile}[,@var{metadata}] -Set metadata information of @var{outfile} from @var{infile}. Note that those -are file indices (zero-based), not filenames. -Optional @var{metadata} parameters specify, which metadata to copy - (g)lobal +@item -map_metadata[:@var{metadata_type}][:@var{index}] @var{infile}[:@var{metadata_type}][:@var{index}] +Set metadata information of the next output file from @var{infile}. Note that +those are file indices (zero-based), not filenames. +Optional @var{metadata_type} parameters specify, which metadata to copy - (g)lobal (i.e. metadata that applies to the whole file), per-(s)tream, per-(c)hapter or per-(p)rogram. All metadata specifiers other than global must be followed by the -stream/chapter/program number. If metadata specifier is omitted, it defaults to +stream/chapter/program index. If metadata specifier is omitted, it defaults to global. -By default, global metadata is copied from the first input file to all output files, +By default, global metadata is copied from the first input file, per-stream and per-chapter metadata is copied along with streams/chapters. These default mappings are disabled by creating any mapping of the relevant type. A negative file index can be used to create a dummy mapping that just disables automatic copying. 
@@ -694,7 +694,7 @@ file index can be used to create a dummy mapping that just disables automatic co For example to copy metadata from the first stream of the input file to global metadata of the output file: @example -avconv -i in.ogg -map_metadata 0:0,s0 out.mp3 +avconv -i in.ogg -map_metadata 0:s:0 out.mp3 @end example @item -map_chapters @var{input_file_index} Copy chapters from input file with index @var{input_file_index} to the next From d70e51225062239c250ab6bc935b9711cf97c615 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Fri, 29 Jul 2011 13:47:37 +0200 Subject: [PATCH 11/20] cmdutils: allow ':'-separated modifiers in option names. --- cmdutils.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cmdutils.c b/cmdutils.c index d3fdae7600..1df2556a6f 100644 --- a/cmdutils.c +++ b/cmdutils.c @@ -141,8 +141,11 @@ void show_help_options(const OptionDef *options, const char *msg, int mask, int } static const OptionDef* find_option(const OptionDef *po, const char *name){ + const char *p = strchr(name, ':'); + int len = p ? p - name : strlen(name); + while (po->name != NULL) { - if (!strcmp(name, po->name)) + if (!strncmp(name, po->name, len) && strlen(po->name) == len) break; po++; } From 69c4bee8fa56e5da7fc30198c7c1352c6c4a55c0 Mon Sep 17 00:00:00 2001 From: Reinhard Tartler Date: Tue, 9 Aug 2011 14:44:12 +0200 Subject: [PATCH 12/20] libx264: Include hint for possible values for configuring libx264 The output of the x264 command-line tool contains the most up-to-date description of the possible values for the -preset, -tune and -profile options. --- libavcodec/libx264.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libavcodec/libx264.c b/libavcodec/libx264.c index bcf8b1f688..fea67f164a 100644 --- a/libavcodec/libx264.c +++ b/libavcodec/libx264.c @@ -362,10 +362,10 @@ static av_cold int X264_init(AVCodecContext *avctx) #define OFFSET(x) offsetof(X264Context, x) #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM static const AVOption options[] = { - { "preset", "Set the encoding preset", OFFSET(preset), FF_OPT_TYPE_STRING, { 0 }, 0, 0, VE}, - { "tune", "Tune the encoding params", OFFSET(tune), FF_OPT_TYPE_STRING, { 0 }, 0, 0, VE}, - { "profile", "Set profile restrictions", OFFSET(profile), FF_OPT_TYPE_STRING, { 0 }, 0, 0, VE}, - { "fastfirstpass", "Use fast settings when encoding first pass", OFFSET(fastfirstpass), FF_OPT_TYPE_INT, { 1 }, 0, 1, VE}, + { "preset", "Set the encoding preset (cf. x264 --fullhelp)", OFFSET(preset), FF_OPT_TYPE_STRING, { 0 }, 0, 0, VE}, + { "tune", "Tune the encoding params (cf. x264 --fullhelp)", OFFSET(tune), FF_OPT_TYPE_STRING, { 0 }, 0, 0, VE}, + { "profile", "Set profile restrictions (cf. 
x264 --fullhelp) ", OFFSET(profile), FF_OPT_TYPE_STRING, { 0 }, 0, 0, VE}, + { "fastfirstpass", "Use fast settings when encoding first pass", OFFSET(fastfirstpass), FF_OPT_TYPE_INT, { 1 }, 0, 1, VE}, { NULL }, }; From f60d13663742d1c695680ede83c4d646bc57d380 Mon Sep 17 00:00:00 2001 From: Pino Toscano Date: Fri, 12 Aug 2011 14:11:21 +0200 Subject: [PATCH 13/20] configure: add missing CFLAGS to fix building on the HURD Signed-off-by: Reinhard Tartler Signed-off-by: Luca Barbato --- configure | 1 + 1 file changed, 1 insertion(+) diff --git a/configure b/configure index 65839df6de..9c6ce0ab8d 100755 --- a/configure +++ b/configure @@ -2498,6 +2498,7 @@ case $target_os in add_cppflags -D_POSIX_C_SOURCE=200112 -D_XOPEN_SOURCE=600 -D_BSD_SOURCE ;; gnu) + add_cppflags -D_POSIX_C_SOURCE=200112 -D_XOPEN_SOURCE=600 ;; qnx) add_cppflags -D_QNX_SOURCE From d4863fc1a83ceab1d75469b406a2c67e5659b2a0 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Thu, 28 Jul 2011 16:47:38 +0200 Subject: [PATCH 14/20] cmdutils: allow precisely specifying a stream for AVOptions. --- avconv.c | 8 +++-- avplay.c | 2 +- cmdutils.c | 60 +++++++++++++++++++++++++++++++++--- cmdutils.h | 16 ++++++++-- doc/fftools-common-opts.texi | 17 ++++++++++ ffmpeg.c | 4 +-- 6 files changed, 94 insertions(+), 13 deletions(-) diff --git a/avconv.c b/avconv.c index f5ca05add5..0db47a847f 100644 --- a/avconv.c +++ b/avconv.c @@ -674,8 +674,10 @@ static OutputStream *new_output_stream(AVFormatContext *oc, int file_idx, AVCode ost->index = idx; ost->st = st; ost->enc = codec; - if (codec) - ost->opts = filter_codec_opts(codec_opts, codec->id, 1); + if (codec) { + st->codec->codec_type = codec->type; + ost->opts = filter_codec_opts(codec_opts, codec->id, oc, st); + } avcodec_get_context_defaults3(st->codec, codec); @@ -3137,7 +3139,7 @@ static int opt_input_file(const char *opt, const char *filename) ist->st = st; ist->file_index = nb_input_files; ist->discard = 1; - ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, 0); + ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st); if (i < nb_ts_scale) ist->ts_scale = ts_scale[i]; diff --git a/avplay.c b/avplay.c index f43cf6faa6..e9b58a817f 100644 --- a/avplay.c +++ b/avplay.c @@ -2134,7 +2134,7 @@ static int stream_component_open(VideoState *is, int stream_index) return -1; avctx = ic->streams[stream_index]->codec; - opts = filter_codec_opts(codec_opts, avctx->codec_id, 0); + opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]); /* prepare audio output */ if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { diff --git a/cmdutils.c b/cmdutils.c index 1df2556a6f..e00e902e48 100644 --- a/cmdutils.c +++ b/cmdutils.c @@ -289,7 +289,14 @@ unknown_opt: int opt_default(const char *opt, const char *arg) { const AVOption *o; - if ((o = av_opt_find(avcodec_opts[0], opt, NULL, 0, AV_OPT_SEARCH_CHILDREN)) || + char opt_stripped[128]; + const char *p; + + if (!(p = strchr(opt, ':'))) + p = opt + strlen(opt); + av_strlcpy(opt_stripped, opt, FFMIN(sizeof(opt_stripped), p - opt + 1)); + + if ((o = av_opt_find(avcodec_opts[0], opt_stripped, NULL, 0, AV_OPT_SEARCH_CHILDREN)) || ((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') && (o = av_opt_find(avcodec_opts[0], opt+1, NULL, 0, 0)))) av_dict_set(&codec_opts, opt, arg, FLAGS); @@ -782,12 +789,42 @@ FILE *get_preset_file(char *filename, size_t filename_size, return f; } -AVDictionary *filter_codec_opts(AVDictionary *opts, enum CodecID codec_id, int encoder) +int check_stream_specifier(AVFormatContext 
*s, AVStream *st, const char *spec) +{ + if (*spec <= '9' && *spec >= '0') /* opt:index */ + return strtol(spec, NULL, 0) == st->index; + else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd') { /* opt:[vasd] */ + enum AVMediaType type; + + switch (*spec++) { + case 'v': type = AVMEDIA_TYPE_VIDEO; break; + case 'a': type = AVMEDIA_TYPE_AUDIO; break; + case 's': type = AVMEDIA_TYPE_SUBTITLE; break; + case 'd': type = AVMEDIA_TYPE_DATA; break; + } + if (type != st->codec->codec_type) + return 0; + if (*spec++ == ':') { /* possibly followed by :index */ + int i, index = strtol(spec, NULL, 0); + for (i = 0; i < s->nb_streams; i++) + if (s->streams[i]->codec->codec_type == type && index-- == 0) + return i == st->index; + return 0; + } + return 1; + } else if (!*spec) /* empty specifier, matches everything */ + return 1; + + av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec); + return AVERROR(EINVAL); +} + +AVDictionary *filter_codec_opts(AVDictionary *opts, enum CodecID codec_id, AVFormatContext *s, AVStream *st) { AVDictionary *ret = NULL; AVDictionaryEntry *t = NULL; - AVCodec *codec = encoder ? avcodec_find_encoder(codec_id) : avcodec_find_decoder(codec_id); - int flags = encoder ? AV_OPT_FLAG_ENCODING_PARAM : AV_OPT_FLAG_DECODING_PARAM; + AVCodec *codec = s->oformat ? avcodec_find_encoder(codec_id) : avcodec_find_decoder(codec_id); + int flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM : AV_OPT_FLAG_DECODING_PARAM; char prefix = 0; if (!codec) @@ -800,11 +837,24 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum CodecID codec_id, int e } while (t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX)) { + char *p = strchr(t->key, ':'); + + /* check stream specification in opt name */ + if (p) + switch (check_stream_specifier(s, st, p + 1)) { + case 1: *p = 0; break; + case 0: continue; + default: return NULL; + } + if (av_opt_find(avcodec_opts[0], t->key, NULL, flags, 0) || (codec && codec->priv_class && av_opt_find(&codec->priv_class, t->key, NULL, flags, 0))) av_dict_set(&ret, t->key, t->value, 0); else if (t->key[0] == prefix && av_opt_find(avcodec_opts[0], t->key+1, NULL, flags, 0)) av_dict_set(&ret, t->key+1, t->value, 0); + + if (p) + *p = ':'; } return ret; } @@ -822,7 +872,7 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *cod return NULL; } for (i = 0; i < s->nb_streams; i++) - opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codec->codec_id, 0); + opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codec->codec_id, s, s->streams[i]); return opts; } diff --git a/cmdutils.h b/cmdutils.h index 7769194b9c..48ea213bde 100644 --- a/cmdutils.h +++ b/cmdutils.h @@ -148,16 +148,28 @@ void show_help_options(const OptionDef *options, const char *msg, int mask, int void parse_options(int argc, char **argv, const OptionDef *options, void (* parse_arg_function)(const char*)); +/** + * Check if the given stream matches a stream specifier. + * + * @param s Corresponding format context. + * @param st Stream from s to be checked. + * @param spec A stream specifier of the [v|a|s|d]:[] form. + * + * @return 1 if the stream matches, 0 if it doesn't, <0 on error + */ +int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec); + /** * Filter out options for given codec. * * Create a new options dictionary containing only the options from * opts which apply to the codec with ID codec_id. * - * @param encoder if non-zero the codec is an encoder, otherwise is a decoder + * @param s Corresponding format context. 
+ * @param st A stream from s for which the options should be filtered. + * @return a pointer to the created dictionary */ -AVDictionary *filter_codec_opts(AVDictionary *opts, enum CodecID codec_id, int encoder); +AVDictionary *filter_codec_opts(AVDictionary *opts, enum CodecID codec_id, AVFormatContext *s, AVStream *st); /** * Setup AVCodecContext options for avformat_find_stream_info(). diff --git a/doc/fftools-common-opts.texi b/doc/fftools-common-opts.texi index ba91b87e72..8ffc3299ce 100644 --- a/doc/fftools-common-opts.texi +++ b/doc/fftools-common-opts.texi @@ -114,5 +114,22 @@ muxer: ffmpeg -i input.flac -id3v2_version 3 out.mp3 @end example +You can precisely specify which stream(s) the codec AVOption should apply to by +appending a stream specifier of the form +@option{[:@var{stream_type}][:@var{stream_index}]} to the option name. +@var{stream_type} is 'v' for video, 'a' for audio and 's' for subtitle streams. +@var{stream_index} is a global stream index when @var{stream_type} isn't +given, otherwise it counts streams of the given type only. As always, the index +is zero-based. For example +@example +-foo -- applies to all applicable streams +-foo:v -- applies to all video streams +-foo:a:2 -- applies to the third audio stream +-foo:0 -- applies to the first stream +@end example + Note -nooption syntax cannot be used for boolean AVOptions, use -option 0/-option 1. + +Note 2: the old undocumented way of specifying per-stream AVOptions by prepending +v/a/s to the option name is now obsolete and will be removed soon. diff --git a/ffmpeg.c b/ffmpeg.c index 1a6ba51b42..079384b8a6 100644 --- a/ffmpeg.c +++ b/ffmpeg.c @@ -683,7 +683,7 @@ static OutputStream *new_output_stream(AVFormatContext *oc, int file_idx, AVCode ost->st = st; ost->enc = codec; if (codec) - ost->opts = filter_codec_opts(codec_opts, codec->id, 1); + ost->opts = filter_codec_opts(codec_opts, codec->id, oc, st); avcodec_get_context_defaults3(st->codec, codec); @@ -3293,7 +3293,7 @@ static int opt_input_file(const char *opt, const char *filename) ist->st = st; ist->file_index = nb_input_files; ist->discard = 1; - ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, 0); + ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st); if (i < nb_ts_scale) ist->ts_scale = ts_scale[i]; From 3d4f0dab79ccc8b1a662de440a789ec00b428963 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Fri, 29 Jul 2011 08:36:13 +0200 Subject: [PATCH 15/20] avconv: get rid of new* options. They are confusing, irregular and redundant -- -map already contains all the information. Stream maps can now be parsed in opt_output_file(). Add a more user-friendly default behavior in case no maps are present. Breaks -programid for now, but it never worked properly anyway. A better solution will be written soon. 
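As an illustration of the resulting behaviour (a.mov, b.mov and out.mov are placeholder names reused from the documentation hunk below), explicit stream selection is now expressed purely through -map:

    avconv -i a.mov -i b.mov -vcodec copy -acodec copy -map 0.2 -map 1.6 out.mov

and with no -map options given, avconv defaults to the highest-resolution video stream, the audio stream with the most channels and the first subtitle stream.
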
--- avconv.c | 305 ++++++++++++++++-------------------------------- doc/avconv.texi | 59 ++++------ 2 files changed, 122 insertions(+), 242 deletions(-) diff --git a/avconv.c b/avconv.c index 0db47a847f..1407565b1e 100644 --- a/avconv.c +++ b/avconv.c @@ -464,7 +464,6 @@ static int exit_program(int ret) av_free(vstats_filename); av_free(streamid_map); - av_free(stream_maps); av_free(meta_data_maps); av_freep(&input_streams); @@ -1946,8 +1945,7 @@ static void parse_forced_key_frames(char *kf, OutputStream *ost, static int transcode(AVFormatContext **output_files, int nb_output_files, InputFile *input_files, - int nb_input_files, - StreamMap *stream_maps, int nb_stream_maps) + int nb_input_files) { int ret = 0, i, j, k, n, nb_ostreams = 0; AVFormatContext *is, *os; @@ -1975,32 +1973,6 @@ static int transcode(AVFormatContext **output_files, } nb_ostreams += os->nb_streams; } - if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) { - fprintf(stderr, "Number of stream maps must match number of output streams\n"); - ret = AVERROR(EINVAL); - goto fail; - } - - /* Sanity check the mapping args -- do the input files & streams exist? */ - for(i=0;i nb_input_files - 1 || - si < 0 || si > input_files[fi].ctx->nb_streams - 1) { - fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si); - ret = AVERROR(EINVAL); - goto fail; - } - fi = stream_maps[i].sync_file_index; - si = stream_maps[i].sync_stream_index; - if (fi < 0 || fi > nb_input_files - 1 || - si < 0 || si > input_files[fi].ctx->nb_streams - 1) { - fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si); - ret = AVERROR(EINVAL); - goto fail; - } - } ost_table = av_mallocz(sizeof(OutputStream *) * nb_ostreams); if (!ost_table) @@ -2008,80 +1980,8 @@ static int transcode(AVFormatContext **output_files, n = 0; for(k=0;knb_streams;i++,n++) { - int found; - ost = ost_table[n] = output_streams_for_file[k][i]; - if (nb_stream_maps > 0) { - ost->source_index = input_files[stream_maps[n].file_index].ist_index + - stream_maps[n].stream_index; - - /* Sanity check that the stream types match */ - if (input_streams[ost->source_index].st->codec->codec_type != ost->st->codec->codec_type) { - int i= ost->file_index; - av_dump_format(output_files[i], i, output_files[i]->filename, 1); - fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n", - stream_maps[n].file_index, stream_maps[n].stream_index, - ost->file_index, ost->index); - exit_program(1); - } - - } else { - int best_nb_frames=-1; - /* get corresponding input stream index : we select the first one with the right type */ - found = 0; - for (j = 0; j < nb_input_streams; j++) { - int skip=0; - ist = &input_streams[j]; - if(opt_programid){ - int pi,si; - AVFormatContext *f = input_files[ist->file_index].ctx; - skip=1; - for(pi=0; pinb_programs; pi++){ - AVProgram *p= f->programs[pi]; - if(p->id == opt_programid) - for(si=0; sinb_stream_indexes; si++){ - if(f->streams[ p->stream_index[si] ] == ist->st) - skip=0; - } - } - } - if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && - ist->st->codec->codec_type == ost->st->codec->codec_type) { - if(best_nb_frames < ist->st->codec_info_nb_frames){ - best_nb_frames= ist->st->codec_info_nb_frames; - ost->source_index = j; - found = 1; - } - } - } - - if (!found) { - if(! 
opt_programid) { - /* try again and reuse existing stream */ - for (j = 0; j < nb_input_streams; j++) { - ist = &input_streams[j]; - if ( ist->st->codec->codec_type == ost->st->codec->codec_type - && ist->st->discard != AVDISCARD_ALL) { - ost->source_index = j; - found = 1; - } - } - } - if (!found) { - int i= ost->file_index; - av_dump_format(output_files[i], i, output_files[i]->filename, 1); - fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n", - ost->file_index, ost->index); - exit_program(1); - } - } - } - ist = &input_streams[ost->source_index]; - ist->discard = 0; - ost->sync_ist = (nb_stream_maps > 0) ? - &input_streams[input_files[stream_maps[n].sync_file_index].ist_index + - stream_maps[n].sync_stream_index] : ist; - } + for (i = 0; i < os->nb_streams; i++, n++) + ost_table[n] = output_streams_for_file[k][i]; } /* for each output stream, we compute the right encoding parameters */ @@ -3221,50 +3121,7 @@ static int opt_input_file(const char *opt, const char *filename) return 0; } -static void check_inputs(int *has_video_ptr, - int *has_audio_ptr, - int *has_subtitle_ptr, - int *has_data_ptr) -{ - int has_video, has_audio, has_subtitle, has_data, i, j; - AVFormatContext *ic; - - has_video = 0; - has_audio = 0; - has_subtitle = 0; - has_data = 0; - - for(j=0;jnb_streams;i++) { - AVCodecContext *enc = ic->streams[i]->codec; - switch(enc->codec_type) { - case AVMEDIA_TYPE_AUDIO: - has_audio = 1; - break; - case AVMEDIA_TYPE_VIDEO: - has_video = 1; - break; - case AVMEDIA_TYPE_SUBTITLE: - has_subtitle = 1; - break; - case AVMEDIA_TYPE_DATA: - case AVMEDIA_TYPE_ATTACHMENT: - case AVMEDIA_TYPE_UNKNOWN: - has_data = 1; - break; - default: - abort(); - } - } - } - *has_video_ptr = has_video; - *has_audio_ptr = has_audio; - *has_subtitle_ptr = has_subtitle; - *has_data_ptr = has_data; -} - -static void new_video_stream(AVFormatContext *oc, int file_idx) +static OutputStream *new_video_stream(AVFormatContext *oc, int file_idx) { AVStream *st; OutputStream *ost; @@ -3393,9 +3250,10 @@ static void new_video_stream(AVFormatContext *oc, int file_idx) av_freep(&forced_key_frames); video_stream_copy = 0; frame_pix_fmt = PIX_FMT_NONE; + return ost; } -static void new_audio_stream(AVFormatContext *oc, int file_idx) +static OutputStream *new_audio_stream(AVFormatContext *oc, int file_idx) { AVStream *st; OutputStream *ost; @@ -3454,9 +3312,11 @@ static void new_audio_stream(AVFormatContext *oc, int file_idx) audio_disable = 0; av_freep(&audio_codec_name); audio_stream_copy = 0; + + return ost; } -static void new_data_stream(AVFormatContext *oc, int file_idx) +static OutputStream *new_data_stream(AVFormatContext *oc, int file_idx) { AVStream *st; OutputStream *ost; @@ -3485,9 +3345,10 @@ static void new_data_stream(AVFormatContext *oc, int file_idx) data_disable = 0; av_freep(&data_codec_name); data_stream_copy = 0; + return ost; } -static void new_subtitle_stream(AVFormatContext *oc, int file_idx) +static OutputStream *new_subtitle_stream(AVFormatContext *oc, int file_idx) { AVStream *st; OutputStream *ost; @@ -3533,24 +3394,7 @@ static void new_subtitle_stream(AVFormatContext *oc, int file_idx) subtitle_disable = 0; av_freep(&subtitle_codec_name); subtitle_stream_copy = 0; -} - -static int opt_new_stream(const char *opt, const char *arg) -{ - AVFormatContext *oc; - int file_idx = nb_output_files - 1; - if (nb_output_files <= 0) { - fprintf(stderr, "At least one output file must be specified\n"); - exit_program(1); - } - oc = output_files[file_idx]; - - if (!strcmp(opt, 
"newvideo" )) new_video_stream (oc, file_idx); - else if (!strcmp(opt, "newaudio" )) new_audio_stream (oc, file_idx); - else if (!strcmp(opt, "newsubtitle")) new_subtitle_stream(oc, file_idx); - else if (!strcmp(opt, "newdata" )) new_data_stream (oc, file_idx); - else av_assert0(0); - return 0; + return ost; } /* arg format is "output-stream-index:streamid-value". */ @@ -3578,9 +3422,10 @@ static int opt_streamid(const char *opt, const char *arg) static void opt_output_file(const char *filename) { AVFormatContext *oc; - int i, err, use_video, use_audio, use_subtitle, use_data; - int input_has_video, input_has_audio, input_has_subtitle, input_has_data; + int i, err; AVOutputFormat *file_oformat; + OutputStream *ost; + InputStream *ist; if (!strcmp(filename, "-")) filename = "pipe:"; @@ -3619,42 +3464,94 @@ static void opt_output_file(const char *filename) print_error(filename, err); exit_program(1); } + } else if (!nb_stream_maps) { + /* pick the "best" stream of each type */ +#define NEW_STREAM(type, index)\ + if (index >= 0) {\ + ost = new_ ## type ## _stream(oc, nb_output_files);\ + ost->source_index = index;\ + ost->sync_ist = &input_streams[index];\ + input_streams[index].discard = 0;\ + } + + /* video: highest resolution */ + if (!video_disable && oc->oformat->video_codec != CODEC_ID_NONE) { + int area = 0, idx = -1; + for (i = 0; i < nb_input_streams; i++) { + ist = &input_streams[i]; + if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && + ist->st->codec->width * ist->st->codec->height > area) { + area = ist->st->codec->width * ist->st->codec->height; + idx = i; + } + } + NEW_STREAM(video, idx); + } + + /* audio: most channels */ + if (!audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) { + int channels = 0, idx = -1; + for (i = 0; i < nb_input_streams; i++) { + ist = &input_streams[i]; + if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && + ist->st->codec->channels > channels) { + channels = ist->st->codec->channels; + idx = i; + } + } + NEW_STREAM(audio, idx); + } + + /* subtitles: pick first */ + if (!subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) { + for (i = 0; i < nb_input_streams; i++) + if (input_streams[i].st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { + NEW_STREAM(subtitle, i); + break; + } + } + /* do something with data? 
*/ } else { - use_video = file_oformat->video_codec != CODEC_ID_NONE || video_stream_copy || video_codec_name; - use_audio = file_oformat->audio_codec != CODEC_ID_NONE || audio_stream_copy || audio_codec_name; - use_subtitle = file_oformat->subtitle_codec != CODEC_ID_NONE || subtitle_stream_copy || subtitle_codec_name; - use_data = data_stream_copy || data_codec_name; /* XXX once generic data codec will be available add a ->data_codec reference and use it here */ + for (i = 0; i < nb_stream_maps; i++) { + StreamMap *map = &stream_maps[i]; + int fi = map->file_index; + int si = map->stream_index; - /* disable if no corresponding type found */ - check_inputs(&input_has_video, - &input_has_audio, - &input_has_subtitle, - &input_has_data); + if (fi < 0 || fi >= nb_input_files || + si < 0 || si >= input_files[fi].ctx->nb_streams) { + av_log(NULL, AV_LOG_ERROR, "Input stream #%d.%d does not exist.\n", fi, si); + exit_program(1); + } + fi = map->sync_file_index; + si = map->sync_stream_index; + if (fi < 0 || fi >= nb_input_files || + si < 0 || si >= input_files[fi].ctx->nb_streams) { + av_log(NULL, AV_LOG_ERROR, "Sync stream #%d.%d does not exist.\n", fi, si); + exit_program(1); + } - if (!input_has_video) - use_video = 0; - if (!input_has_audio) - use_audio = 0; - if (!input_has_subtitle) - use_subtitle = 0; - if (!input_has_data) - use_data = 0; + ist = &input_streams[input_files[map->file_index].ist_index + map->stream_index]; + switch (ist->st->codec->codec_type) { + case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(oc, nb_output_files); break; + case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(oc, nb_output_files); break; + case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(oc, nb_output_files); break; + case AVMEDIA_TYPE_DATA: ost = new_data_stream(oc, nb_output_files); break; + default: + av_log(NULL, AV_LOG_ERROR, "Cannot map stream #%d.%d - unsupported type.\n", + map->file_index, map->stream_index); + exit_program(1); + } - /* manual disable */ - if (audio_disable) use_audio = 0; - if (video_disable) use_video = 0; - if (subtitle_disable) use_subtitle = 0; - if (data_disable) use_data = 0; - - if (use_video) new_video_stream(oc, nb_output_files); - if (use_audio) new_audio_stream(oc, nb_output_files); - if (use_subtitle) new_subtitle_stream(oc, nb_output_files); - if (use_data) new_data_stream(oc, nb_output_files); - - av_dict_copy(&oc->metadata, metadata, 0); - av_dict_free(&metadata); + ost->source_index = input_files[map->file_index].ist_index + map->stream_index; + ost->sync_ist = &input_streams[input_files[map->sync_file_index].ist_index + + map->sync_stream_index]; + ist->discard = 0; + } } + av_dict_copy(&oc->metadata, metadata, 0); + av_dict_free(&metadata); + av_dict_copy(&output_opts[nb_output_files], format_opts, 0); output_files[nb_output_files++] = oc; @@ -3787,6 +3684,8 @@ static void opt_output_file(const char *filename) metadata_global_autocopy = 1; metadata_streams_autocopy = 1; metadata_chapters_autocopy = 1; + av_freep(&stream_maps); + nb_stream_maps = 0; av_freep(&forced_key_frames); uninit_opts(); @@ -4241,7 +4140,6 @@ static const OptionDef options[] = { { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_top_field_first}, "top=1/bottom=0/auto=-1 field first", "" }, { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" }, { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_codec_tag}, "force video tag/fourcc", "fourcc/tag" }, - { "newvideo", OPT_VIDEO, {(void*)opt_new_stream}, "add a new video 
stream to the current output stream" },
    { "vlang", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void *)&video_language}, "set the ISO 639 language code (3 letters) of the current video stream" , "code" },
    { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
    { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&force_fps}, "force the selected framerate, disable the best supported framerate selection" },
@@ -4257,14 +4155,12 @@ static const OptionDef options[] = {
     { "acodec", HAS_ARG | OPT_AUDIO, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
     { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO, {(void*)opt_codec_tag}, "force audio tag/fourcc", "fourcc/tag" },
     { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
-    { "newaudio", OPT_AUDIO, {(void*)opt_new_stream}, "add a new audio stream to the current output stream" },
     { "alang", HAS_ARG | OPT_STRING | OPT_AUDIO, {(void *)&audio_language}, "set the ISO 639 language code (3 letters) of the current audio stream" , "code" },
     { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO, {(void*)opt_audio_sample_fmt}, "set sample format, 'list' as argument shows all the sample formats supported", "format" },
 
     /* subtitle options */
     { "sn", OPT_BOOL | OPT_SUBTITLE, {(void*)&subtitle_disable}, "disable subtitle" },
     { "scodec", HAS_ARG | OPT_SUBTITLE, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
-    { "newsubtitle", OPT_SUBTITLE, {(void*)opt_new_stream}, "add a new subtitle stream to the current output stream" },
     { "slang", HAS_ARG | OPT_STRING | OPT_SUBTITLE, {(void *)&subtitle_language}, "set the ISO 639 language code (3 letters) of the current subtitle stream" , "code" },
     { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE, {(void*)opt_codec_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
@@ -4332,8 +4228,7 @@ int main(int argc, char **argv)
     }
 
     ti = getutime();
-    if (transcode(output_files, nb_output_files, input_files, nb_input_files,
-                  stream_maps, nb_stream_maps) < 0)
+    if (transcode(output_files, nb_output_files, input_files, nb_input_files) < 0)
         exit_program(1);
     ti = getutime() - ti;
     if (do_benchmark) {
diff --git a/doc/avconv.texi b/doc/avconv.texi
index ab1df09fb3..d325b1d14f 100644
--- a/doc/avconv.texi
+++ b/doc/avconv.texi
@@ -65,6 +65,20 @@ specified for the inputs.
 
 @c man end DESCRIPTION
 
+@chapter Stream selection
+@c man begin STREAM SELECTION
+
+By default avconv tries to pick the "best" stream of each type present in input
+files and add them to each output file. For video, this means the highest
+resolution, for audio the highest channel count. For subtitles it is simply the
+first subtitle stream.
+
+You can disable some of those defaults by using the @code{-vn/-an/-sn} options. For
+full manual control, use the @code{-map} option, which disables the defaults just
+described.
+
+@c man end STREAM SELECTION
+
 @chapter Options
 @c man begin OPTIONS
 
@@ -148,9 +162,6 @@ Set the number of data frames to record.
 
 @item -scodec @var{codec}
 Force subtitle codec ('copy' to copy stream).
 
-@item -newsubtitle
-Add a new subtitle stream to the current output stream.
-
 @item -slang @var{code}
 Set the ISO 639 language code (3 letters) of the current subtitle stream.
 
@@ -295,9 +306,6 @@ prefix is ``av2pass''. The complete file name will be
 @file{PREFIX-N.log}, where N is a number specific to the output
 stream.
 
-@item -newvideo
-Add a new video stream to the current output stream.
- @item -vlang @var{code} Set the ISO 639 language code (3 letters) of the current video stream. @@ -565,18 +573,6 @@ Disable audio recording. @item -acodec @var{codec} Force audio codec to @var{codec}. Use the @code{copy} special value to specify that the raw codec data must be copied as is. -@item -newaudio -Add a new audio track to the output file. If you want to specify parameters, -do so before @code{-newaudio} (@code{-acodec}, @code{-ab}, etc..). - -Mapping will be done automatically, if the number of output streams is equal to -the number of input streams, else it will pick the first one that matches. You -can override the mapping using @code{-map} as usual. - -Example: -@example -avconv -i file.mpg -vcodec copy -acodec ac3 -ab 384k test.mpg -acodec mp2 -ab 192k -newaudio -@end example @item -alang @var{code} Set the ISO 639 language code (3 letters) of the current audio stream. @end table @@ -617,8 +613,6 @@ Bitstream filters available are "dump_extra", "remove_extra", "noise", "mp3comp" @table @option @item -scodec @var{codec} Force subtitle codec ('copy' to copy stream). -@item -newsubtitle -Add a new subtitle stream to the current output stream. @item -slang @var{code} Set the ISO 639 language code (3 letters) of the current subtitle stream. @item -sn @@ -649,19 +643,16 @@ file. Both indexes start at 0. If specified, @var{sync_file_id}.@var{sync_stream_id} sets which input stream is used as a presentation sync reference. -The @code{-map} options must be specified just after the output file. -If any @code{-map} options are used, the number of @code{-map} options -on the command line must match the number of streams in the output -file. The first @code{-map} option on the command line specifies the +The first @code{-map} option on the command line specifies the source for output stream 0, the second @code{-map} option specifies the source for output stream 1, etc. For example, if you have two audio streams in the first input file, these streams are identified by "0.0" and "0.1". You can use -@code{-map} to select which stream to place in an output file. For +@code{-map} to select which streams to place in an output file. For example: @example -avconv -i INPUT out.wav -map 0.1 +avconv -i INPUT -map 0.1 out.wav @end example will map the input stream in @file{INPUT} identified by "0.1" to the (single) output stream in @file{out.wav}. @@ -671,11 +662,10 @@ For example, to select the stream with index 2 from input file index 6 from input @file{b.mov} (specified by the identifier "1.6"), and copy them to the output file @file{out.mov}: @example -avconv -i a.mov -i b.mov -vcodec copy -acodec copy out.mov -map 0.2 -map 1.6 +avconv -i a.mov -i b.mov -vcodec copy -acodec copy -map 0.2 -map 1.6 out.mov @end example -To add more streams to the output file, you can use the -@code{-newaudio}, @code{-newvideo}, @code{-newsubtitle} options. +Note that using this option disables the default mappings for this output file. @item -map_metadata[:@var{metadata_type}][:@var{index}] @var{infile}[:@var{metadata_type}][:@var{index}] Set metadata information of the next output file from @var{infile}. Note that @@ -1008,16 +998,11 @@ only formats accepting a normal integer are suitable. 
You can put many streams of the same type in the output:
 
 @example
-avconv -i test1.avi -i test2.avi -vcodec copy -acodec copy -vcodec copy -acodec copy test12.avi -newvideo -newaudio
+avconv -i test1.avi -i test2.avi -map 0.3 -map 0.2 -map 0.1 -map 0.0 -vcodec copy -acodec copy -vcodec copy -acodec copy test12.nut
 @end example
 
-In addition to the first video and audio streams, the resulting
-output file @file{test12.avi} will contain the second video
-and the second audio stream found in the input streams list.
-
-The @code{-newvideo}, @code{-newaudio} and @code{-newsubtitle}
-options have to be specified immediately after the name of the output
-file to which you want to add them.
+The resulting output file @file{test12.nut} will contain the first four streams
+from the input file in reverse order.
 
 @end itemize
 @c man end EXAMPLES

From 8d2e4a7e687b7fdbb939e236399cf774dc7bead6 Mon Sep 17 00:00:00 2001
From: Anton Khirnov
Date: Fri, 29 Jul 2011 15:21:51 +0200
Subject: [PATCH 16/20] avconv: change semantics of -map

The new syntax contains an optional stream type, which allows referring to the
n-th stream of a specific type. Omitting the stream number now maps all streams
of the given type.

---
 avconv.c        | 110 +++++++++++++++++++++++++++++++++---------------
 doc/avconv.texi |  47 ++++++++++++++++-----
 2 files changed, 113 insertions(+), 44 deletions(-)

diff --git a/avconv.c b/avconv.c
index 1407565b1e..877079c887 100644
--- a/avconv.c
+++ b/avconv.c
@@ -79,6 +79,7 @@ const int program_birth_year = 2000;
 
 /* select an input stream for an output stream */
 typedef struct StreamMap {
+    int disabled;           /** 1 if this mapping is disabled by a negative map */
     int file_index;
     int stream_index;
     int sync_file_index;
@@ -2780,27 +2781,82 @@ static int opt_codec_tag(const char *opt, const char *arg)
 
 static int opt_map(const char *opt, const char *arg)
 {
-    StreamMap *m;
-    char *p;
+    StreamMap *m = NULL;
+    int i, negative = 0, file_idx;
+    int sync_file_idx = -1, sync_stream_idx;
+    char *p, *sync;
+    char *map;
 
-    stream_maps = grow_array(stream_maps, sizeof(*stream_maps), &nb_stream_maps, nb_stream_maps + 1);
-    m = &stream_maps[nb_stream_maps-1];
-
-    m->file_index = strtol(arg, &p, 0);
-    if (*p)
-        p++;
-
-    m->stream_index = strtol(p, &p, 0);
-    if (*p) {
-        p++;
-        m->sync_file_index = strtol(p, &p, 0);
-        if (*p)
-            p++;
-        m->sync_stream_index = strtol(p, &p, 0);
-    } else {
-        m->sync_file_index = m->file_index;
-        m->sync_stream_index = m->stream_index;
+    if (*arg == '-') {
+        negative = 1;
+        arg++;
     }
+    map = av_strdup(arg);
+
+    /* parse sync stream first, just pick first matching stream */
+    if (sync = strchr(map, ',')) {
+        *sync = 0;
+        sync_file_idx = strtol(sync + 1, &sync, 0);
+        if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
+            av_log(NULL, AV_LOG_ERROR, "Invalid sync file index: %d.\n", sync_file_idx);
+            exit_program(1);
+        }
+        if (*sync)
+            sync++;
+        for (i = 0; i < input_files[sync_file_idx].ctx->nb_streams; i++)
+            if (check_stream_specifier(input_files[sync_file_idx].ctx,
+                                       input_files[sync_file_idx].ctx->streams[i], sync) == 1) {
+                sync_stream_idx = i;
+                break;
+            }
+        if (i == input_files[sync_file_idx].ctx->nb_streams) {
+            av_log(NULL, AV_LOG_ERROR, "Sync stream specification in map %s does not "
+                   "match any streams.\n", arg);
+            exit_program(1);
+        }
+    }
+
+
+    file_idx = strtol(map, &p, 0);
+    if (file_idx >= nb_input_files || file_idx < 0) {
+        av_log(NULL, AV_LOG_ERROR, "Invalid input file index: %d.\n", file_idx);
+        exit_program(1);
+    }
+    if (negative)
+        /* disable some already defined maps */
+        for (i = 0; i <
nb_stream_maps; i++) { + m = &stream_maps[i]; + if (check_stream_specifier(input_files[m->file_index].ctx, + input_files[m->file_index].ctx->streams[m->stream_index], + *p == ':' ? p + 1 : p) > 0) + m->disabled = 1; + } + else + for (i = 0; i < input_files[file_idx].ctx->nb_streams; i++) { + if (check_stream_specifier(input_files[file_idx].ctx, input_files[file_idx].ctx->streams[i], + *p == ':' ? p + 1 : p) <= 0) + continue; + stream_maps = grow_array(stream_maps, sizeof(*stream_maps), &nb_stream_maps, nb_stream_maps + 1); + m = &stream_maps[nb_stream_maps - 1]; + + m->file_index = file_idx; + m->stream_index = i; + + if (sync_file_idx >= 0) { + m->sync_file_index = sync_file_idx; + m->sync_stream_index = sync_stream_idx; + } else { + m->sync_file_index = file_idx; + m->sync_stream_index = i; + } + } + + if (!m) { + av_log(NULL, AV_LOG_ERROR, "Stream map '%s' matches no streams.\n", arg); + exit_program(1); + } + + av_freep(&map); return 0; } @@ -3514,21 +3570,9 @@ static void opt_output_file(const char *filename) } else { for (i = 0; i < nb_stream_maps; i++) { StreamMap *map = &stream_maps[i]; - int fi = map->file_index; - int si = map->stream_index; - if (fi < 0 || fi >= nb_input_files || - si < 0 || si >= input_files[fi].ctx->nb_streams) { - av_log(NULL, AV_LOG_ERROR, "Input stream #%d.%d does not exist.\n", fi, si); - exit_program(1); - } - fi = map->sync_file_index; - si = map->sync_stream_index; - if (fi < 0 || fi >= nb_input_files || - si < 0 || si >= input_files[fi].ctx->nb_streams) { - av_log(NULL, AV_LOG_ERROR, "Sync stream #%d.%d does not exist.\n", fi, si); - exit_program(1); - } + if (map->disabled) + continue; ist = &input_streams[input_files[map->file_index].ist_index + map->stream_index]; switch (ist->st->codec->codec_type) { diff --git a/doc/avconv.texi b/doc/avconv.texi index d325b1d14f..d9d20137aa 100644 --- a/doc/avconv.texi +++ b/doc/avconv.texi @@ -634,35 +634,60 @@ Synchronize read on input. @section Advanced options @table @option -@item -map @var{input_file_id}.@var{input_stream_id}[:@var{sync_file_id}.@var{sync_stream_id}] +@item -map [-]@var{input_file_id}[:@var{input_stream_type}][:@var{input_stream_id}][,@var{sync_file_id}[:@var{sync_stream_type}][:@var{sync_stream_id}]] -Designate an input stream as a source for the output file. Each input +Designate one or more input streams as a source for the output file. Each input stream is identified by the input file index @var{input_file_id} and the input stream index @var{input_stream_id} within the input -file. Both indexes start at 0. If specified, -@var{sync_file_id}.@var{sync_stream_id} sets which input stream +file. Both indices start at 0. If specified, +@var{sync_file_id}:@var{sync_stream_id} sets which input stream is used as a presentation sync reference. +If @var{input_stream_type} is specified -- 'v' for video, 'a' for audio, 's' for +subtitle and 'd' for data -- then @var{input_stream_id} counts only the streams +of this type. Same for @var{sync_stream_type}. + +@var{input_stream_id} may be omitted, in which case all streams of the given +type are mapped (or all streams in the file, if no type is specified). + The first @code{-map} option on the command line specifies the source for output stream 0, the second @code{-map} option specifies the source for output stream 1, etc. +A @code{-} character before the stream identifier creates a "negative" mapping. +It disables matching streams from already created mappings. 
+
+For example, to map ALL streams from the first input file to output:
+@example
+avconv -i INPUT -map 0 output
+@end example
+
 For example, if you have two audio streams in the first input file,
-these streams are identified by "0.0" and "0.1". You can use
+these streams are identified by "0:0" and "0:1". You can use
 @code{-map} to select which streams to place in an output file. For
 example:
 @example
-avconv -i INPUT -map 0.1 out.wav
+avconv -i INPUT -map 0:1 out.wav
 @end example
-will map the input stream in @file{INPUT} identified by "0.1" to
+will map the input stream in @file{INPUT} identified by "0:1" to
 the (single) output stream in @file{out.wav}.
 
 For example, to select the stream with index 2 from input file
-@file{a.mov} (specified by the identifier "0.2"), and stream with
-index 6 from input @file{b.mov} (specified by the identifier "1.6"),
+@file{a.mov} (specified by the identifier "0:2"), and stream with
+index 6 from input @file{b.mov} (specified by the identifier "1:6"),
 and copy them to the output file @file{out.mov}:
 @example
-avconv -i a.mov -i b.mov -vcodec copy -acodec copy -map 0.2 -map 1.6 out.mov
+avconv -i a.mov -i b.mov -vcodec copy -acodec copy -map 0:2 -map 1:6 out.mov
+@end example
+
+To select all video and the third audio stream from an input file:
+@example
+avconv -i INPUT -map 0:v -map 0:a:2 OUTPUT
+@end example
+
+To map all the streams except the second audio, use negative mappings:
+@example
+avconv -i INPUT -map 0 -map -0:a:1 OUTPUT
 @end example
 
 Note that using this option disables the default mappings for this output file.
@@ -943,7 +968,7 @@ You can encode to several formats at the same time and define a
 mapping from input stream to output streams:
 
 @example
-avconv -i /tmp/a.wav -ab 64k /tmp/a.mp2 -ab 128k /tmp/b.mp2 -map 0:0 -map 0:0
+avconv -i /tmp/a.wav -map 0:a -ab 64k /tmp/a.mp2 -map 0:a -ab 128k /tmp/b.mp2
 @end example
 
 Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. '-map

From 2f63440c59bf8c4a2e90280e23e8cadb235a42e6 Mon Sep 17 00:00:00 2001
From: Dustin Brody
Date: Sat, 6 Aug 2011 04:42:34 +0000
Subject: [PATCH 17/20] lavf: add support for error_recognition, use it in avidec, and bump minor API version

Signed-off-by: Anton Khirnov
---
 libavformat/avformat.h | 8 ++++++++
 libavformat/avidec.c   | 6 ++++--
 libavformat/options.c  | 3 +++
 libavformat/version.h  | 2 +-
 4 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/libavformat/avformat.h b/libavformat/avformat.h
index 2f68abab25..957039f64e 100644
--- a/libavformat/avformat.h
+++ b/libavformat/avformat.h
@@ -854,6 +854,14 @@ typedef struct AVFormatContext {
      * decoding: number of frames used to probe fps
      */
     int fps_probe_size;
+
+    /**
+     * Error recognition; higher values will detect more errors but may
+     * misdetect some more or less valid parts as errors.
+     * - encoding: unused
+     * - decoding: Set by user.
+ */ + int error_recognition; } AVFormatContext; typedef struct AVPacketList { diff --git a/libavformat/avidec.c b/libavformat/avidec.c index 0ea093c6d7..8c0ef13b14 100644 --- a/libavformat/avidec.c +++ b/libavformat/avidec.c @@ -667,8 +667,9 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) break; case MKTAG('i', 'n', 'd', 'x'): i= avio_tell(pb); - if(pb->seekable && !(s->flags & AVFMT_FLAG_IGNIDX)){ - read_braindead_odml_indx(s, 0); + if(pb->seekable && !(s->flags & AVFMT_FLAG_IGNIDX) && + read_braindead_odml_indx(s, 0) < 0 && s->error_recognition >= FF_ER_EXPLODE){ + goto fail; } avio_seek(pb, i+size, SEEK_SET); break; @@ -706,6 +707,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) if(size > 1000000){ av_log(s, AV_LOG_ERROR, "Something went wrong during header parsing, " "I will ignore it and try to continue anyway.\n"); + if (s->error_recognition >= FF_ER_EXPLODE) goto fail; avi->movi_list = avio_tell(pb) - 4; avi->movi_end = avio_size(pb); goto end_of_header; diff --git a/libavformat/options.c b/libavformat/options.c index 961162876f..3ca8c83480 100644 --- a/libavformat/options.c +++ b/libavformat/options.c @@ -87,6 +87,9 @@ static const AVOption options[]={ {"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, {.dbl = DEFAULT }, 0, INT_MAX, E|D, "fdebug"}, {"ts", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_FDEBUG_TS }, INT_MIN, INT_MAX, E|D, "fdebug"}, {"max_delay", "maximum muxing or demuxing delay in microseconds", OFFSET(max_delay), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, E|D}, +{"fer", "set error detection aggressivity", OFFSET(error_recognition), FF_OPT_TYPE_INT, {.dbl = FF_ER_CAREFUL }, INT_MIN, INT_MAX, D, "fer"}, +{"careful", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_ER_CAREFUL }, INT_MIN, INT_MAX, D, "fer"}, +{"explode", "abort decoding on error recognition", 0, FF_OPT_TYPE_CONST, {.dbl = FF_ER_EXPLODE }, INT_MIN, INT_MAX, D, "fer"}, {"fpsprobesize", "number of frames used to probe fps", OFFSET(fps_probe_size), FF_OPT_TYPE_INT, {.dbl = -1}, -1, INT_MAX-1, D}, {NULL}, }; diff --git a/libavformat/version.h b/libavformat/version.h index bb12b5137a..4473717fd7 100644 --- a/libavformat/version.h +++ b/libavformat/version.h @@ -24,7 +24,7 @@ #include "libavutil/avutil.h" #define LIBAVFORMAT_VERSION_MAJOR 53 -#define LIBAVFORMAT_VERSION_MINOR 3 +#define LIBAVFORMAT_VERSION_MINOR 4 #define LIBAVFORMAT_VERSION_MICRO 0 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ From e3a70c7c5ff589ff6ce70fa0a29dc220f95dff17 Mon Sep 17 00:00:00 2001 From: Dustin Brody Date: Sat, 6 Aug 2011 01:42:35 -0400 Subject: [PATCH 18/20] APIchanges: note error_recognition in lavf Signed-off-by: Anton Khirnov --- doc/APIchanges | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/APIchanges b/doc/APIchanges index 09a12fe3d2..ac95d168eb 100644 --- a/doc/APIchanges +++ b/doc/APIchanges @@ -13,6 +13,9 @@ libavutil: 2011-04-18 API changes, most recent first: +2011-08-06 - 2f63440 - lavf 53.4.0 + Add error_recognition to AVFormatContext. + 2011-08-02 - 9d39cbf - lavc 53.7.1 Add AV_PKT_FLAG_CORRUPT AVPacket flag. From 3a39195b1db5620901a049cd690752b1935f5e0f Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Sat, 23 Jul 2011 18:36:35 -0700 Subject: [PATCH 19/20] Move x86inc.asm to libavutil/. This allows using it in libswscale/ also. 
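As an illustration only (this file is hypothetical and not part of the patch), an
assembly source outside libavcodec/ can now reach the shared macros through a
source-root-relative include, the same form used in the hunks below. It assumes
yasm is invoked with the source root on its include path, as it already is for
the existing files:

    ; libswscale/x86/example.asm -- hypothetical sketch, for illustration only
    %include "libavutil/x86/x86inc.asm"

    SECTION .text

    ; trivial function built only from the cglobal/RET macros of x86inc.asm
    INIT_XMM
    cglobal example_nop, 0, 0
        RET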
--- libavcodec/x86/ac3dsp.asm | 2 +- libavcodec/x86/dct32_sse.asm | 2 +- libavcodec/x86/deinterlace.asm | 2 +- libavcodec/x86/dsputil_yasm.asm | 2 +- libavcodec/x86/dsputilenc_yasm.asm | 2 +- libavcodec/x86/fft_mmx.asm | 2 +- libavcodec/x86/fmtconvert.asm | 2 +- libavcodec/x86/h264_chromamc.asm | 2 +- libavcodec/x86/h264_chromamc_10bit.asm | 2 +- libavcodec/x86/h264_deblock.asm | 2 +- libavcodec/x86/h264_deblock_10bit.asm | 2 +- libavcodec/x86/h264_idct.asm | 2 +- libavcodec/x86/h264_idct_10bit.asm | 2 +- libavcodec/x86/h264_intrapred.asm | 2 +- libavcodec/x86/h264_intrapred_10bit.asm | 2 +- libavcodec/x86/h264_qpel_10bit.asm | 2 +- libavcodec/x86/h264_weight.asm | 2 +- libavcodec/x86/h264_weight_10bit.asm | 2 +- libavcodec/x86/vc1dsp_yasm.asm | 2 +- libavcodec/x86/vp3dsp.asm | 2 +- libavcodec/x86/vp56dsp.asm | 2 +- libavcodec/x86/vp8dsp.asm | 2 +- {libavcodec => libavutil}/x86/x86inc.asm | 0 23 files changed, 22 insertions(+), 22 deletions(-) rename {libavcodec => libavutil}/x86/x86inc.asm (100%) diff --git a/libavcodec/x86/ac3dsp.asm b/libavcodec/x86/ac3dsp.asm index c1b0906a85..44124c5397 100644 --- a/libavcodec/x86/ac3dsp.asm +++ b/libavcodec/x86/ac3dsp.asm @@ -19,7 +19,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/dct32_sse.asm b/libavcodec/x86/dct32_sse.asm index 720a061078..4e938ad198 100644 --- a/libavcodec/x86/dct32_sse.asm +++ b/libavcodec/x86/dct32_sse.asm @@ -19,7 +19,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA 32 diff --git a/libavcodec/x86/deinterlace.asm b/libavcodec/x86/deinterlace.asm index 8613485d5d..a782b2aae1 100644 --- a/libavcodec/x86/deinterlace.asm +++ b/libavcodec/x86/deinterlace.asm @@ -20,7 +20,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/dsputil_yasm.asm b/libavcodec/x86/dsputil_yasm.asm index 4e1ec24a7a..c75bd5c0f4 100644 --- a/libavcodec/x86/dsputil_yasm.asm +++ b/libavcodec/x86/dsputil_yasm.asm @@ -19,7 +19,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" SECTION_RODATA pb_f: times 16 db 15 diff --git a/libavcodec/x86/dsputilenc_yasm.asm b/libavcodec/x86/dsputilenc_yasm.asm index 6063ff1040..09450226e0 100644 --- a/libavcodec/x86/dsputilenc_yasm.asm +++ b/libavcodec/x86/dsputilenc_yasm.asm @@ -21,7 +21,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;***************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION .text diff --git a/libavcodec/x86/fft_mmx.asm b/libavcodec/x86/fft_mmx.asm index 27276a1a31..2177a6ba7a 100644 --- a/libavcodec/x86/fft_mmx.asm +++ b/libavcodec/x86/fft_mmx.asm @@ -28,7 +28,7 @@ ; in blocks as conventient to the vector size. 
; i.e. {4x real, 4x imaginary, 4x real, ...} (or 2x respectively) -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %ifdef ARCH_X86_64 %define pointer resq diff --git a/libavcodec/x86/fmtconvert.asm b/libavcodec/x86/fmtconvert.asm index efab87d570..a69b3ff357 100644 --- a/libavcodec/x86/fmtconvert.asm +++ b/libavcodec/x86/fmtconvert.asm @@ -19,7 +19,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_TEXT diff --git a/libavcodec/x86/h264_chromamc.asm b/libavcodec/x86/h264_chromamc.asm index 16cf2ec43e..201dcd85a6 100644 --- a/libavcodec/x86/h264_chromamc.asm +++ b/libavcodec/x86/h264_chromamc.asm @@ -20,7 +20,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_chromamc_10bit.asm b/libavcodec/x86/h264_chromamc_10bit.asm index 9d075434fe..af574844dc 100644 --- a/libavcodec/x86/h264_chromamc_10bit.asm +++ b/libavcodec/x86/h264_chromamc_10bit.asm @@ -22,7 +22,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_deblock.asm b/libavcodec/x86/h264_deblock.asm index 92f91acade..ac25dca92d 100644 --- a/libavcodec/x86/h264_deblock.asm +++ b/libavcodec/x86/h264_deblock.asm @@ -24,7 +24,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION .text diff --git a/libavcodec/x86/h264_deblock_10bit.asm b/libavcodec/x86/h264_deblock_10bit.asm index baac725eec..0f525449e3 100644 --- a/libavcodec/x86/h264_deblock_10bit.asm +++ b/libavcodec/x86/h264_deblock_10bit.asm @@ -24,7 +24,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_idct.asm b/libavcodec/x86/h264_idct.asm index 37c2c90476..482881b332 100644 --- a/libavcodec/x86/h264_idct.asm +++ b/libavcodec/x86/h264_idct.asm @@ -26,7 +26,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;***************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_idct_10bit.asm b/libavcodec/x86/h264_idct_10bit.asm index 54636a95d0..6a45724f3f 100644 --- a/libavcodec/x86/h264_idct_10bit.asm +++ b/libavcodec/x86/h264_idct_10bit.asm @@ -22,7 +22,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_intrapred.asm 
b/libavcodec/x86/h264_intrapred.asm index c1cd5c4d25..258eef563f 100644 --- a/libavcodec/x86/h264_intrapred.asm +++ b/libavcodec/x86/h264_intrapred.asm @@ -22,7 +22,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_intrapred_10bit.asm b/libavcodec/x86/h264_intrapred_10bit.asm index e14e31a38c..7d8ce08690 100644 --- a/libavcodec/x86/h264_intrapred_10bit.asm +++ b/libavcodec/x86/h264_intrapred_10bit.asm @@ -22,7 +22,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_qpel_10bit.asm b/libavcodec/x86/h264_qpel_10bit.asm index 15dd72ca36..3df3048100 100644 --- a/libavcodec/x86/h264_qpel_10bit.asm +++ b/libavcodec/x86/h264_qpel_10bit.asm @@ -22,7 +22,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA 32 diff --git a/libavcodec/x86/h264_weight.asm b/libavcodec/x86/h264_weight.asm index d80ca32583..9502462de2 100644 --- a/libavcodec/x86/h264_weight.asm +++ b/libavcodec/x86/h264_weight.asm @@ -21,7 +21,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" SECTION .text diff --git a/libavcodec/x86/h264_weight_10bit.asm b/libavcodec/x86/h264_weight_10bit.asm index 1c58d72d94..abf382389b 100644 --- a/libavcodec/x86/h264_weight_10bit.asm +++ b/libavcodec/x86/h264_weight_10bit.asm @@ -22,7 +22,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA 32 diff --git a/libavcodec/x86/vc1dsp_yasm.asm b/libavcodec/x86/vc1dsp_yasm.asm index 220cc03da3..0bf7185a61 100644 --- a/libavcodec/x86/vc1dsp_yasm.asm +++ b/libavcodec/x86/vc1dsp_yasm.asm @@ -19,7 +19,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" cextern pw_4 diff --git a/libavcodec/x86/vp3dsp.asm b/libavcodec/x86/vp3dsp.asm index 23574383a1..23616a1863 100644 --- a/libavcodec/x86/vp3dsp.asm +++ b/libavcodec/x86/vp3dsp.asm @@ -19,7 +19,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" ; MMX-optimized functions cribbed from the original VP3 source code. 
diff --git a/libavcodec/x86/vp56dsp.asm b/libavcodec/x86/vp56dsp.asm index c70ed60d76..fc68cabef0 100644 --- a/libavcodec/x86/vp56dsp.asm +++ b/libavcodec/x86/vp56dsp.asm @@ -20,7 +20,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" cextern pw_64 diff --git a/libavcodec/x86/vp8dsp.asm b/libavcodec/x86/vp8dsp.asm index 7d9ebc9463..ae2055ce96 100644 --- a/libavcodec/x86/vp8dsp.asm +++ b/libavcodec/x86/vp8dsp.asm @@ -20,7 +20,7 @@ ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** -%include "x86inc.asm" +%include "libavutil/x86/x86inc.asm" %include "x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/x86inc.asm b/libavutil/x86/x86inc.asm similarity index 100% rename from libavcodec/x86/x86inc.asm rename to libavutil/x86/x86inc.asm From b2c087871dafc7d030b2d48457ddff597dfd4925 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Wed, 3 Aug 2011 09:48:08 -0700 Subject: [PATCH 20/20] Move x86util.asm from libavcodec/ to libavutil/. This allows using it in swscale also. --- libavcodec/x86/ac3dsp.asm | 2 +- libavcodec/x86/dct32_sse.asm | 2 +- libavcodec/x86/deinterlace.asm | 2 +- libavcodec/x86/dsputilenc_yasm.asm | 2 +- libavcodec/x86/fmtconvert.asm | 2 +- libavcodec/x86/h264_chromamc.asm | 2 +- libavcodec/x86/h264_chromamc_10bit.asm | 2 +- libavcodec/x86/h264_deblock.asm | 2 +- libavcodec/x86/h264_deblock_10bit.asm | 2 +- libavcodec/x86/h264_idct.asm | 2 +- libavcodec/x86/h264_idct_10bit.asm | 2 +- libavcodec/x86/h264_intrapred.asm | 2 +- libavcodec/x86/h264_intrapred_10bit.asm | 2 +- libavcodec/x86/h264_qpel_10bit.asm | 2 +- libavcodec/x86/h264_weight_10bit.asm | 2 +- libavcodec/x86/vc1dsp_yasm.asm | 2 +- libavcodec/x86/vp3dsp.asm | 2 +- libavcodec/x86/vp56dsp.asm | 2 +- libavcodec/x86/vp8dsp.asm | 2 +- {libavcodec => libavutil}/x86/x86util.asm | 0 20 files changed, 19 insertions(+), 19 deletions(-) rename {libavcodec => libavutil}/x86/x86util.asm (100%) diff --git a/libavcodec/x86/ac3dsp.asm b/libavcodec/x86/ac3dsp.asm index 44124c5397..9e92678afe 100644 --- a/libavcodec/x86/ac3dsp.asm +++ b/libavcodec/x86/ac3dsp.asm @@ -20,7 +20,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/dct32_sse.asm b/libavcodec/x86/dct32_sse.asm index 4e938ad198..f3eaf3a956 100644 --- a/libavcodec/x86/dct32_sse.asm +++ b/libavcodec/x86/dct32_sse.asm @@ -20,7 +20,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA 32 diff --git a/libavcodec/x86/deinterlace.asm b/libavcodec/x86/deinterlace.asm index a782b2aae1..9150f4578e 100644 --- a/libavcodec/x86/deinterlace.asm +++ b/libavcodec/x86/deinterlace.asm @@ -21,7 +21,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/dsputilenc_yasm.asm b/libavcodec/x86/dsputilenc_yasm.asm index 09450226e0..a0531b0b7e 100644 --- a/libavcodec/x86/dsputilenc_yasm.asm +++ 
b/libavcodec/x86/dsputilenc_yasm.asm @@ -22,7 +22,7 @@ ;***************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION .text diff --git a/libavcodec/x86/fmtconvert.asm b/libavcodec/x86/fmtconvert.asm index a69b3ff357..e384e8f0b2 100644 --- a/libavcodec/x86/fmtconvert.asm +++ b/libavcodec/x86/fmtconvert.asm @@ -20,7 +20,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_TEXT diff --git a/libavcodec/x86/h264_chromamc.asm b/libavcodec/x86/h264_chromamc.asm index 201dcd85a6..caef7dd7be 100644 --- a/libavcodec/x86/h264_chromamc.asm +++ b/libavcodec/x86/h264_chromamc.asm @@ -21,7 +21,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_chromamc_10bit.asm b/libavcodec/x86/h264_chromamc_10bit.asm index af574844dc..56c0511857 100644 --- a/libavcodec/x86/h264_chromamc_10bit.asm +++ b/libavcodec/x86/h264_chromamc_10bit.asm @@ -23,7 +23,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_deblock.asm b/libavcodec/x86/h264_deblock.asm index ac25dca92d..1304e40261 100644 --- a/libavcodec/x86/h264_deblock.asm +++ b/libavcodec/x86/h264_deblock.asm @@ -25,7 +25,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION .text diff --git a/libavcodec/x86/h264_deblock_10bit.asm b/libavcodec/x86/h264_deblock_10bit.asm index 0f525449e3..0a1809c188 100644 --- a/libavcodec/x86/h264_deblock_10bit.asm +++ b/libavcodec/x86/h264_deblock_10bit.asm @@ -25,7 +25,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_idct.asm b/libavcodec/x86/h264_idct.asm index 482881b332..42c175a0d8 100644 --- a/libavcodec/x86/h264_idct.asm +++ b/libavcodec/x86/h264_idct.asm @@ -27,7 +27,7 @@ ;***************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_idct_10bit.asm b/libavcodec/x86/h264_idct_10bit.asm index 6a45724f3f..a9c12da5a2 100644 --- a/libavcodec/x86/h264_idct_10bit.asm +++ b/libavcodec/x86/h264_idct_10bit.asm @@ -23,7 +23,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_intrapred.asm b/libavcodec/x86/h264_intrapred.asm index 258eef563f..71b5aa130c 100644 --- a/libavcodec/x86/h264_intrapred.asm +++ b/libavcodec/x86/h264_intrapred.asm @@ -23,7 +23,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA diff --git 
a/libavcodec/x86/h264_intrapred_10bit.asm b/libavcodec/x86/h264_intrapred_10bit.asm index 7d8ce08690..5a06896afe 100644 --- a/libavcodec/x86/h264_intrapred_10bit.asm +++ b/libavcodec/x86/h264_intrapred_10bit.asm @@ -23,7 +23,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/h264_qpel_10bit.asm b/libavcodec/x86/h264_qpel_10bit.asm index 3df3048100..30d0913cd1 100644 --- a/libavcodec/x86/h264_qpel_10bit.asm +++ b/libavcodec/x86/h264_qpel_10bit.asm @@ -23,7 +23,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA 32 diff --git a/libavcodec/x86/h264_weight_10bit.asm b/libavcodec/x86/h264_weight_10bit.asm index abf382389b..affc4ce5c4 100644 --- a/libavcodec/x86/h264_weight_10bit.asm +++ b/libavcodec/x86/h264_weight_10bit.asm @@ -23,7 +23,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA 32 diff --git a/libavcodec/x86/vc1dsp_yasm.asm b/libavcodec/x86/vc1dsp_yasm.asm index 0bf7185a61..bc53239e9d 100644 --- a/libavcodec/x86/vc1dsp_yasm.asm +++ b/libavcodec/x86/vc1dsp_yasm.asm @@ -20,7 +20,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" cextern pw_4 cextern pw_5 diff --git a/libavcodec/x86/vp3dsp.asm b/libavcodec/x86/vp3dsp.asm index 23616a1863..096879bc47 100644 --- a/libavcodec/x86/vp3dsp.asm +++ b/libavcodec/x86/vp3dsp.asm @@ -20,7 +20,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" ; MMX-optimized functions cribbed from the original VP3 source code. diff --git a/libavcodec/x86/vp56dsp.asm b/libavcodec/x86/vp56dsp.asm index fc68cabef0..5c77bea55a 100644 --- a/libavcodec/x86/vp56dsp.asm +++ b/libavcodec/x86/vp56dsp.asm @@ -21,7 +21,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" cextern pw_64 diff --git a/libavcodec/x86/vp8dsp.asm b/libavcodec/x86/vp8dsp.asm index ae2055ce96..1434be629e 100644 --- a/libavcodec/x86/vp8dsp.asm +++ b/libavcodec/x86/vp8dsp.asm @@ -21,7 +21,7 @@ ;****************************************************************************** %include "libavutil/x86/x86inc.asm" -%include "x86util.asm" +%include "libavutil/x86/x86util.asm" SECTION_RODATA diff --git a/libavcodec/x86/x86util.asm b/libavutil/x86/x86util.asm similarity index 100% rename from libavcodec/x86/x86util.asm rename to libavutil/x86/x86util.asm