mirror of
https://github.com/FFmpeg/FFmpeg.git
synced 2024-12-23 12:43:46 +02:00
Merge remote-tracking branch 'qatar/master'
* qatar/master: (23 commits) build: cosmetics: Reorder some lists in a more logical fashion x86: pngdsp: Fix assembly for OS/2 fate: add test for RTjpeg in nuv with frameheader rtmp: send check_bw as notification g723_1: clip argument for 15-bit version of normalize_bits() g723_1: use all LPC vectors in formant postfilter id3v2: Support v2.2 PIC avplay: fix build with lavfi disabled. avconv: split configuring filter configuration to a separate file. avconv: split option parsing into a separate file. mpc8: do not leave padding after last frame in buffer for the next decode call mpegaudioenc: list supported channel layouts. mpegaudiodec: don't print an error on > 1 frame in a packet. api-example: update to new audio encoding API. configure: add --enable/disable-random option doc: cygwin: Update list of FATE package requirements build: Remove all installed headers and header directories on uninstall build: change checkheaders to use regular build rules rtmp: Add a new option 'rtmp_subscribe' rtmp: Add support for subscribing live streams ... Conflicts: Makefile common.mak configure doc/examples/decoding_encoding.c ffmpeg.c libavcodec/g723_1.c libavcodec/mpegaudiodec.c libavcodec/x86/pngdsp.asm libavformat/version.h library.mak tests/fate/video.mak Merged-by: Michael Niedermayer <michaelni@gmx.at>
This commit is contained in:
commit
11a1033c9f
2
.gitignore
vendored
2
.gitignore
vendored
@ -4,7 +4,7 @@
|
||||
*.def
|
||||
*.dll
|
||||
*.exe
|
||||
*.ho
|
||||
*.h.c
|
||||
*.lib
|
||||
*.pc
|
||||
*.so
|
||||
|
6
Makefile
6
Makefile
@ -18,6 +18,7 @@ PROGS-$(CONFIG_FFSERVER) += ffserver
|
||||
PROGS := $(PROGS-yes:%=%$(EXESUF))
|
||||
INSTPROGS = $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))
|
||||
OBJS = cmdutils.o
|
||||
OBJS-ffmpeg = ffmpeg_opt.o ffmpeg_filter.o
|
||||
TESTTOOLS = audiogen videogen rotozoom tiny_psnr base64
|
||||
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
|
||||
TOOLS = qt-faststart trasher
|
||||
@ -68,8 +69,9 @@ config.h: .config
|
||||
|
||||
SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
|
||||
ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
|
||||
ALTIVEC-OBJS ARMV5TE-OBJS ARMV6-OBJS ARMVFP-OBJS MMI-OBJS \
|
||||
MMX-OBJS NEON-OBJS VIS-OBJS YASM-OBJS \
|
||||
ARMV5TE-OBJS ARMV6-OBJS ARMVFP-OBJS NEON-OBJS \
|
||||
MMI-OBJS ALTIVEC-OBJS VIS-OBJS \
|
||||
MMX-OBJS YASM-OBJS \
|
||||
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
|
||||
OBJS TESTOBJS
|
||||
|
||||
|
14
common.mak
14
common.mak
@ -10,8 +10,9 @@ ifndef SUBDIR
|
||||
ifndef V
|
||||
Q = @
|
||||
ECHO = printf "$(1)\t%s\n" $(2)
|
||||
BRIEF = CC CXX AS YASM AR LD HOSTCC STRIP CP
|
||||
SILENT = DEPCC DEPAS DEPHOSTCC DEPYASM RM RANLIB
|
||||
BRIEF = CC CXX HOSTCC AS YASM AR LD STRIP CP
|
||||
SILENT = DEPCC DEPHOSTCC DEPAS DEPYASM RANLIB RM
|
||||
|
||||
MSG = $@
|
||||
M = @$(call ECHO,$(TAG),$@);
|
||||
$(foreach VAR,$(BRIEF), \
|
||||
@ -98,8 +99,9 @@ DEP_LIBS := $(foreach NAME,$(FFLIBS),lib$(NAME)/$($(CONFIG_SHARED:yes=S)LIBNAME)
|
||||
ALLHEADERS := $(subst $(SRC_DIR)/,$(SUBDIR),$(wildcard $(SRC_DIR)/*.h $(SRC_DIR)/$(ARCH)/*.h))
|
||||
SKIPHEADERS += $(ARCH_HEADERS:%=$(ARCH)/%) $(SKIPHEADERS-)
|
||||
SKIPHEADERS := $(SKIPHEADERS:%=$(SUBDIR)%)
|
||||
HEADEROBJS := $(filter-out $(SKIPHEADERS:.h=.ho),$(ALLHEADERS:.h=.ho))
|
||||
checkheaders: $(HEADEROBJS)
|
||||
HOBJS = $(filter-out $(SKIPHEADERS:.h=.h.o),$(ALLHEADERS:.h=.h.o))
|
||||
checkheaders: $(HOBJS)
|
||||
.SECONDARY: $(HOBJS:.o=.c)
|
||||
|
||||
alltools: $(TOOLS)
|
||||
|
||||
@ -117,8 +119,8 @@ $(TOOLOBJS): | tools
|
||||
|
||||
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOSTOBJS) $(TESTOBJS) $(HEADEROBJS))
|
||||
|
||||
CLEANSUFFIXES = *.d *.o *~ *.ho *.map *.ver *.gcno *.gcda
|
||||
CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda
|
||||
DISTCLEANSUFFIXES = *.pc
|
||||
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
|
||||
|
||||
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d))
|
||||
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d))
|
||||
|
2
configure
vendored
2
configure
vendored
@ -1211,6 +1211,7 @@ HAVE_LIST="
|
||||
alsa_asoundlib_h
|
||||
altivec_h
|
||||
arpa_inet_h
|
||||
asm_mod_q
|
||||
asm_mod_y
|
||||
asm_types_h
|
||||
attribute_may_alias
|
||||
@ -3119,6 +3120,7 @@ EOF
|
||||
enabled neon && check_asm neon '"vadd.i16 q0, q0, q0"'
|
||||
enabled vfpv3 && check_asm vfpv3 '"vmov.f32 s0, #1.0"'
|
||||
|
||||
check_asm asm_mod_q '"add r0, %Q0, %R0" :: "r"((long long)0)'
|
||||
check_asm asm_mod_y '"vmul.i32 d0, d0, %y0" :: "x"(0)'
|
||||
|
||||
enabled_all armv6t2 shared !pic && enable_pic
|
||||
|
@ -34,6 +34,7 @@
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/audioconvert.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
|
||||
@ -41,6 +42,59 @@
|
||||
#define AUDIO_INBUF_SIZE 20480
|
||||
#define AUDIO_REFILL_THRESH 4096
|
||||
|
||||
/* check that a given sample format is supported by the encoder */
|
||||
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
const enum AVSampleFormat *p = codec->sample_fmts;
|
||||
|
||||
while (*p != AV_SAMPLE_FMT_NONE) {
|
||||
if (*p == sample_fmt)
|
||||
return 1;
|
||||
p++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* just pick the highest supported samplerate */
|
||||
static int select_sample_rate(AVCodec *codec)
|
||||
{
|
||||
const int *p;
|
||||
int best_samplerate = 0;
|
||||
|
||||
if (!codec->supported_samplerates)
|
||||
return 44100;
|
||||
|
||||
p = codec->supported_samplerates;
|
||||
while (*p) {
|
||||
best_samplerate = FFMAX(*p, best_samplerate);
|
||||
p++;
|
||||
}
|
||||
return best_samplerate;
|
||||
}
|
||||
|
||||
/* select layout with the highest channel count */
|
||||
static int select_channel_layout(AVCodec *codec)
|
||||
{
|
||||
const uint64_t *p;
|
||||
uint64_t best_ch_layout = 0;
|
||||
int best_nb_channells = 0;
|
||||
|
||||
if (!codec->channel_layouts)
|
||||
return AV_CH_LAYOUT_STEREO;
|
||||
|
||||
p = codec->channel_layouts;
|
||||
while (*p) {
|
||||
int nb_channels = av_get_channel_layout_nb_channels(*p);
|
||||
|
||||
if (nb_channels > best_nb_channells) {
|
||||
best_ch_layout = *p;
|
||||
best_nb_channells = nb_channels;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
return best_ch_layout;
|
||||
}
|
||||
|
||||
/*
|
||||
* Audio encoding example
|
||||
*/
|
||||
@ -48,11 +102,13 @@ static void audio_encode_example(const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int frame_size, i, j, out_size, outbuf_size;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
int i, j, k, ret, got_output;
|
||||
int buffer_size;
|
||||
FILE *f;
|
||||
short *samples;
|
||||
uint16_t *samples;
|
||||
float t, tincr;
|
||||
uint8_t *outbuf;
|
||||
|
||||
printf("Encode audio file %s\n", filename);
|
||||
|
||||
@ -67,9 +123,19 @@ static void audio_encode_example(const char *filename)
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
c->channels = 2;
|
||||
|
||||
/* check that the encoder supports s16 pcm input */
|
||||
c->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
if (!check_sample_fmt(codec, c->sample_fmt)) {
|
||||
fprintf(stderr, "encoder does not support %s",
|
||||
av_get_sample_fmt_name(c->sample_fmt));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* select other audio parameters supported by the encoder */
|
||||
c->sample_rate = select_sample_rate(codec);
|
||||
c->channel_layout = select_channel_layout(codec);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
@ -77,35 +143,71 @@ static void audio_encode_example(const char *filename)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* the codec gives us the frame size, in samples */
|
||||
frame_size = c->frame_size;
|
||||
samples = malloc(frame_size * 2 * c->channels);
|
||||
outbuf_size = 10000;
|
||||
outbuf = malloc(outbuf_size);
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* frame containing input raw audio */
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "could not allocate audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->nb_samples = c->frame_size;
|
||||
frame->format = c->sample_fmt;
|
||||
frame->channel_layout = c->channel_layout;
|
||||
|
||||
/* the codec gives us the frame size, in samples,
|
||||
* we calculate the size of the samples buffer in bytes */
|
||||
buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
|
||||
c->sample_fmt, 0);
|
||||
samples = av_malloc(buffer_size);
|
||||
if (!samples) {
|
||||
fprintf(stderr, "could not allocate %d bytes for samples buffer\n",
|
||||
buffer_size);
|
||||
exit(1);
|
||||
}
|
||||
/* setup the data pointers in the AVFrame */
|
||||
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
|
||||
(const uint8_t*)samples, buffer_size, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "could not setup audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* encode a single tone sound */
|
||||
t = 0;
|
||||
tincr = 2 * M_PI * 440.0 / c->sample_rate;
|
||||
for(i=0;i<200;i++) {
|
||||
for(j=0;j<frame_size;j++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
for (j = 0; j < c->frame_size; j++) {
|
||||
samples[2*j] = (int)(sin(t) * 10000);
|
||||
samples[2*j+1] = samples[2*j];
|
||||
|
||||
for (k = 1; k < c->channels; k++)
|
||||
samples[2*j + k] = samples[2*j];
|
||||
t += tincr;
|
||||
}
|
||||
/* encode the samples */
|
||||
out_size = avcodec_encode_audio(c, outbuf, outbuf_size, samples);
|
||||
fwrite(outbuf, 1, out_size, f);
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "error encoding audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
fclose(f);
|
||||
free(outbuf);
|
||||
free(samples);
|
||||
|
||||
av_freep(&samples);
|
||||
av_freep(&frame);
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
}
|
||||
|
@ -318,9 +318,9 @@ following "Devel" ones:
|
||||
binutils, gcc4-core, make, git, mingw-runtime, texi2html
|
||||
@end example
|
||||
|
||||
And the following "Utils" one:
|
||||
In order to run FATE you will also need the following "Utils" packages:
|
||||
@example
|
||||
diffutils
|
||||
bc, diffutils
|
||||
@end example
|
||||
|
||||
Then run
|
||||
|
@ -267,6 +267,11 @@ value will be sent.
|
||||
Stream identifier to play or to publish. This option overrides the
|
||||
parameter specified in the URI.
|
||||
|
||||
@item rtmp_subscribe
|
||||
Name of live stream to subscribe to. By default no value will be sent.
|
||||
It is only sent if the option is specified or if rtmp_live
|
||||
is set to live.
|
||||
|
||||
@item rtmp_swfurl
|
||||
URL of the SWF player for the media. By default no value will be sent.
|
||||
|
||||
|
391
ffmpeg.h
Normal file
391
ffmpeg.h
Normal file
@ -0,0 +1,391 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef FFMPEG_H
|
||||
#define FFMPEG_H
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <signal.h>
|
||||
|
||||
#if HAVE_PTHREADS
|
||||
#include <pthread.h>
|
||||
#endif
|
||||
|
||||
#include "cmdutils.h"
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/avio.h"
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
||||
#include "libavfilter/avfilter.h"
|
||||
#include "libavfilter/avfiltergraph.h"
|
||||
|
||||
#include "libavutil/avutil.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavutil/fifo.h"
|
||||
#include "libavutil/pixfmt.h"
|
||||
#include "libavutil/rational.h"
|
||||
|
||||
#include "libswresample/swresample.h"
|
||||
|
||||
#define VSYNC_AUTO -1
|
||||
#define VSYNC_PASSTHROUGH 0
|
||||
#define VSYNC_CFR 1
|
||||
#define VSYNC_VFR 2
|
||||
#define VSYNC_DROP 0xff
|
||||
|
||||
#define MAX_STREAMS 1024 /* arbitrary sanity check value */
|
||||
|
||||
/* select an input stream for an output stream */
|
||||
typedef struct StreamMap {
|
||||
int disabled; /** 1 is this mapping is disabled by a negative map */
|
||||
int file_index;
|
||||
int stream_index;
|
||||
int sync_file_index;
|
||||
int sync_stream_index;
|
||||
char *linklabel; /** name of an output link, for mapping lavfi outputs */
|
||||
} StreamMap;
|
||||
|
||||
typedef struct {
|
||||
int file_idx, stream_idx, channel_idx; // input
|
||||
int ofile_idx, ostream_idx; // output
|
||||
} AudioChannelMap;
|
||||
|
||||
typedef struct OptionsContext {
|
||||
/* input/output options */
|
||||
int64_t start_time;
|
||||
const char *format;
|
||||
|
||||
SpecifierOpt *codec_names;
|
||||
int nb_codec_names;
|
||||
SpecifierOpt *audio_channels;
|
||||
int nb_audio_channels;
|
||||
SpecifierOpt *audio_sample_rate;
|
||||
int nb_audio_sample_rate;
|
||||
SpecifierOpt *frame_rates;
|
||||
int nb_frame_rates;
|
||||
SpecifierOpt *frame_sizes;
|
||||
int nb_frame_sizes;
|
||||
SpecifierOpt *frame_pix_fmts;
|
||||
int nb_frame_pix_fmts;
|
||||
|
||||
/* input options */
|
||||
int64_t input_ts_offset;
|
||||
int rate_emu;
|
||||
|
||||
SpecifierOpt *ts_scale;
|
||||
int nb_ts_scale;
|
||||
SpecifierOpt *dump_attachment;
|
||||
int nb_dump_attachment;
|
||||
|
||||
/* output options */
|
||||
StreamMap *stream_maps;
|
||||
int nb_stream_maps;
|
||||
AudioChannelMap *audio_channel_maps; /* one info entry per -map_channel */
|
||||
int nb_audio_channel_maps; /* number of (valid) -map_channel settings */
|
||||
int metadata_global_manual;
|
||||
int metadata_streams_manual;
|
||||
int metadata_chapters_manual;
|
||||
const char **attachments;
|
||||
int nb_attachments;
|
||||
|
||||
int chapters_input_file;
|
||||
|
||||
int64_t recording_time;
|
||||
uint64_t limit_filesize;
|
||||
float mux_preload;
|
||||
float mux_max_delay;
|
||||
|
||||
int video_disable;
|
||||
int audio_disable;
|
||||
int subtitle_disable;
|
||||
int data_disable;
|
||||
|
||||
/* indexed by output file stream index */
|
||||
int *streamid_map;
|
||||
int nb_streamid_map;
|
||||
|
||||
SpecifierOpt *metadata;
|
||||
int nb_metadata;
|
||||
SpecifierOpt *max_frames;
|
||||
int nb_max_frames;
|
||||
SpecifierOpt *bitstream_filters;
|
||||
int nb_bitstream_filters;
|
||||
SpecifierOpt *codec_tags;
|
||||
int nb_codec_tags;
|
||||
SpecifierOpt *sample_fmts;
|
||||
int nb_sample_fmts;
|
||||
SpecifierOpt *qscale;
|
||||
int nb_qscale;
|
||||
SpecifierOpt *forced_key_frames;
|
||||
int nb_forced_key_frames;
|
||||
SpecifierOpt *force_fps;
|
||||
int nb_force_fps;
|
||||
SpecifierOpt *frame_aspect_ratios;
|
||||
int nb_frame_aspect_ratios;
|
||||
SpecifierOpt *rc_overrides;
|
||||
int nb_rc_overrides;
|
||||
SpecifierOpt *intra_matrices;
|
||||
int nb_intra_matrices;
|
||||
SpecifierOpt *inter_matrices;
|
||||
int nb_inter_matrices;
|
||||
SpecifierOpt *top_field_first;
|
||||
int nb_top_field_first;
|
||||
SpecifierOpt *metadata_map;
|
||||
int nb_metadata_map;
|
||||
SpecifierOpt *presets;
|
||||
int nb_presets;
|
||||
SpecifierOpt *copy_initial_nonkeyframes;
|
||||
int nb_copy_initial_nonkeyframes;
|
||||
SpecifierOpt *filters;
|
||||
int nb_filters;
|
||||
} OptionsContext;
|
||||
|
||||
typedef struct InputFilter {
|
||||
AVFilterContext *filter;
|
||||
struct InputStream *ist;
|
||||
struct FilterGraph *graph;
|
||||
uint8_t *name;
|
||||
} InputFilter;
|
||||
|
||||
typedef struct OutputFilter {
|
||||
AVFilterContext *filter;
|
||||
struct OutputStream *ost;
|
||||
struct FilterGraph *graph;
|
||||
uint8_t *name;
|
||||
|
||||
/* temporary storage until stream maps are processed */
|
||||
AVFilterInOut *out_tmp;
|
||||
} OutputFilter;
|
||||
|
||||
typedef struct FilterGraph {
|
||||
int index;
|
||||
const char *graph_desc;
|
||||
|
||||
AVFilterGraph *graph;
|
||||
|
||||
InputFilter **inputs;
|
||||
int nb_inputs;
|
||||
OutputFilter **outputs;
|
||||
int nb_outputs;
|
||||
} FilterGraph;
|
||||
|
||||
typedef struct InputStream {
|
||||
int file_index;
|
||||
AVStream *st;
|
||||
int discard; /* true if stream data should be discarded */
|
||||
int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
|
||||
AVCodec *dec;
|
||||
AVFrame *decoded_frame;
|
||||
|
||||
int64_t start; /* time when read started */
|
||||
/* predicted dts of the next packet read for this stream or (when there are
|
||||
* several frames in a packet) of the next frame in current packet (in AV_TIME_BASE units) */
|
||||
int64_t next_dts;
|
||||
int64_t dts; ///< dts of the last packet read for this stream (in AV_TIME_BASE units)
|
||||
|
||||
int64_t next_pts; ///< synthetic pts for the next decode frame (in AV_TIME_BASE units)
|
||||
int64_t pts; ///< current pts of the decoded frame (in AV_TIME_BASE units)
|
||||
int wrap_correction_done;
|
||||
double ts_scale;
|
||||
int is_start; /* is 1 at the start and after a discontinuity */
|
||||
int saw_first_ts;
|
||||
int showed_multi_packet_warning;
|
||||
AVDictionary *opts;
|
||||
AVRational framerate; /* framerate forced with -r */
|
||||
int top_field_first;
|
||||
|
||||
int resample_height;
|
||||
int resample_width;
|
||||
int resample_pix_fmt;
|
||||
|
||||
int resample_sample_fmt;
|
||||
int resample_sample_rate;
|
||||
int resample_channels;
|
||||
uint64_t resample_channel_layout;
|
||||
|
||||
struct sub2video {
|
||||
int64_t last_pts;
|
||||
AVFilterBufferRef *ref;
|
||||
int w, h;
|
||||
} sub2video;
|
||||
|
||||
/* a pool of free buffers for decoded data */
|
||||
FrameBuffer *buffer_pool;
|
||||
int dr1;
|
||||
|
||||
/* decoded data from this stream goes into all those filters
|
||||
* currently video and audio only */
|
||||
InputFilter **filters;
|
||||
int nb_filters;
|
||||
} InputStream;
|
||||
|
||||
typedef struct InputFile {
|
||||
AVFormatContext *ctx;
|
||||
int eof_reached; /* true if eof reached */
|
||||
int unavailable; /* true if the file is unavailable (possibly temporarily) */
|
||||
int ist_index; /* index of first stream in input_streams */
|
||||
int64_t ts_offset;
|
||||
int nb_streams; /* number of stream that ffmpeg is aware of; may be different
|
||||
from ctx.nb_streams if new streams appear during av_read_frame() */
|
||||
int nb_streams_warn; /* number of streams that the user was warned of */
|
||||
int rate_emu;
|
||||
|
||||
#if HAVE_PTHREADS
|
||||
pthread_t thread; /* thread reading from this file */
|
||||
int finished; /* the thread has exited */
|
||||
int joined; /* the thread has been joined */
|
||||
pthread_mutex_t fifo_lock; /* lock for access to fifo */
|
||||
pthread_cond_t fifo_cond; /* the main thread will signal on this cond after reading from fifo */
|
||||
AVFifoBuffer *fifo; /* demuxed packets are stored here; freed by the main thread */
|
||||
#endif
|
||||
} InputFile;
|
||||
|
||||
typedef struct OutputStream {
|
||||
int file_index; /* file index */
|
||||
int index; /* stream index in the output file */
|
||||
int source_index; /* InputStream index */
|
||||
AVStream *st; /* stream in the output file */
|
||||
int encoding_needed; /* true if encoding needed for this stream */
|
||||
int frame_number;
|
||||
/* input pts and corresponding output pts
|
||||
for A/V sync */
|
||||
struct InputStream *sync_ist; /* input stream to sync against */
|
||||
int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
|
||||
/* pts of the first frame encoded for this stream, used for limiting
|
||||
* recording time */
|
||||
int64_t first_pts;
|
||||
AVBitStreamFilterContext *bitstream_filters;
|
||||
AVCodec *enc;
|
||||
int64_t max_frames;
|
||||
AVFrame *filtered_frame;
|
||||
|
||||
/* video only */
|
||||
AVRational frame_rate;
|
||||
int force_fps;
|
||||
int top_field_first;
|
||||
|
||||
float frame_aspect_ratio;
|
||||
float last_quality;
|
||||
|
||||
/* forced key frames */
|
||||
int64_t *forced_kf_pts;
|
||||
int forced_kf_count;
|
||||
int forced_kf_index;
|
||||
char *forced_keyframes;
|
||||
|
||||
/* audio only */
|
||||
int audio_channels_map[SWR_CH_MAX]; /* list of the channels id to pick from the source stream */
|
||||
int audio_channels_mapped; /* number of channels in audio_channels_map */
|
||||
|
||||
FILE *logfile;
|
||||
|
||||
OutputFilter *filter;
|
||||
char *avfilter;
|
||||
|
||||
int64_t sws_flags;
|
||||
int64_t swr_dither_method;
|
||||
double swr_dither_scale;
|
||||
AVDictionary *opts;
|
||||
int is_past_recording_time;
|
||||
int unavailable; /* true if the steram is unavailable (possibly temporarily) */
|
||||
int stream_copy;
|
||||
const char *attachment_filename;
|
||||
int copy_initial_nonkeyframes;
|
||||
|
||||
int keep_pix_fmt;
|
||||
} OutputStream;
|
||||
|
||||
typedef struct OutputFile {
|
||||
AVFormatContext *ctx;
|
||||
AVDictionary *opts;
|
||||
int ost_index; /* index of the first stream in output_streams */
|
||||
int64_t recording_time; ///< desired length of the resulting file in microseconds == AV_TIME_BASE units
|
||||
int64_t start_time; ///< start time in microseconds == AV_TIME_BASE units
|
||||
uint64_t limit_filesize; /* filesize limit expressed in bytes */
|
||||
} OutputFile;
|
||||
|
||||
extern InputStream **input_streams;
|
||||
extern int nb_input_streams;
|
||||
extern InputFile **input_files;
|
||||
extern int nb_input_files;
|
||||
|
||||
extern OutputStream **output_streams;
|
||||
extern int nb_output_streams;
|
||||
extern OutputFile **output_files;
|
||||
extern int nb_output_files;
|
||||
|
||||
extern FilterGraph **filtergraphs;
|
||||
extern int nb_filtergraphs;
|
||||
|
||||
extern const char *pass_logfilename_prefix;
|
||||
extern char *vstats_filename;
|
||||
|
||||
extern float audio_drift_threshold;
|
||||
extern float dts_delta_threshold;
|
||||
extern float dts_error_threshold;
|
||||
|
||||
extern int audio_volume;
|
||||
extern int audio_sync_method;
|
||||
extern int video_sync_method;
|
||||
extern int do_benchmark;
|
||||
extern int do_benchmark_all;
|
||||
extern int do_deinterlace;
|
||||
extern int do_hex_dump;
|
||||
extern int do_pkt_dump;
|
||||
extern int copy_ts;
|
||||
extern int copy_tb;
|
||||
extern int debug_ts;
|
||||
extern int opt_shortest;
|
||||
extern int exit_on_error;
|
||||
extern int print_stats;
|
||||
extern int qp_hist;
|
||||
extern int same_quant;
|
||||
extern int stdin_interaction;
|
||||
extern int frame_bits_per_raw_sample;
|
||||
extern AVIOContext *progress_avio;
|
||||
|
||||
extern const AVIOInterruptCB int_cb;
|
||||
|
||||
extern const OptionDef options[];
|
||||
|
||||
void term_init(void);
|
||||
void term_exit(void);
|
||||
|
||||
void reset_options(OptionsContext *o, int is_input);
|
||||
void show_usage(void);
|
||||
|
||||
void opt_output_file(void *optctx, const char *filename);
|
||||
|
||||
void assert_avoptions(AVDictionary *m);
|
||||
|
||||
int guess_input_channel_layout(InputStream *ist);
|
||||
|
||||
enum PixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum PixelFormat target);
|
||||
void choose_sample_fmt(AVStream *st, AVCodec *codec);
|
||||
|
||||
int configure_filtergraph(FilterGraph *fg);
|
||||
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out);
|
||||
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist);
|
||||
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost);
|
||||
|
||||
#endif /* FFMPEG_H */
|
789
ffmpeg_filter.c
Normal file
789
ffmpeg_filter.c
Normal file
@ -0,0 +1,789 @@
|
||||
/*
|
||||
* ffmpeg filter configuration
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "ffmpeg.h"
|
||||
|
||||
#include "libavfilter/avfilter.h"
|
||||
#include "libavfilter/avfiltergraph.h"
|
||||
#include "libavfilter/buffersink.h"
|
||||
|
||||
#include "libavutil/audioconvert.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/bprint.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/pixfmt.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavutil/samplefmt.h"
|
||||
|
||||
enum PixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum PixelFormat target)
|
||||
{
|
||||
if (codec && codec->pix_fmts) {
|
||||
const enum PixelFormat *p = codec->pix_fmts;
|
||||
int has_alpha= av_pix_fmt_descriptors[target].nb_components % 2 == 0;
|
||||
enum PixelFormat best= PIX_FMT_NONE;
|
||||
if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
|
||||
if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
|
||||
p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
|
||||
} else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
|
||||
p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
|
||||
PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
|
||||
}
|
||||
}
|
||||
for (; *p != PIX_FMT_NONE; p++) {
|
||||
best= avcodec_find_best_pix_fmt2(best, *p, target, has_alpha, NULL);
|
||||
if (*p == target)
|
||||
break;
|
||||
}
|
||||
if (*p == PIX_FMT_NONE) {
|
||||
if (target != PIX_FMT_NONE)
|
||||
av_log(NULL, AV_LOG_WARNING,
|
||||
"Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
|
||||
av_pix_fmt_descriptors[target].name,
|
||||
codec->name,
|
||||
av_pix_fmt_descriptors[best].name);
|
||||
return best;
|
||||
}
|
||||
}
|
||||
return target;
|
||||
}
|
||||
|
||||
void choose_sample_fmt(AVStream *st, AVCodec *codec)
|
||||
{
|
||||
if (codec && codec->sample_fmts) {
|
||||
const enum AVSampleFormat *p = codec->sample_fmts;
|
||||
for (; *p != -1; p++) {
|
||||
if (*p == st->codec->sample_fmt)
|
||||
break;
|
||||
}
|
||||
if (*p == -1) {
|
||||
if((codec->capabilities & CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
|
||||
av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
|
||||
if(av_get_sample_fmt_name(st->codec->sample_fmt))
|
||||
av_log(NULL, AV_LOG_WARNING,
|
||||
"Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
|
||||
av_get_sample_fmt_name(st->codec->sample_fmt),
|
||||
codec->name,
|
||||
av_get_sample_fmt_name(codec->sample_fmts[0]));
|
||||
st->codec->sample_fmt = codec->sample_fmts[0];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static char *choose_pix_fmts(OutputStream *ost)
|
||||
{
|
||||
if (ost->keep_pix_fmt) {
|
||||
if (ost->filter)
|
||||
avfilter_graph_set_auto_convert(ost->filter->graph->graph,
|
||||
AVFILTER_AUTO_CONVERT_NONE);
|
||||
if (ost->st->codec->pix_fmt == PIX_FMT_NONE)
|
||||
return NULL;
|
||||
return av_strdup(av_get_pix_fmt_name(ost->st->codec->pix_fmt));
|
||||
}
|
||||
if (ost->st->codec->pix_fmt != PIX_FMT_NONE) {
|
||||
return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc, ost->st->codec->pix_fmt)));
|
||||
} else if (ost->enc && ost->enc->pix_fmts) {
|
||||
const enum PixelFormat *p;
|
||||
AVIOContext *s = NULL;
|
||||
uint8_t *ret;
|
||||
int len;
|
||||
|
||||
if (avio_open_dyn_buf(&s) < 0)
|
||||
exit_program(1);
|
||||
|
||||
p = ost->enc->pix_fmts;
|
||||
if (ost->st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
|
||||
if (ost->st->codec->codec_id == AV_CODEC_ID_MJPEG) {
|
||||
p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
|
||||
} else if (ost->st->codec->codec_id == AV_CODEC_ID_LJPEG) {
|
||||
p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
|
||||
PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
|
||||
}
|
||||
}
|
||||
|
||||
for (; *p != PIX_FMT_NONE; p++) {
|
||||
const char *name = av_get_pix_fmt_name(*p);
|
||||
avio_printf(s, "%s:", name);
|
||||
}
|
||||
len = avio_close_dyn_buf(s, &ret);
|
||||
ret[len - 1] = 0;
|
||||
return ret;
|
||||
} else
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* Define a function for building a string containing a list of
|
||||
* allowed formats,
|
||||
*/
|
||||
#define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator)\
|
||||
static char *choose_ ## var ## s(OutputStream *ost) \
|
||||
{ \
|
||||
if (ost->st->codec->var != none) { \
|
||||
get_name(ost->st->codec->var); \
|
||||
return av_strdup(name); \
|
||||
} else if (ost->enc->supported_list) { \
|
||||
const type *p; \
|
||||
AVIOContext *s = NULL; \
|
||||
uint8_t *ret; \
|
||||
int len; \
|
||||
\
|
||||
if (avio_open_dyn_buf(&s) < 0) \
|
||||
exit_program(1); \
|
||||
\
|
||||
for (p = ost->enc->supported_list; *p != none; p++) { \
|
||||
get_name(*p); \
|
||||
avio_printf(s, "%s" separator, name); \
|
||||
} \
|
||||
len = avio_close_dyn_buf(s, &ret); \
|
||||
ret[len - 1] = 0; \
|
||||
return ret; \
|
||||
} else \
|
||||
return NULL; \
|
||||
}
|
||||
|
||||
#define GET_PIX_FMT_NAME(pix_fmt)\
|
||||
const char *name = av_get_pix_fmt_name(pix_fmt);
|
||||
|
||||
// DEF_CHOOSE_FORMAT(enum PixelFormat, pix_fmt, pix_fmts, PIX_FMT_NONE,
|
||||
// GET_PIX_FMT_NAME, ":")
|
||||
|
||||
#define GET_SAMPLE_FMT_NAME(sample_fmt)\
|
||||
const char *name = av_get_sample_fmt_name(sample_fmt)
|
||||
|
||||
DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
|
||||
AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME, ",")
|
||||
|
||||
#define GET_SAMPLE_RATE_NAME(rate)\
|
||||
char name[16];\
|
||||
snprintf(name, sizeof(name), "%d", rate);
|
||||
|
||||
DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
|
||||
GET_SAMPLE_RATE_NAME, ",")
|
||||
|
||||
#define GET_CH_LAYOUT_NAME(ch_layout)\
|
||||
char name[16];\
|
||||
snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);
|
||||
|
||||
DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
|
||||
GET_CH_LAYOUT_NAME, ",")
|
||||
|
||||
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
|
||||
{
|
||||
FilterGraph *fg = av_mallocz(sizeof(*fg));
|
||||
|
||||
if (!fg)
|
||||
exit_program(1);
|
||||
fg->index = nb_filtergraphs;
|
||||
|
||||
fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
|
||||
fg->nb_outputs + 1);
|
||||
if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
|
||||
exit_program(1);
|
||||
fg->outputs[0]->ost = ost;
|
||||
fg->outputs[0]->graph = fg;
|
||||
|
||||
ost->filter = fg->outputs[0];
|
||||
|
||||
fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
|
||||
fg->nb_inputs + 1);
|
||||
if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
|
||||
exit_program(1);
|
||||
fg->inputs[0]->ist = ist;
|
||||
fg->inputs[0]->graph = fg;
|
||||
|
||||
ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
|
||||
&ist->nb_filters, ist->nb_filters + 1);
|
||||
ist->filters[ist->nb_filters - 1] = fg->inputs[0];
|
||||
|
||||
filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
|
||||
&nb_filtergraphs, nb_filtergraphs + 1);
|
||||
filtergraphs[nb_filtergraphs - 1] = fg;
|
||||
|
||||
return fg;
|
||||
}
|
||||
|
||||
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
|
||||
{
|
||||
InputStream *ist = NULL;
|
||||
enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
|
||||
int i;
|
||||
|
||||
// TODO: support other filter types
|
||||
if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
|
||||
"currently.\n");
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
if (in->name) {
|
||||
AVFormatContext *s;
|
||||
AVStream *st = NULL;
|
||||
char *p;
|
||||
int file_idx = strtol(in->name, &p, 0);
|
||||
|
||||
if (file_idx < 0 || file_idx >= nb_input_files) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
|
||||
file_idx, fg->graph_desc);
|
||||
exit_program(1);
|
||||
}
|
||||
s = input_files[file_idx]->ctx;
|
||||
|
||||
for (i = 0; i < s->nb_streams; i++) {
|
||||
enum AVMediaType stream_type = s->streams[i]->codec->codec_type;
|
||||
if (stream_type != type &&
|
||||
!(stream_type == AVMEDIA_TYPE_SUBTITLE &&
|
||||
type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
|
||||
continue;
|
||||
if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
|
||||
st = s->streams[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!st) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
|
||||
"matches no streams.\n", p, fg->graph_desc);
|
||||
exit_program(1);
|
||||
}
|
||||
ist = input_streams[input_files[file_idx]->ist_index + st->index];
|
||||
} else {
|
||||
/* find the first unused stream of corresponding type */
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
ist = input_streams[i];
|
||||
if (ist->st->codec->codec_type == type && ist->discard)
|
||||
break;
|
||||
}
|
||||
if (i == nb_input_streams) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
|
||||
"unlabeled input pad %d on filter %s\n", in->pad_idx,
|
||||
in->filter_ctx->name);
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
av_assert0(ist);
|
||||
|
||||
ist->discard = 0;
|
||||
ist->decoding_needed = 1;
|
||||
ist->st->discard = AVDISCARD_NONE;
|
||||
|
||||
fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
|
||||
&fg->nb_inputs, fg->nb_inputs + 1);
|
||||
if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
|
||||
exit_program(1);
|
||||
fg->inputs[fg->nb_inputs - 1]->ist = ist;
|
||||
fg->inputs[fg->nb_inputs - 1]->graph = fg;
|
||||
|
||||
ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
|
||||
&ist->nb_filters, ist->nb_filters + 1);
|
||||
ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
|
||||
}
|
||||
|
||||
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
|
||||
{
|
||||
char *pix_fmts;
|
||||
OutputStream *ost = ofilter->ost;
|
||||
AVCodecContext *codec = ost->st->codec;
|
||||
AVFilterContext *last_filter = out->filter_ctx;
|
||||
int pad_idx = out->pad_idx;
|
||||
int ret;
|
||||
char name[255];
|
||||
AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
|
||||
|
||||
snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
|
||||
ret = avfilter_graph_create_filter(&ofilter->filter,
|
||||
avfilter_get_by_name("buffersink"),
|
||||
name, NULL, NULL/*buffersink_params*/, fg->graph);
|
||||
av_freep(&buffersink_params);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (codec->width || codec->height) {
|
||||
char args[255];
|
||||
AVFilterContext *filter;
|
||||
|
||||
snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
|
||||
codec->width,
|
||||
codec->height,
|
||||
(unsigned)ost->sws_flags);
|
||||
snprintf(name, sizeof(name), "scaler for output stream %d:%d",
|
||||
ost->file_index, ost->index);
|
||||
if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
|
||||
name, args, NULL, fg->graph)) < 0)
|
||||
return ret;
|
||||
if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
|
||||
return ret;
|
||||
|
||||
last_filter = filter;
|
||||
pad_idx = 0;
|
||||
}
|
||||
|
||||
if ((pix_fmts = choose_pix_fmts(ost))) {
|
||||
AVFilterContext *filter;
|
||||
snprintf(name, sizeof(name), "pixel format for output stream %d:%d",
|
||||
ost->file_index, ost->index);
|
||||
if ((ret = avfilter_graph_create_filter(&filter,
|
||||
avfilter_get_by_name("format"),
|
||||
"format", pix_fmts, NULL,
|
||||
fg->graph)) < 0)
|
||||
return ret;
|
||||
if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
|
||||
return ret;
|
||||
|
||||
last_filter = filter;
|
||||
pad_idx = 0;
|
||||
av_freep(&pix_fmts);
|
||||
}
|
||||
|
||||
if (ost->frame_rate.num && 0) {
|
||||
AVFilterContext *fps;
|
||||
char args[255];
|
||||
|
||||
snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
|
||||
ost->frame_rate.den);
|
||||
snprintf(name, sizeof(name), "fps for output stream %d:%d",
|
||||
ost->file_index, ost->index);
|
||||
ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
|
||||
name, args, NULL, fg->graph);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = avfilter_link(last_filter, pad_idx, fps, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
last_filter = fps;
|
||||
pad_idx = 0;
|
||||
}
|
||||
|
||||
if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
|
||||
{
|
||||
OutputStream *ost = ofilter->ost;
|
||||
AVCodecContext *codec = ost->st->codec;
|
||||
AVFilterContext *last_filter = out->filter_ctx;
|
||||
int pad_idx = out->pad_idx;
|
||||
char *sample_fmts, *sample_rates, *channel_layouts;
|
||||
char name[255];
|
||||
int ret;
|
||||
|
||||
|
||||
snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
|
||||
ret = avfilter_graph_create_filter(&ofilter->filter,
|
||||
avfilter_get_by_name("abuffersink"),
|
||||
name, NULL, NULL, fg->graph);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
|
||||
AVFilterContext *filt_ctx; \
|
||||
\
|
||||
av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
|
||||
"similarly to -af " filter_name "=%s.\n", arg); \
|
||||
\
|
||||
ret = avfilter_graph_create_filter(&filt_ctx, \
|
||||
avfilter_get_by_name(filter_name), \
|
||||
filter_name, arg, NULL, fg->graph); \
|
||||
if (ret < 0) \
|
||||
return ret; \
|
||||
\
|
||||
ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
|
||||
if (ret < 0) \
|
||||
return ret; \
|
||||
\
|
||||
last_filter = filt_ctx; \
|
||||
pad_idx = 0; \
|
||||
} while (0)
|
||||
if (ost->audio_channels_mapped) {
|
||||
int i;
|
||||
AVBPrint pan_buf;
|
||||
av_bprint_init(&pan_buf, 256, 8192);
|
||||
av_bprintf(&pan_buf, "0x%"PRIx64,
|
||||
av_get_default_channel_layout(ost->audio_channels_mapped));
|
||||
for (i = 0; i < ost->audio_channels_mapped; i++)
|
||||
if (ost->audio_channels_map[i] != -1)
|
||||
av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
|
||||
|
||||
AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
|
||||
av_bprint_finalize(&pan_buf, NULL);
|
||||
}
|
||||
|
||||
if (codec->channels && !codec->channel_layout)
|
||||
codec->channel_layout = av_get_default_channel_layout(codec->channels);
|
||||
|
||||
sample_fmts = choose_sample_fmts(ost);
|
||||
sample_rates = choose_sample_rates(ost);
|
||||
channel_layouts = choose_channel_layouts(ost);
|
||||
if (sample_fmts || sample_rates || channel_layouts) {
|
||||
AVFilterContext *format;
|
||||
char args[256];
|
||||
int len = 0;
|
||||
|
||||
if (sample_fmts)
|
||||
len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
|
||||
sample_fmts);
|
||||
if (sample_rates)
|
||||
len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
|
||||
sample_rates);
|
||||
if (channel_layouts)
|
||||
len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
|
||||
channel_layouts);
|
||||
args[len - 1] = 0;
|
||||
|
||||
av_freep(&sample_fmts);
|
||||
av_freep(&sample_rates);
|
||||
av_freep(&channel_layouts);
|
||||
|
||||
snprintf(name, sizeof(name), "audio format for output stream %d:%d",
|
||||
ost->file_index, ost->index);
|
||||
ret = avfilter_graph_create_filter(&format,
|
||||
avfilter_get_by_name("aformat"),
|
||||
name, args, NULL, fg->graph);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = avfilter_link(last_filter, pad_idx, format, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
last_filter = format;
|
||||
pad_idx = 0;
|
||||
}
|
||||
|
||||
if (audio_volume != 256 && 0) {
|
||||
char args[256];
|
||||
|
||||
snprintf(args, sizeof(args), "%f", audio_volume / 256.);
|
||||
AUTO_INSERT_FILTER("-vol", "volume", args);
|
||||
}
|
||||
|
||||
if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define DESCRIBE_FILTER_LINK(f, inout, in) \
|
||||
{ \
|
||||
AVFilterContext *ctx = inout->filter_ctx; \
|
||||
AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads; \
|
||||
int nb_pads = in ? ctx->input_count : ctx->output_count; \
|
||||
AVIOContext *pb; \
|
||||
\
|
||||
if (avio_open_dyn_buf(&pb) < 0) \
|
||||
exit_program(1); \
|
||||
\
|
||||
avio_printf(pb, "%s", ctx->filter->name); \
|
||||
if (nb_pads > 1) \
|
||||
avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));\
|
||||
avio_w8(pb, 0); \
|
||||
avio_close_dyn_buf(pb, &f->name); \
|
||||
}
|
||||
|
||||
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
|
||||
{
|
||||
av_freep(&ofilter->name);
|
||||
DESCRIBE_FILTER_LINK(ofilter, out, 0);
|
||||
|
||||
switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
|
||||
case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
|
||||
case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
|
||||
default: av_assert0(0);
|
||||
}
|
||||
}
|
||||
|
||||
static int sub2video_prepare(InputStream *ist)
|
||||
{
|
||||
AVFormatContext *avf = input_files[ist->file_index]->ctx;
|
||||
int i, ret, w, h;
|
||||
uint8_t *image[4];
|
||||
int linesize[4];
|
||||
|
||||
/* Compute the size of the canvas for the subtitles stream.
|
||||
If the subtitles codec has set a size, use it. Otherwise use the
|
||||
maximum dimensions of the video streams in the same file. */
|
||||
w = ist->st->codec->width;
|
||||
h = ist->st->codec->height;
|
||||
if (!(w && h)) {
|
||||
for (i = 0; i < avf->nb_streams; i++) {
|
||||
if (avf->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
w = FFMAX(w, avf->streams[i]->codec->width);
|
||||
h = FFMAX(h, avf->streams[i]->codec->height);
|
||||
}
|
||||
}
|
||||
if (!(w && h)) {
|
||||
w = FFMAX(w, 720);
|
||||
h = FFMAX(h, 576);
|
||||
}
|
||||
av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
|
||||
}
|
||||
ist->sub2video.w = ist->st->codec->width = w;
|
||||
ist->sub2video.h = ist->st->codec->height = h;
|
||||
|
||||
/* rectangles are PIX_FMT_PAL8, but we have no guarantee that the
|
||||
palettes for all rectangles are identical or compatible */
|
||||
ist->st->codec->pix_fmt = PIX_FMT_RGB32;
|
||||
|
||||
ret = av_image_alloc(image, linesize, w, h, PIX_FMT_RGB32, 32);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
memset(image[0], 0, h * linesize[0]);
|
||||
ist->sub2video.ref = avfilter_get_video_buffer_ref_from_arrays(
|
||||
image, linesize, AV_PERM_READ | AV_PERM_PRESERVE,
|
||||
w, h, PIX_FMT_RGB32);
|
||||
if (!ist->sub2video.ref) {
|
||||
av_free(image[0]);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
AVFilterInOut *in)
|
||||
{
|
||||
AVFilterContext *first_filter = in->filter_ctx;
|
||||
AVFilter *filter = avfilter_get_by_name("buffer");
|
||||
InputStream *ist = ifilter->ist;
|
||||
AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
|
||||
ist->st->time_base;
|
||||
AVRational fr = ist->framerate.num ? ist->framerate :
|
||||
ist->st->r_frame_rate;
|
||||
AVRational sar;
|
||||
AVBPrint args;
|
||||
char name[255];
|
||||
int pad_idx = in->pad_idx;
|
||||
int ret;
|
||||
|
||||
if (ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
|
||||
ret = sub2video_prepare(ist);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
sar = ist->st->sample_aspect_ratio.num ?
|
||||
ist->st->sample_aspect_ratio :
|
||||
ist->st->codec->sample_aspect_ratio;
|
||||
if(!sar.den)
|
||||
sar = (AVRational){0,1};
|
||||
av_bprint_init(&args, 0, 1);
|
||||
av_bprintf(&args,
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
|
||||
"pixel_aspect=%d/%d:sws_param=flags=%d", ist->st->codec->width,
|
||||
ist->st->codec->height, ist->st->codec->pix_fmt,
|
||||
tb.num, tb.den, sar.num, sar.den,
|
||||
SWS_BILINEAR + ((ist->st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
|
||||
if (fr.num && fr.den)
|
||||
av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
|
||||
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
|
||||
ist->file_index, ist->st->index);
|
||||
|
||||
if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter, name,
|
||||
args.str, NULL, fg->graph)) < 0)
|
||||
return ret;
|
||||
|
||||
if (ist->framerate.num) {
|
||||
AVFilterContext *setpts;
|
||||
|
||||
snprintf(name, sizeof(name), "force CFR for input from stream %d:%d",
|
||||
ist->file_index, ist->st->index);
|
||||
if ((ret = avfilter_graph_create_filter(&setpts,
|
||||
avfilter_get_by_name("setpts"),
|
||||
name, "N", NULL,
|
||||
fg->graph)) < 0)
|
||||
return ret;
|
||||
|
||||
if ((ret = avfilter_link(setpts, 0, first_filter, pad_idx)) < 0)
|
||||
return ret;
|
||||
|
||||
first_filter = setpts;
|
||||
pad_idx = 0;
|
||||
}
|
||||
|
||||
if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
AVFilterInOut *in)
|
||||
{
|
||||
AVFilterContext *first_filter = in->filter_ctx;
|
||||
AVFilter *filter = avfilter_get_by_name("abuffer");
|
||||
InputStream *ist = ifilter->ist;
|
||||
int pad_idx = in->pad_idx;
|
||||
char args[255], name[255];
|
||||
int ret;
|
||||
|
||||
snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s"
|
||||
":channel_layout=0x%"PRIx64,
|
||||
1, ist->st->codec->sample_rate,
|
||||
ist->st->codec->sample_rate,
|
||||
av_get_sample_fmt_name(ist->st->codec->sample_fmt),
|
||||
ist->st->codec->channel_layout);
|
||||
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
|
||||
ist->file_index, ist->st->index);
|
||||
|
||||
if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter,
|
||||
name, args, NULL,
|
||||
fg->graph)) < 0)
|
||||
return ret;
|
||||
|
||||
#define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg) do { \
|
||||
AVFilterContext *filt_ctx; \
|
||||
\
|
||||
av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
|
||||
"similarly to -af " filter_name "=%s.\n", arg); \
|
||||
\
|
||||
snprintf(name, sizeof(name), "graph %d %s for input stream %d:%d", \
|
||||
fg->index, filter_name, ist->file_index, ist->st->index); \
|
||||
ret = avfilter_graph_create_filter(&filt_ctx, \
|
||||
avfilter_get_by_name(filter_name), \
|
||||
name, arg, NULL, fg->graph); \
|
||||
if (ret < 0) \
|
||||
return ret; \
|
||||
\
|
||||
ret = avfilter_link(filt_ctx, 0, first_filter, pad_idx); \
|
||||
if (ret < 0) \
|
||||
return ret; \
|
||||
\
|
||||
first_filter = filt_ctx; \
|
||||
} while (0)
|
||||
|
||||
if (audio_sync_method > 0) {
|
||||
char args[256] = {0};
|
||||
|
||||
av_strlcatf(args, sizeof(args), "min_comp=0.001:min_hard_comp=%f", audio_drift_threshold);
|
||||
if (audio_sync_method > 1)
|
||||
av_strlcatf(args, sizeof(args), ":max_soft_comp=%f", audio_sync_method/(double)ist->st->codec->sample_rate);
|
||||
AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
|
||||
}
|
||||
|
||||
// if (ost->audio_channels_mapped) {
|
||||
// int i;
|
||||
// AVBPrint pan_buf;
|
||||
// av_bprint_init(&pan_buf, 256, 8192);
|
||||
// av_bprintf(&pan_buf, "0x%"PRIx64,
|
||||
// av_get_default_channel_layout(ost->audio_channels_mapped));
|
||||
// for (i = 0; i < ost->audio_channels_mapped; i++)
|
||||
// if (ost->audio_channels_map[i] != -1)
|
||||
// av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
|
||||
// AUTO_INSERT_FILTER_INPUT("-map_channel", "pan", pan_buf.str);
|
||||
// av_bprint_finalize(&pan_buf, NULL);
|
||||
// }
|
||||
|
||||
if (audio_volume != 256) {
|
||||
char args[256];
|
||||
|
||||
snprintf(args, sizeof(args), "%f", audio_volume / 256.);
|
||||
AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
|
||||
}
|
||||
if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
AVFilterInOut *in)
|
||||
{
|
||||
av_freep(&ifilter->name);
|
||||
DESCRIBE_FILTER_LINK(ifilter, in, 1);
|
||||
|
||||
switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
|
||||
case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
|
||||
case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
|
||||
default: av_assert0(0);
|
||||
}
|
||||
}
|
||||
|
||||
int configure_filtergraph(FilterGraph *fg)
|
||||
{
|
||||
AVFilterInOut *inputs, *outputs, *cur;
|
||||
int ret, i, init = !fg->graph, simple = !fg->graph_desc;
|
||||
const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
|
||||
fg->graph_desc;
|
||||
|
||||
avfilter_graph_free(&fg->graph);
|
||||
if (!(fg->graph = avfilter_graph_alloc()))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if (simple) {
|
||||
OutputStream *ost = fg->outputs[0]->ost;
|
||||
char args[255];
|
||||
snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
|
||||
fg->graph->scale_sws_opts = av_strdup(args);
|
||||
}
|
||||
|
||||
if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
|
||||
return ret;
|
||||
|
||||
if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' does not have "
|
||||
"exactly one input and output.\n", graph_desc);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
for (cur = inputs; !simple && init && cur; cur = cur->next)
|
||||
init_input_filter(fg, cur);
|
||||
|
||||
for (cur = inputs, i = 0; cur; cur = cur->next, i++)
|
||||
if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0)
|
||||
return ret;
|
||||
avfilter_inout_free(&inputs);
|
||||
|
||||
if (!init || simple) {
|
||||
/* we already know the mappings between lavfi outputs and output streams,
|
||||
* so we can finish the setup */
|
||||
for (cur = outputs, i = 0; cur; cur = cur->next, i++)
|
||||
configure_output_filter(fg, fg->outputs[i], cur);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
|
||||
return ret;
|
||||
} else {
|
||||
/* wait until output mappings are processed */
|
||||
for (cur = outputs; cur;) {
|
||||
fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
|
||||
&fg->nb_outputs, fg->nb_outputs + 1);
|
||||
if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
|
||||
exit_program(1);
|
||||
fg->outputs[fg->nb_outputs - 1]->graph = fg;
|
||||
fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
|
||||
cur = cur->next;
|
||||
fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < fg->nb_inputs; i++)
|
||||
if (fg->inputs[i]->ist == ist)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
2300
ffmpeg_opt.c
Normal file
2300
ffmpeg_opt.c
Normal file
File diff suppressed because it is too large
Load Diff
@ -264,6 +264,7 @@ static int scale_vector(int16_t *vector, int length)
|
||||
for (i = 0; i < length; i++)
|
||||
max = FFMAX(max, FFABS(vector[i]));
|
||||
|
||||
max = FFMIN(max, 0x7FFF);
|
||||
bits = normalize_bits(max, 15);
|
||||
scale = shift_table[bits];
|
||||
|
||||
@ -913,6 +914,7 @@ static void formant_postfilter(G723_1_Context *p, int16_t *lpc, int16_t *buf)
|
||||
}
|
||||
iir_filter(filter_coef[0], filter_coef[1], buf + i,
|
||||
filter_signal + i, 1);
|
||||
lpc += LPC_ORDER;
|
||||
}
|
||||
|
||||
memcpy(p->fir_mem, buf + FRAME_LEN, LPC_ORDER * sizeof(int16_t));
|
||||
|
@ -419,6 +419,8 @@ static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
|
||||
c->cur_frame++;
|
||||
|
||||
c->last_bits_used = get_bits_count(gb);
|
||||
if(get_bits_left(gb) < 8) // we have only padding left
|
||||
c->last_bits_used = buf_size << 3;
|
||||
if(c->cur_frame >= c->frames)
|
||||
c->cur_frame = 0;
|
||||
|
||||
|
@ -1686,7 +1686,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
|
||||
if (s->frame_size <= 0 || s->frame_size > buf_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}else if(s->frame_size < buf_size){
|
||||
} else if (s->frame_size < buf_size) {
|
||||
av_log(avctx, AV_LOG_DEBUG, "incorrect frame size - multiple frames in buffer?\n");
|
||||
buf_size= s->frame_size;
|
||||
}
|
||||
|
@ -24,6 +24,8 @@
|
||||
* The simplest mpeg audio layer 2 encoder.
|
||||
*/
|
||||
|
||||
#include "libavutil/audioconvert.h"
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "internal.h"
|
||||
#include "put_bits.h"
|
||||
@ -792,6 +794,9 @@ AVCodec ff_mp2_encoder = {
|
||||
.supported_samplerates = (const int[]){
|
||||
44100, 48000, 32000, 22050, 24000, 16000, 0
|
||||
},
|
||||
.channel_layouts = (const uint64_t[]){ AV_CH_LAYOUT_MONO,
|
||||
AV_CH_LAYOUT_STEREO,
|
||||
0 },
|
||||
.long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
|
||||
.defaults = mp2_defaults,
|
||||
};
|
||||
|
@ -28,7 +28,7 @@ SECTION_RODATA
|
||||
|
||||
cextern pw_255
|
||||
|
||||
SECTION_TEXT 16
|
||||
SECTION_TEXT
|
||||
|
||||
; %1 = nr. of xmm registers used
|
||||
%macro ADD_BYTES_FN 1
|
||||
|
@ -131,6 +131,8 @@ const CodecMime ff_id3v2_mime_tags[] = {
|
||||
{"image/png" , AV_CODEC_ID_PNG},
|
||||
{"image/tiff", AV_CODEC_ID_TIFF},
|
||||
{"image/bmp", AV_CODEC_ID_BMP},
|
||||
{"JPG", AV_CODEC_ID_MJPEG}, /* ID3v2.2 */
|
||||
{"PNG" , AV_CODEC_ID_PNG}, /* ID3v2.2 */
|
||||
{"", AV_CODEC_ID_NONE},
|
||||
};
|
||||
|
||||
|
@ -32,6 +32,15 @@
|
||||
#define HMAC_IPAD_VAL 0x36
|
||||
#define HMAC_OPAD_VAL 0x5C
|
||||
|
||||
/**
|
||||
* A non-zero transaction id requires the server to send back
|
||||
* a _result or _error response.
|
||||
* Setting it to 0 marks the message as a notification not
|
||||
* requiring feedback.
|
||||
*/
|
||||
|
||||
#define RTMP_NOTIFICATION 0
|
||||
|
||||
/**
|
||||
* emulated Flash client version - 9.0.124.2 on Linux
|
||||
* @{
|
||||
|
@@ -91,6 +91,7 @@ typedef struct RTMPContext {
     char* flashver;           ///< version of the flash plugin
     char* swfurl;             ///< url of the swf player
     char* pageurl;            ///< url of the web page
+    char* subscribe;          ///< name of live stream to subscribe
     int server_bw;            ///< server bandwidth
     int client_buffer_time;   ///< client buffer time in ms
     int flush_interval;       ///< number of packets flushed in the same request (RTMPT only)
@@ -572,7 +573,7 @@ static int gen_check_bw(URLContext *s, RTMPContext *rt)

     p = pkt.data;
     ff_amf_write_string(&p, "_checkbw");
-    ff_amf_write_number(&p, ++rt->nb_invokes);
+    ff_amf_write_number(&p, RTMP_NOTIFICATION);
     ff_amf_write_null(&p);

     ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
@@ -604,6 +605,30 @@ static int gen_bytes_read(URLContext *s, RTMPContext *rt, uint32_t ts)
     return ret;
 }

+static int gen_fcsubscribe_stream(URLContext *s, RTMPContext *rt,
+                                  const char *subscribe)
+{
+    RTMPPacket pkt;
+    uint8_t *p;
+    int ret;
+
+    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
+                                     0, 27 + strlen(subscribe))) < 0)
+        return ret;
+
+    p = pkt.data;
+    ff_amf_write_string(&p, "FCSubscribe");
+    ff_amf_write_number(&p, ++rt->nb_invokes);
+    ff_amf_write_null(&p);
+    ff_amf_write_string(&p, subscribe);
+
+    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
+                               rt->prev_pkt[1]);
+    ff_rtmp_packet_destroy(&pkt);
+
+    return ret;
+}
+
 int ff_rtmp_calc_digest(const uint8_t *src, int len, int gap,
                         const uint8_t *key, int keylen, uint8_t *dst)
 {
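The 27 + strlen(subscribe) size passed to ff_rtmp_packet_create() follows from standard AMF0 encoding sizes: a string costs 1 type byte plus a 2-byte length plus its characters, a number costs 1 + 8, and null costs 1. A small sketch of the arithmetic, with a placeholder stream name:

    #include <stdio.h>
    #include <string.h>

    /* AMF0 sizes assumed here: 1 type byte everywhere, 2-byte length
     * prefix on strings, 8-byte IEEE double for numbers. */
    static size_t amf0_string_size(const char *s) { return 1 + 2 + strlen(s); }
    static size_t amf0_number_size(void)          { return 1 + 8; }
    static size_t amf0_null_size(void)            { return 1; }

    int main(void)
    {
        const char *subscribe = "mystream";           /* placeholder name */
        size_t size = amf0_string_size("FCSubscribe") /* 14 */
                    + amf0_number_size()              /*  9 */
                    + amf0_null_size()                /*  1 */
                    + amf0_string_size(subscribe);    /*  3 + strlen */
        printf("packet size = %zu (= 27 + %zu)\n", size, strlen(subscribe));
        return 0;
    }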
@@ -1011,6 +1036,20 @@ static int handle_invoke(URLContext *s, RTMPPacket *pkt)
             }
             if ((ret = gen_create_stream(s, rt)) < 0)
                 return ret;
+
+            if (rt->is_input) {
+                /* Send the FCSubscribe command when the name of live
+                 * stream is defined by the user or if it's a live stream. */
+                if (rt->subscribe) {
+                    if ((ret = gen_fcsubscribe_stream(s, rt,
+                                                      rt->subscribe)) < 0)
+                        return ret;
+                } else if (rt->live == -1) {
+                    if ((ret = gen_fcsubscribe_stream(s, rt,
+                                                      rt->playpath)) < 0)
+                        return ret;
+                }
+            }
             break;
         case STATE_FCPUBLISH:
             rt->state = STATE_CONNECTING;
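As I understand the option plumbing, rtmp_subscribe can be set like any other protocol private option through the dictionary passed to avio_open2(). The sketch below is a minimal illustration with a placeholder URL and stream name, not a tested client:

    #include <libavformat/avformat.h>
    #include <libavformat/avio.h>
    #include <libavutil/dict.h>
    #include <stdio.h>

    int main(void)
    {
        AVIOContext  *io   = NULL;
        AVDictionary *opts = NULL;
        int ret;

        av_register_all();
        avformat_network_init();

        /* Ask the RTMP protocol to send FCSubscribe for this stream name. */
        av_dict_set(&opts, "rtmp_subscribe", "mystream", 0);

        ret = avio_open2(&io, "rtmp://example.com/live/mystream",
                         AVIO_FLAG_READ, NULL, &opts);
        if (ret < 0)
            fprintf(stderr, "open failed: %d\n", ret);
        else
            avio_close(io);

        av_dict_free(&opts);
        avformat_network_deinit();
        return 0;
    }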
@@ -1593,115 +1632,35 @@ static const AVOption rtmp_options[] = {
     {"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {0}, 0, 0, DEC, "rtmp_live"},
     {"rtmp_pageurl", "URL of the web page in which the media was embedded. By default no value will be sent.", OFFSET(pageurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
     {"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
+    {"rtmp_subscribe", "Name of live stream to subscribe to. Defaults to rtmp_playpath.", OFFSET(subscribe), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
     {"rtmp_swfurl", "URL of the SWF player. By default no value will be sent", OFFSET(swfurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
     {"rtmp_tcurl", "URL of the target stream. Defaults to proto://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
     { NULL },
 };

-static const AVClass rtmp_class = {
-    .class_name = "rtmp",
-    .item_name = av_default_item_name,
-    .option = rtmp_options,
-    .version = LIBAVUTIL_VERSION_INT,
-};
-
-URLProtocol ff_rtmp_protocol = {
-    .name = "rtmp",
-    .url_open = rtmp_open,
-    .url_read = rtmp_read,
-    .url_write = rtmp_write,
-    .url_close = rtmp_close,
-    .priv_data_size = sizeof(RTMPContext),
-    .flags = URL_PROTOCOL_FLAG_NETWORK,
-    .priv_data_class= &rtmp_class,
-};
-
-static const AVClass rtmpe_class = {
-    .class_name = "rtmpe",
-    .item_name = av_default_item_name,
-    .option = rtmp_options,
-    .version = LIBAVUTIL_VERSION_INT,
-};
-
-URLProtocol ff_rtmpe_protocol = {
-    .name = "rtmpe",
-    .url_open = rtmp_open,
-    .url_read = rtmp_read,
-    .url_write = rtmp_write,
-    .url_close = rtmp_close,
-    .priv_data_size = sizeof(RTMPContext),
-    .flags = URL_PROTOCOL_FLAG_NETWORK,
-    .priv_data_class = &rtmpe_class,
-};
-
-static const AVClass rtmps_class = {
-    .class_name = "rtmps",
-    .item_name = av_default_item_name,
-    .option = rtmp_options,
-    .version = LIBAVUTIL_VERSION_INT,
-};
-
-URLProtocol ff_rtmps_protocol = {
-    .name = "rtmps",
-    .url_open = rtmp_open,
-    .url_read = rtmp_read,
-    .url_write = rtmp_write,
-    .url_close = rtmp_close,
-    .priv_data_size = sizeof(RTMPContext),
-    .flags = URL_PROTOCOL_FLAG_NETWORK,
-    .priv_data_class = &rtmps_class,
-};
-
-static const AVClass rtmpt_class = {
-    .class_name = "rtmpt",
-    .item_name = av_default_item_name,
-    .option = rtmp_options,
-    .version = LIBAVUTIL_VERSION_INT,
-};
-
-URLProtocol ff_rtmpt_protocol = {
-    .name = "rtmpt",
-    .url_open = rtmp_open,
-    .url_read = rtmp_read,
-    .url_write = rtmp_write,
-    .url_close = rtmp_close,
-    .priv_data_size = sizeof(RTMPContext),
-    .flags = URL_PROTOCOL_FLAG_NETWORK,
-    .priv_data_class = &rtmpt_class,
-};
-
-static const AVClass rtmpte_class = {
-    .class_name = "rtmpte",
-    .item_name = av_default_item_name,
-    .option = rtmp_options,
-    .version = LIBAVUTIL_VERSION_INT,
-};
-
-URLProtocol ff_rtmpte_protocol = {
-    .name = "rtmpte",
-    .url_open = rtmp_open,
-    .url_read = rtmp_read,
-    .url_write = rtmp_write,
-    .url_close = rtmp_close,
-    .priv_data_size = sizeof(RTMPContext),
-    .flags = URL_PROTOCOL_FLAG_NETWORK,
-    .priv_data_class = &rtmpte_class,
-};
-
-static const AVClass rtmpts_class = {
-    .class_name = "rtmpts",
-    .item_name = av_default_item_name,
-    .option = rtmp_options,
-    .version = LIBAVUTIL_VERSION_INT,
-};
-
-URLProtocol ff_rtmpts_protocol = {
-    .name = "rtmpts",
-    .url_open = rtmp_open,
-    .url_read = rtmp_read,
-    .url_write = rtmp_write,
-    .url_close = rtmp_close,
-    .priv_data_size = sizeof(RTMPContext),
-    .flags = URL_PROTOCOL_FLAG_NETWORK,
-    .priv_data_class = &rtmpts_class,
-};
+#define RTMP_PROTOCOL(flavor) \
+static const AVClass flavor##_class = { \
+    .class_name = #flavor, \
+    .item_name = av_default_item_name, \
+    .option = rtmp_options, \
+    .version = LIBAVUTIL_VERSION_INT, \
+}; \
+ \
+URLProtocol ff_##flavor##_protocol = { \
+    .name = #flavor, \
+    .url_open = rtmp_open, \
+    .url_read = rtmp_read, \
+    .url_write = rtmp_write, \
+    .url_close = rtmp_close, \
+    .priv_data_size = sizeof(RTMPContext), \
+    .flags = URL_PROTOCOL_FLAG_NETWORK, \
+    .priv_data_class= &flavor##_class, \
+};
+
+RTMP_PROTOCOL(rtmp)
+RTMP_PROTOCOL(rtmpe)
+RTMP_PROTOCOL(rtmps)
+RTMP_PROTOCOL(rtmpt)
+RTMP_PROTOCOL(rtmpte)
+RTMP_PROTOCOL(rtmpts)

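The refactor relies on the preprocessor's stringize (#) and token-paste (##) operators so that one macro body yields both the per-flavor AVClass and the URLProtocol. A self-contained demo of just that mechanism; Proto and DECLARE_PROTO are stand-ins, not the lavf types used above:

    #include <stdio.h>

    /* Stand-in type; the real macro declares an AVClass plus a URLProtocol. */
    typedef struct Proto { const char *name; } Proto;

    /* #flavor turns the token into a string, flavor## pastes it into a name. */
    #define DECLARE_PROTO(flavor) \
    static const Proto flavor##_proto = { .name = #flavor };

    DECLARE_PROTO(rtmp)
    DECLARE_PROTO(rtmps)

    int main(void)
    {
        printf("%s %s\n", rtmp_proto.name, rtmps_proto.name); /* "rtmp rtmps" */
        return 0;
    }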
@@ -31,7 +31,7 @@

 #define LIBAVFORMAT_VERSION_MAJOR 54
 #define LIBAVFORMAT_VERSION_MINOR 22
-#define LIBAVFORMAT_VERSION_MICRO 102
+#define LIBAVFORMAT_VERSION_MICRO 103

 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
                                                LIBAVFORMAT_VERSION_MINOR, \
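For reference, the micro bump is visible once the three components are packed into one comparable integer; the macro below mirrors, to the best of my knowledge, the major<<16 | minor<<8 | micro packing used by AV_VERSION_INT:

    #include <stdio.h>

    /* Illustrative copy of the packing, not libavutil's header. */
    #define VERSION_INT(a, b, c) ((a) << 16 | (b) << 8 | (c))

    int main(void)
    {
        printf("54.22.102 -> %d\n", VERSION_INT(54, 22, 102)); /* 3544678 */
        printf("54.22.103 -> %d\n", VERSION_INT(54, 22, 103)); /* 3544679 */
        return 0;
    }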
@@ -97,6 +97,8 @@ static av_always_inline av_const int FASTDIV(int a, int b)

 #endif /* HAVE_ARMV6 */

+#if HAVE_ASM_MOD_Q
+
 #define av_clipl_int32 av_clipl_int32_arm
 static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
 {
@@ -110,6 +112,8 @@ static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
     return x;
 }

+#endif /* HAVE_ASM_MOD_Q */
+
 #endif /* HAVE_INLINE_ASM */

 #endif /* AVUTIL_ARM_INTMATH_H */
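The new guard appears to cover the %Q/%R inline-asm operand modifiers (low/high half of a 64-bit value) that not every ARM toolchain accepts; when it is false, a portable clip does the same job. The sketch below is an illustration, not libavutil's actual generic av_clipl_int32:

    #include <stdint.h>
    #include <stdio.h>

    /* Portable 64-bit -> 32-bit saturating clip. */
    static int32_t clipl_int32(int64_t a)
    {
        if (a > INT32_MAX) return INT32_MAX;
        if (a < INT32_MIN) return INT32_MIN;
        return (int32_t)a;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               clipl_int32((int64_t)1 << 40),     /*  2147483647 */
               clipl_int32(-((int64_t)1 << 40)),  /* -2147483648 */
               clipl_int32(12345));               /*  12345      */
        return 0;
    }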
@@ -61,6 +61,8 @@ static av_always_inline void AV_WN32(void *p, uint32_t v)
     __asm__ ("str %1, %0" : "=m"(*(uint32_t *)p) : "r"(v));
 }

+#if HAVE_ASM_MOD_Q
+
 #define AV_RN64 AV_RN64
 static av_always_inline uint64_t AV_RN64(const void *p)
 {
@@ -82,6 +84,8 @@ static av_always_inline void AV_WN64(void *p, uint64_t v)
              : "r"(v));
 }

+#endif /* HAVE_ASM_MOD_Q */
+
 #endif /* HAVE_INLINE_ASM */

 #endif /* AVUTIL_ARM_INTREADWRITE_H */
@@ -20,7 +20,7 @@ $(SUBDIR)x86/%.o: $(SUBDIR)x86/%.asm
	$(DEPYASM) $(YASMFLAGS) -I $(<D)/ -M -o $@ $< > $(@:.o=.d)
	$(YASM) $(YASMFLAGS) -I $(<D)/ -o $@ $<

-$(OBJS) $(OBJS:.o=.s) $(SUBDIR)%.ho $(TESTOBJS): CPPFLAGS += -DHAVE_AV_CONFIG_H
+$(OBJS) $(OBJS:.o=.s) $(SUBDIR)%.h.o $(TESTOBJS): CPPFLAGS += -DHAVE_AV_CONFIG_H
 $(TESTOBJS): CPPFLAGS += -DTEST

 $(SUBDIR)$(LIBNAME): $(OBJS)
@@ -87,7 +87,7 @@ uninstall-libs::
	-$(RM) "$(LIBDIR)/$(LIBNAME)"

 uninstall-headers::
-	$(RM) $(addprefix "$(INCINSTDIR)/",$(HEADERS)) $(addprefix "$(INCINSTDIR)/",$(BUILT_HEADERS))
+	$(RM) $(addprefix "$(INCINSTDIR)/",$(HEADERS) $(BUILT_HEADERS))
	$(RM) "$(LIBDIR)/pkgconfig/lib$(NAME).pc"
	-rmdir "$(INCINSTDIR)"
 endef
@@ -167,8 +167,14 @@ FATE_VIDEO += fate-mpeg2-field-enc
 fate-mpeg2-field-enc: CMD = framecrc -flags +bitexact -dct fastint -idct simple -i $(SAMPLES)/mpeg2/mpeg2_field_encoding.ts -an

 # FIXME dropped frames in this test because of coarse timebase
-FATE_VIDEO += fate-nuv
-fate-nuv: CMD = framecrc -idct simple -i $(SAMPLES)/nuv/Today.nuv -an
+FATE_NUV += fate-nuv-rtjpeg
+fate-nuv-rtjpeg: CMD = framecrc -idct simple -i $(SAMPLES)/nuv/Today.nuv -an
+
+FATE_NUV += fate-nuv-rtjpeg-fh
+fate-nuv-rtjpeg-fh: CMD = framecrc -idct simple -i $(SAMPLES)/nuv/rtjpeg_frameheader.nuv -an
+
+FATE_VIDEO += $(FATE_NUV)
+fate-nuv: $(FATE_NUV)

 FATE_VIDEO += fate-paf-video
 fate-paf-video: CMD = framecrc -i $(SAMPLES)/paf/hod1-partial.paf -pix_fmt rgb24 -an
@@ -1,4 +1,4 @@
 dec0deb2425e908d232d2471acff04a3 *tests/data/fate/acodec-g723_1.g723_1
 4800 tests/data/fate/acodec-g723_1.g723_1
-87fd529c9e41914f73a865d147cc9516 *tests/data/fate/acodec-g723_1.out.wav
-stddev: 8425.98 PSNR: 17.82 MAXDIFF:53268 bytes: 95992/ 96000
+d70776846d77c652bceed281fcca9cc8 *tests/data/fate/acodec-g723_1.out.wav
+stddev: 8423.47 PSNR: 17.82 MAXDIFF:53292 bytes: 95992/ 96000
tests/ref/fate/nuv-rtjpeg-fh (new file, 51 lines)
@@ -0,0 +1,51 @@
#tb 0: 1/50
0, 0, 0, 1, 221184, 0xf48c94f6
0, 2, 2, 1, 221184, 0x89b625b2
0, 3, 3, 1, 221184, 0x37e04714
0, 4, 4, 1, 221184, 0x4f4c5224
0, 5, 5, 1, 221184, 0x9193c9f1
0, 6, 6, 1, 221184, 0x5d1a6197
0, 7, 7, 1, 221184, 0x40cd51e7
0, 8, 8, 1, 221184, 0xb2c1a729
0, 10, 10, 1, 221184, 0x998d6144
0, 11, 11, 1, 221184, 0xf5d52311
0, 12, 12, 1, 221184, 0xea9dd6bf
0, 13, 13, 1, 221184, 0x0e2ed854
0, 14, 14, 1, 221184, 0xe295ba58
0, 15, 15, 1, 221184, 0x8aedbb69
0, 16, 16, 1, 221184, 0x253c9aaa
0, 17, 17, 1, 221184, 0x5eaf9fb1
0, 18, 18, 1, 221184, 0xcdb5a0cb
0, 19, 19, 1, 221184, 0xcdb5a0cb
0, 20, 20, 1, 221184, 0x23f89994
0, 21, 21, 1, 221184, 0x23f89994
0, 22, 22, 1, 221184, 0x10dc98d6
0, 23, 23, 1, 221184, 0x799b9d98
0, 24, 24, 1, 221184, 0xb226996c
0, 25, 25, 1, 221184, 0x0ac59a42
0, 26, 26, 1, 221184, 0x87c2a654
0, 27, 27, 1, 221184, 0xf4c1a711
0, 28, 28, 1, 221184, 0xf60fa72e
0, 29, 29, 1, 221184, 0xc8f8b6fc
0, 30, 30, 1, 221184, 0xd709b813
0, 31, 31, 1, 221184, 0x5fdfb76b
0, 32, 32, 1, 221184, 0x5798b0aa
0, 33, 33, 1, 221184, 0xf572b1c3
0, 34, 34, 1, 221184, 0x14b0afdf
0, 35, 35, 1, 221184, 0x0a66b5b8
0, 36, 36, 1, 221184, 0xe316c620
0, 37, 37, 1, 221184, 0xbc76c5c2
0, 38, 38, 1, 221184, 0x77c7c5e5
0, 39, 39, 1, 221184, 0xfc7ac63e
0, 40, 40, 1, 221184, 0x05a29ffe
0, 41, 41, 1, 221184, 0x9bffbf6c
0, 42, 42, 1, 221184, 0x3c55be40
0, 43, 43, 1, 221184, 0x6f46c14e
0, 44, 44, 1, 221184, 0x9cf4ae70
0, 45, 45, 1, 221184, 0xf205b2f8
0, 46, 46, 1, 221184, 0x7180aff8
0, 47, 47, 1, 221184, 0x125eaffe
0, 48, 48, 1, 221184, 0x6970a32d
0, 49, 49, 1, 221184, 0xaea79f62
0, 50, 50, 1, 221184, 0x48d2a093
0, 51, 51, 1, 221184, 0x10a59eb5