
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  APIchanges: fill in missing hashes and dates.
  Add an APIChanges entry and bump minor versions for recent changes.
  ffmpeg: print the low bitrate warning after the codec is opened.
  doxygen: Move function documentation into the macro generating the function.
  doxygen: Make sure parameter names match between .c and .h files.
  h264: move fill_decode_neighbors()/fill_decode_caches() to h264_mvpred.h
  H.264: Add more x86 assembly for 10-bit H.264 predict functions
  lavf: fix invalid reads in avformat_find_stream_info()
  cmdutils: replace opt_default with opt_default2() and remove set_context_opts
  ffmpeg: use new avcodec_open2 and avformat_find_stream_info API.
  ffplay: use new avcodec_open2 and avformat_find_stream_info API.
  cmdutils: store all codec options in one dict instead of video/audio/sub
  ffmpeg: check experimental flag after codec is opened.
  ffmpeg: do not set GLOBAL_HEADER flag in the options context

Conflicts:
	cmdutils.c
	doc/APIchanges
	ffmpeg.c
	ffplay.c
	libavcodec/version.h
	libavformat/version.h
	libswscale/swscale_unscaled.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Author: Michael Niedermayer <michaelni@gmx.at>
Date:   2011-07-14 20:44:58 +02:00
Commit: 5dc6bd86f0

26 changed files with 941 additions and 626 deletions
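The changes merged here move ffmpeg and ffplay from avcodec_open()/av_find_stream_info() to avcodec_open2()/avformat_find_stream_info(), which take AVDictionary option sets and leave unrecognized entries behind for the caller to report. A minimal sketch of that calling pattern against the lavf/lavc API of this era; the function name, the file argument, the "threads" option and the error handling are illustrative only, not part of this commit:

/* Sketch only: open an input and its first stream's decoder with the
 * dictionary-based API adopted by this merge. */
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

static int open_first_decoder(const char *filename)
{
    AVFormatContext *ic = NULL;
    AVDictionary *opts = NULL;
    AVDictionaryEntry *t = NULL;
    AVCodecContext *avctx;
    AVCodec *dec;
    int ret;

    av_register_all();
    if ((ret = avformat_open_input(&ic, filename, NULL, NULL)) < 0)
        return ret;
    /* NULL means default options; ffmpeg.c passes one dictionary per stream. */
    if ((ret = avformat_find_stream_info(ic, NULL)) < 0)
        goto end;

    avctx = ic->streams[0]->codec;
    dec   = avcodec_find_decoder(avctx->codec_id);
    av_dict_set(&opts, "threads", "2", 0);          /* illustrative option */
    if (!dec || (ret = avcodec_open2(avctx, dec, &opts)) < 0)
        goto end;

    /* Anything still left in opts was not consumed by the decoder. */
    while ((t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX)))
        av_log(NULL, AV_LOG_WARNING, "Option '%s' not recognized\n", t->key);

end:
    av_dict_free(&opts);
    av_close_input_file(ic);
    return ret < 0 ? ret : 0;
}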


@@ -1382,7 +1382,8 @@ PREDEFINED = "__attribute__(x)=" \
# The macro definition that is found in the sources will be used.
# Use the PREDEFINED tag if you want to use a different macro definition.
-EXPAND_AS_DEFINED = declare_idct
+EXPAND_AS_DEFINED = declare_idct \
+                    READ_PAR_DATA \
# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
# doxygen's preprocessor will remove all function-like macros that are alone


@@ -49,13 +49,10 @@
#include <sys/resource.h>
#endif

-const char **opt_names;
-const char **opt_values;
-static int opt_name_count;
AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB];
AVFormatContext *avformat_opts;
struct SwsContext *sws_opts;
-AVDictionary *format_opts, *video_opts, *audio_opts, *sub_opts;
+AVDictionary *format_opts, *codec_opts;

static const int this_year = 2011;
@@ -81,17 +78,8 @@ void uninit_opts(void)
    sws_freeContext(sws_opts);
    sws_opts = NULL;
#endif
-    for (i = 0; i < opt_name_count; i++) {
-        av_freep(&opt_names[i]);
-        av_freep(&opt_values[i]);
-    }
-    av_freep(&opt_names);
-    av_freep(&opt_values);
-    opt_name_count = 0;
    av_dict_free(&format_opts);
-    av_dict_free(&video_opts);
-    av_dict_free(&audio_opts);
-    av_dict_free(&sub_opts);
+    av_dict_free(&codec_opts);
}

void log_callback_help(void* ptr, int level, const char* fmt, va_list vl)
@@ -297,20 +285,14 @@ unknown_opt:
}

#define FLAGS (o->type == FF_OPT_TYPE_FLAGS) ? AV_DICT_APPEND : 0
-#define SET_PREFIXED_OPTS(ch, flag, output) \
-    if (opt[0] == ch && avcodec_opts[0] && (o = av_opt_find(avcodec_opts[0], opt+1, NULL, flag, 0)))\
-        av_dict_set(&output, opt+1, arg, FLAGS);
-static int opt_default2(const char *opt, const char *arg)
+int opt_default(const char *opt, const char *arg)
{
    const AVOption *o;
-    if ((o = av_opt_find(avcodec_opts[0], opt, NULL, 0, AV_OPT_SEARCH_CHILDREN))) {
-        if (o->flags & AV_OPT_FLAG_VIDEO_PARAM)
-            av_dict_set(&video_opts, opt, arg, FLAGS);
-        if (o->flags & AV_OPT_FLAG_AUDIO_PARAM)
-            av_dict_set(&audio_opts, opt, arg, FLAGS);
-        if (o->flags & AV_OPT_FLAG_SUBTITLE_PARAM)
-            av_dict_set(&sub_opts, opt, arg, FLAGS);
-    } else if ((o = av_opt_find(avformat_opts, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN)))
+    if ((o = av_opt_find(avcodec_opts[0], opt, NULL, 0, AV_OPT_SEARCH_CHILDREN)) ||
+        ((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') &&
+         (o = av_opt_find(avcodec_opts[0], opt+1, NULL, 0, 0))))
+        av_dict_set(&codec_opts, opt, arg, FLAGS);
+    else if ((o = av_opt_find(avformat_opts, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN)))
        av_dict_set(&format_opts, opt, arg, FLAGS);
    else if ((o = av_opt_find(sws_opts, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN))) {
        // XXX we only support sws_flags, not arbitrary sws options
@@ -321,18 +303,13 @@ static int opt_default2(const char *opt, const char *arg)
        }
    }

-    if (!o) {
-        SET_PREFIXED_OPTS('v', AV_OPT_FLAG_VIDEO_PARAM, video_opts)
-        SET_PREFIXED_OPTS('a', AV_OPT_FLAG_AUDIO_PARAM, audio_opts)
-        SET_PREFIXED_OPTS('s', AV_OPT_FLAG_SUBTITLE_PARAM, sub_opts)
-    }
    if (o)
        return 0;
    fprintf(stderr, "Unrecognized option '%s'\n", opt);
    return AVERROR_OPTION_NOT_FOUND;
}

+#if 0
+<<<<<<< HEAD
int opt_default(const char *opt, const char *arg){
    int type;
    int ret= 0;
@@ -408,6 +385,70 @@ int opt_default(const char *opt, const char *arg){
    return 0;
}
||||||| merged common ancestors
int opt_default(const char *opt, const char *arg){
int type;
int ret= 0;
const AVOption *o= NULL;
int opt_types[]={AV_OPT_FLAG_VIDEO_PARAM, AV_OPT_FLAG_AUDIO_PARAM, 0, AV_OPT_FLAG_SUBTITLE_PARAM, 0};
for(type=0; *avcodec_opts && type<AVMEDIA_TYPE_NB && ret>= 0; type++){
const AVOption *o2 = av_opt_find(avcodec_opts[0], opt, NULL, opt_types[type], 0);
if(o2)
ret = av_set_string3(avcodec_opts[type], opt, arg, 1, &o);
}
if(!o && avformat_opts)
ret = av_set_string3(avformat_opts, opt, arg, 1, &o);
if(!o && sws_opts)
ret = av_set_string3(sws_opts, opt, arg, 1, &o);
if(!o){
if (opt[0] == 'a' && avcodec_opts[AVMEDIA_TYPE_AUDIO])
ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_AUDIO], opt+1, arg, 1, &o);
else if(opt[0] == 'v' && avcodec_opts[AVMEDIA_TYPE_VIDEO])
ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_VIDEO], opt+1, arg, 1, &o);
else if(opt[0] == 's' && avcodec_opts[AVMEDIA_TYPE_SUBTITLE])
ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_SUBTITLE], opt+1, arg, 1, &o);
}
if (o && ret < 0) {
fprintf(stderr, "Invalid value '%s' for option '%s'\n", arg, opt);
exit(1);
}
if (!o) {
AVCodec *p = NULL;
AVOutputFormat *oformat = NULL;
while ((p=av_codec_next(p))){
const AVClass *c = p->priv_class;
if(c && av_opt_find(&c, opt, NULL, 0, 0))
break;
}
if (!p) {
while ((oformat = av_oformat_next(oformat))) {
const AVClass *c = oformat->priv_class;
if (c && av_opt_find(&c, opt, NULL, 0, 0))
break;
}
}
}
if ((ret = opt_default2(opt, arg)) < 0)
return ret;
// av_log(NULL, AV_LOG_ERROR, "%s:%s: %f 0x%0X\n", opt, arg, av_get_double(avcodec_opts, opt, NULL), (int)av_get_int(avcodec_opts, opt, NULL));
//FIXME we should always use avcodec_opts, ... for storing options so there will not be any need to keep track of what i set over this
opt_values= av_realloc(opt_values, sizeof(void*)*(opt_name_count+1));
opt_values[opt_name_count]= o ? NULL : av_strdup(arg);
opt_names= av_realloc(opt_names, sizeof(void*)*(opt_name_count+1));
opt_names[opt_name_count++]= o ? o->name : av_strdup(opt);
if ((*avcodec_opts && avcodec_opts[0]->debug) || (avformat_opts && avformat_opts->debug))
av_log_set_level(AV_LOG_DEBUG);
return 0;
}
=======
>>>>>>> qatar/master
#endif
int opt_loglevel(const char *opt, const char *arg)
{
    const struct { const char *name; int level; } log_levels[] = {
@@ -456,59 +497,6 @@ int opt_timelimit(const char *opt, const char *arg)
    return 0;
}
static void *alloc_priv_context(int size, const AVClass *class)
{
void *p = av_mallocz(size);
if (p) {
*(const AVClass **)p = class;
av_opt_set_defaults(p);
}
return p;
}
void set_context_opts(void *ctx, void *opts_ctx, int flags, AVCodec *codec)
{
int i;
void *priv_ctx=NULL;
if(!strcmp("AVCodecContext", (*(AVClass**)ctx)->class_name)){
AVCodecContext *avctx= ctx;
if(codec && codec->priv_class){
if(!avctx->priv_data && codec->priv_data_size)
avctx->priv_data= alloc_priv_context(codec->priv_data_size, codec->priv_class);
priv_ctx= avctx->priv_data;
}
} else if (!strcmp("AVFormatContext", (*(AVClass**)ctx)->class_name)) {
AVFormatContext *avctx = ctx;
if (avctx->oformat && avctx->oformat->priv_class) {
priv_ctx = avctx->priv_data;
} else if (avctx->iformat && avctx->iformat->priv_class) {
priv_ctx = avctx->priv_data;
}
}
for(i=0; i<opt_name_count; i++){
char buf[256];
const AVOption *opt;
const char *str;
if (priv_ctx) {
if (av_find_opt(priv_ctx, opt_names[i], NULL, flags, flags)) {
if (av_set_string3(priv_ctx, opt_names[i], opt_values[i], 1, NULL) < 0) {
fprintf(stderr, "Invalid value '%s' for option '%s'\n",
opt_values[i], opt_names[i]);
exit(1);
}
} else
goto global;
} else {
global:
str = av_get_string(opts_ctx, opt_names[i], &opt, buf, sizeof(buf));
/* if an option with name opt_names[i] is present in opts_ctx then str is non-NULL */
if (str && ((opt->flags & flags) == flags))
av_set_string3(ctx, opt_names[i], str, 1, NULL);
}
}
}
void print_error(const char *filename, int err)
{
    char errbuf[128];
@@ -934,3 +922,48 @@ FILE *get_preset_file(char *filename, size_t filename_size,
    return f;
}
AVDictionary *filter_codec_opts(AVDictionary *opts, enum CodecID codec_id, int encoder)
{
AVDictionary *ret = NULL;
AVDictionaryEntry *t = NULL;
AVCodec *codec = encoder ? avcodec_find_encoder(codec_id) : avcodec_find_decoder(codec_id);
int flags = encoder ? AV_OPT_FLAG_ENCODING_PARAM : AV_OPT_FLAG_DECODING_PARAM;
char prefix = 0;
if (!codec)
return NULL;
switch (codec->type) {
case AVMEDIA_TYPE_VIDEO: prefix = 'v'; flags |= AV_OPT_FLAG_VIDEO_PARAM; break;
case AVMEDIA_TYPE_AUDIO: prefix = 'a'; flags |= AV_OPT_FLAG_AUDIO_PARAM; break;
case AVMEDIA_TYPE_SUBTITLE: prefix = 's'; flags |= AV_OPT_FLAG_SUBTITLE_PARAM; break;
}
while (t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX)) {
if (av_opt_find(avcodec_opts[0], t->key, NULL, flags, 0) ||
(codec && codec->priv_class && av_opt_find(&codec->priv_class, t->key, NULL, flags, 0)))
av_dict_set(&ret, t->key, t->value, 0);
else if (t->key[0] == prefix && av_opt_find(avcodec_opts[0], t->key+1, NULL, flags, 0))
av_dict_set(&ret, t->key+1, t->value, 0);
}
return ret;
}
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s)
{
int i;
AVDictionary **opts;
if (!s->nb_streams)
return NULL;
opts = av_mallocz(s->nb_streams * sizeof(*opts));
if (!opts) {
av_log(NULL, AV_LOG_ERROR, "Could not alloc memory for stream options.\n");
return NULL;
}
for (i = 0; i < s->nb_streams; i++)
opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codec->codec_id, 0);
return opts;
}
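The two helpers added above split the flat codec_opts dictionary (filled by opt_default()) into per-codec and per-stream option sets. A hedged sketch of how a caller combines them, assuming an already-opened AVFormatContext *ic and an AVStream *st; the wrapper name probe_and_open() is made up, and error handling is omitted:

/* Sketch: per-stream options for avformat_find_stream_info(), then a
 * filtered dictionary for one decoder. */
static void probe_and_open(AVFormatContext *ic, AVStream *st)
{
    /* Built from the global codec_opts declared earlier in this file. */
    AVDictionary **si_opts = setup_find_stream_info_opts(ic);
    int i, orig_nb_streams = ic->nb_streams;
    AVDictionary *dec_opts;

    avformat_find_stream_info(ic, si_opts);
    if (si_opts) {
        for (i = 0; i < orig_nb_streams; i++)
            av_dict_free(&si_opts[i]);
        av_freep(&si_opts);
    }

    /* Keep only the entries that apply to this stream's decoder. */
    dec_opts = filter_codec_opts(codec_opts, st->codec->codec_id, 0);
    avcodec_open2(st->codec, avcodec_find_decoder(st->codec->codec_id), &dec_opts);
    av_dict_free(&dec_opts);
}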


@@ -43,11 +43,10 @@ extern const char program_name[];
 */
extern const int program_birth_year;
-extern const char **opt_names;
extern AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB];
extern AVFormatContext *avformat_opts;
extern struct SwsContext *sws_opts;
-extern AVDictionary *format_opts, *video_opts, *audio_opts, *sub_opts;
+extern AVDictionary *format_opts, *codec_opts;

/**
 * Initialize the cmdutils option system, in particular

@@ -153,7 +152,15 @@ void show_help_options(const OptionDef *options, const char *msg, int mask, int
void parse_options(int argc, char **argv, const OptionDef *options,
                   int (* parse_arg_function)(const char *opt, const char *arg));

-void set_context_opts(void *ctx, void *opts_ctx, int flags, AVCodec *codec);
+/**
+ * Filter out options for given codec.
+ */
+AVDictionary *filter_codec_opts(AVDictionary *opts, enum CodecID codec_id, int encoder);
+
+/*
+ * Setup AVCodecContext options for avformat_find_stream_info.
+ */
+AVDictionary **setup_find_stream_info_opts(AVFormatContext *s);

/**
 * Print an error message to stderr, indicating filename and a human


@@ -13,6 +13,12 @@ libavutil: 2011-04-18

API changes, most recent first:

+2011-07-10 - a67c061 - lavf 53.3.0
+  Add avformat_find_stream_info(), deprecate av_find_stream_info().
+
+2011-07-10 - 0b950fe - lavc 53.6.0
+  Add avcodec_open2(), deprecate avcodec_open().
+
2011-07-01 - b442ca6 - lavf 53.5.0 - avformat.h
  Add function av_get_output_timestamp().

@@ -49,20 +55,20 @@
2011-06-12 - xxxxxxx - lavfi 2.16.0 - avfilter_graph_parse()
  Change avfilter_graph_parse() signature.

-2011-06-xx - xxxxxxx - lavu 51.8.0 - attributes.h
+2011-06-23 - 67e9ae1 - lavu 51.8.0 - attributes.h
  Add av_printf_format().

-2011-06-xx - xxxxxxx - lavf 53.2.0 - avformat.h
+2011-06-16 - 05e84c9, 25de595 - lavf 53.2.0 - avformat.h
  Add avformat_open_input and avformat_write_header().
  Deprecate av_open_input_stream, av_open_input_file,
  AVFormatParameters and av_write_header.

-2011-06-xx - xxxxxxx - lavu 51.7.0 - opt.h
+2011-06-16 - 7e83e1c, dc59ec5 - lavu 51.7.0 - opt.h
  Add av_opt_set_dict() and av_opt_find().
  Deprecate av_find_opt().
  Add AV_DICT_APPEND flag.

-2011-06-xx - xxxxxxx - lavu 51.6.0 - opt.h
+2011-06-10 - cb7c11c - lavu 51.6.0 - opt.h
  Add av_opt_flag_is_set().

2011-06-10 - c381960 - lavfi 2.15.0 - avfilter_get_audio_buffer_ref_from_arrays
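For reference, a hedged sketch of the opt.h additions listed above, av_opt_find() with AV_OPT_SEARCH_CHILDREN, av_opt_set_dict(), and the AV_DICT_APPEND flag; the option values and the context argument are placeholders, not taken from this commit:

/* Sketch: apply a dictionary of options to any AVClass-enabled context. */
#include <libavutil/opt.h>
#include <libavutil/dict.h>

static void apply_flags(void *ctx)
{
    AVDictionary *opts = NULL;

    av_dict_set(&opts, "flags", "+low_delay", 0);
    av_dict_set(&opts, "flags", "+gray", AV_DICT_APPEND); /* value strings concatenate */

    /* av_opt_find() replaces the deprecated av_find_opt(); searching children
     * lets e.g. an AVFormatContext also resolve options of its private context. */
    if (av_opt_find(ctx, "flags", NULL, 0, AV_OPT_SEARCH_CHILDREN))
        av_opt_set_dict(ctx, &opts);   /* consumed entries are removed from opts */

    av_dict_free(&opts);               /* whatever was not consumed */
}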


@@ -159,8 +159,6 @@ Set the ISO 639 language code (3 letters) of the current subtitle stream.

@section Video Options

@table @option
-@item -b @var{bitrate}
-Set the video bitrate in bit/s (default = 200 kb/s).
@item -vframes @var{number}
Set the number of video frames to record.
@item -r @var{fps}

@@ -560,8 +558,6 @@ Set the audio sampling frequency. For output streams it is set by
default to the frequency of the corresponding input stream. For input
streams this option only makes sense for audio grabbing devices and raw
demuxers and is mapped to the corresponding demuxer options.
-@item -ab @var{bitrate}
-Set the audio bitrate in bit/s (default = 64k).
@item -aq @var{q}
Set the audio quality (codec-specific, VBR).
@item -ac @var{channels}

ffmpeg.c

@@ -306,6 +306,7 @@ typedef struct OutputStream {
#endif
    int sws_flags;
+   AVDictionary *opts;
} OutputStream;

static OutputStream **output_streams_for_file[MAX_FILES] = { NULL };
@@ -326,6 +327,7 @@ typedef struct InputStream {
    int is_start;            /* is 1 at the start and after a discontinuity */
    int showed_multi_packet_warning;
    int is_past_recording_time;
+   AVDictionary *opts;
} InputStream;

typedef struct InputFile {
@@ -536,6 +538,8 @@ static int ffmpeg_exit(int ret)
    for(i=0;i<nb_input_files;i++) {
        av_close_input_file(input_files[i].ctx);
    }
+   for (i = 0; i < nb_input_streams; i++)
+       av_dict_free(&input_streams[i].opts);

    av_free(intra_matrix);
    av_free(inter_matrix);
@@ -586,6 +590,23 @@ static void assert_avoptions(AVDictionary *m)
    }
}
static void assert_codec_experimental(AVCodecContext *c, int encoder)
{
const char *codec_string = encoder ? "encoder" : "decoder";
AVCodec *codec;
if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
av_log(NULL, AV_LOG_ERROR, "%s '%s' is experimental and might produce bad "
"results.\nAdd '-strict experimental' if you want to use it.\n",
codec_string, c->codec->name);
codec = encoder ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id);
if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
av_log(NULL, AV_LOG_ERROR, "Or use the non experimental %s '%s'.\n",
codec_string, codec->name);
ffmpeg_exit(1);
}
}
/* similar to ff_dynarray_add() and av_fast_realloc() */
static void *grow_array(void *array, int elem_size, int *size, int new_size)
{
@@ -700,6 +721,8 @@ static OutputStream *new_output_stream(AVFormatContext *oc, int file_idx, AVCode
    ost->index = idx;
    ost->st = st;
    ost->enc = codec;
+   if (codec)
+       ost->opts = filter_codec_opts(codec_opts, codec->id, 1);

    avcodec_get_context_defaults3(st->codec, codec);
@@ -2410,12 +2433,17 @@ static int transcode(AVFormatContext **output_files,
                memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
                ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
            }
-           if (avcodec_open(ost->st->codec, codec) < 0) {
+           if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
                snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height",
                        ost->file_index, ost->index);
                ret = AVERROR(EINVAL);
                goto dump_format;
            }
+           assert_codec_experimental(ost->st->codec, 1);
+           assert_avoptions(ost->opts);
+           if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
+               av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
+                      "It takes bits/s as argument, not kbits/s\n");
            extra_size += ost->st->codec->extradata_size;
        }
    }
@@ -2433,12 +2461,14 @@ static int transcode(AVFormatContext **output_files,
            ret = AVERROR(EINVAL);
            goto dump_format;
        }
-       if (avcodec_open(ist->st->codec, codec) < 0) {
+       if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
            snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d",
                    ist->file_index, ist->st->index);
            ret = AVERROR(EINVAL);
            goto dump_format;
        }
+       assert_codec_experimental(ist->st->codec, 0);
+       assert_avoptions(ost->opts);
        //if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        //    ist->st->codec->flags |= CODEC_FLAG_REPEAT_FIELD;
    }
@@ -2859,6 +2889,7 @@ static int transcode(AVFormatContext **output_files,
                audio_resample_close(ost->resample);
            if (ost->reformat_ctx)
                av_audio_convert_free(ost->reformat_ctx);
+           av_dict_free(&ost->opts);
            av_free(ost);
        }
    }
@@ -2900,18 +2931,6 @@ static int opt_frame_rate(const char *opt, const char *arg)
    return 0;
}
static int opt_bitrate(const char *opt, const char *arg)
{
int codec_type = opt[0]=='a' ? AVMEDIA_TYPE_AUDIO : AVMEDIA_TYPE_VIDEO;
opt_default(opt, arg);
if (av_get_int(avcodec_opts[codec_type], "b", NULL) < 1000)
fprintf(stderr, "WARNING: The bitrate parameter is set too low. It takes bits/s as argument, not kbits/s\n");
return 0;
}
static int opt_frame_crop(const char *opt, const char *arg)
{
    fprintf(stderr, "Option '%s' has been removed, use the crop filter instead\n", opt);
@@ -3239,7 +3258,7 @@ static int opt_input_ts_offset(const char *opt, const char *arg)
    return 0;
}

-static enum CodecID find_codec_or_die(const char *name, int type, int encoder, int strict)
+static enum CodecID find_codec_or_die(const char *name, int type, int encoder)
{
    const char *codec_string = encoder ? "encoder" : "decoder";
    AVCodec *codec;
@@ -3257,19 +3276,6 @@ static enum CodecID find_codec_or_die(const char *name, int type, int encoder, i
        fprintf(stderr, "Invalid %s type '%s'\n", codec_string, name);
        ffmpeg_exit(1);
    }
if(codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
strict > FF_COMPLIANCE_EXPERIMENTAL) {
fprintf(stderr, "%s '%s' is experimental and might produce bad "
"results.\nAdd '-strict experimental' if you want to use it.\n",
codec_string, codec->name);
codec = encoder ?
avcodec_find_encoder(codec->id) :
avcodec_find_decoder(codec->id);
if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
fprintf(stderr, "Or use the non experimental %s '%s'.\n",
codec_string, codec->name);
ffmpeg_exit(1);
}
    return codec->id;
}
@@ -3280,6 +3286,8 @@ static int opt_input_file(const char *opt, const char *filename)
    int err, i, ret, rfps, rfps_base;
    int64_t timestamp;
    uint8_t buf[128];
+   AVDictionary **opts;
+   int orig_nb_streams;    // number of streams before avformat_find_stream_info

    if (last_asked_format) {
        if (!(file_iformat = av_find_input_format(last_asked_format))) {
@@ -3321,14 +3329,11 @@ static int opt_input_file(const char *opt, const char *filename)
        av_dict_set(&format_opts, "pixel_format", av_get_pix_fmt_name(frame_pix_fmt), 0);

    ic->video_codec_id =
-       find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0,
-                         avcodec_opts[AVMEDIA_TYPE_VIDEO ]->strict_std_compliance);
+       find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0);
    ic->audio_codec_id =
-       find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0,
-                         avcodec_opts[AVMEDIA_TYPE_AUDIO ]->strict_std_compliance);
+       find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0);
    ic->subtitle_codec_id=
-       find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0,
-                         avcodec_opts[AVMEDIA_TYPE_SUBTITLE]->strict_std_compliance);
+       find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0);
    ic->flags |= AVFMT_FLAG_NONBLOCK;

    /* open the input file with generic libav function */
@@ -3368,9 +3373,13 @@ static int opt_input_file(const char *opt, const char *filename)
        ic->loop_input = loop_input;
    }

+   /* Set AVCodecContext options for avformat_find_stream_info */
+   opts = setup_find_stream_info_opts(ic);
+   orig_nb_streams = ic->nb_streams;
+
    /* If not enough info to get the stream parameters, we decode the
       first frames to get it. (used in mpeg case for example) */
-   ret = av_find_stream_info(ic);
+   ret = avformat_find_stream_info(ic, opts);
    if (ret < 0 && verbose >= 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", filename);
        av_close_input_file(ic);
@@ -3406,6 +3415,7 @@ static int opt_input_file(const char *opt, const char *filename)
        ist->st = st;
        ist->file_index = nb_input_files;
        ist->discard = 1;
+       ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, 0);

        if (i < nb_ts_scale)
            ist->ts_scale = ts_scale[i];
@@ -3415,7 +3425,6 @@
            ist->dec = avcodec_find_decoder_by_name(audio_codec_name);
            if(!ist->dec)
                ist->dec = avcodec_find_decoder(dec->codec_id);
-           set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO], AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM, ist->dec);
            if(audio_disable)
                st->discard= AVDISCARD_ALL;
            break;
@@ -3423,7 +3432,6 @@
            ist->dec= avcodec_find_decoder_by_name(video_codec_name);
            if(!ist->dec)
                ist->dec = avcodec_find_decoder(dec->codec_id);
-           set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM, ist->dec);
            rfps = ic->streams[i]->r_frame_rate.num;
            rfps_base = ic->streams[i]->r_frame_rate.den;
            if (dec->lowres) {
@@ -3485,6 +3493,9 @@
    av_freep(&ts_scale);
    nb_ts_scale = 0;

+   for (i = 0; i < orig_nb_streams; i++)
+       av_dict_free(&opts[i]);
+   av_freep(&opts);
+
    av_freep(&video_codec_name);
    av_freep(&audio_codec_name);
    av_freep(&subtitle_codec_name);
@@ -3546,8 +3557,7 @@ static void new_video_stream(AVFormatContext *oc, int file_idx)
    if(!video_stream_copy){
        if (video_codec_name) {
-           codec_id = find_codec_or_die(video_codec_name, AVMEDIA_TYPE_VIDEO, 1,
-                                        avcodec_opts[AVMEDIA_TYPE_VIDEO]->strict_std_compliance);
+           codec_id = find_codec_or_die(video_codec_name, AVMEDIA_TYPE_VIDEO, 1);
            codec = avcodec_find_encoder_by_name(video_codec_name);
        } else {
            codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_VIDEO);
@@ -3578,12 +3588,11 @@ static void new_video_stream(AVFormatContext *oc, int file_idx)
    if(oc->oformat->flags & AVFMT_GLOBALHEADER) {
        video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
-       avcodec_opts[AVMEDIA_TYPE_VIDEO]->flags|= CODEC_FLAG_GLOBAL_HEADER;
    }

+   video_enc->codec_type = AVMEDIA_TYPE_VIDEO;
+
    if (video_stream_copy) {
        st->stream_copy = 1;
-       video_enc->codec_type = AVMEDIA_TYPE_VIDEO;
        video_enc->sample_aspect_ratio =
        st->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255);
    } else {
@@ -3593,7 +3602,6 @@
        if (frame_rate.num)
            ost->frame_rate = frame_rate;
        video_enc->codec_id = codec_id;
-       set_context_opts(video_enc, avcodec_opts[AVMEDIA_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, codec);

        video_enc->width = frame_width;
        video_enc->height = frame_height;
@@ -3681,8 +3689,7 @@ static void new_audio_stream(AVFormatContext *oc, int file_idx)
    if(!audio_stream_copy){
        if (audio_codec_name) {
-           codec_id = find_codec_or_die(audio_codec_name, AVMEDIA_TYPE_AUDIO, 1,
-                                        avcodec_opts[AVMEDIA_TYPE_AUDIO]->strict_std_compliance);
+           codec_id = find_codec_or_die(audio_codec_name, AVMEDIA_TYPE_AUDIO, 1);
            codec = avcodec_find_encoder_by_name(audio_codec_name);
        } else {
            codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_AUDIO);
@@ -3705,13 +3712,11 @@
    if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
        audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
-       avcodec_opts[AVMEDIA_TYPE_AUDIO]->flags|= CODEC_FLAG_GLOBAL_HEADER;
    }
    if (audio_stream_copy) {
        st->stream_copy = 1;
    } else {
        audio_enc->codec_id = codec_id;
-       set_context_opts(audio_enc, avcodec_opts[AVMEDIA_TYPE_AUDIO], AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, codec);

        if (audio_qscale > QSCALE_NONE) {
            audio_enc->flags |= CODEC_FLAG_QSCALE;
@@ -3756,7 +3761,6 @@ static void new_data_stream(AVFormatContext *oc, int file_idx)
    if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
        data_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
-       avcodec_opts[AVMEDIA_TYPE_DATA]->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    if (data_stream_copy) {
        st->stream_copy = 1;
@@ -3777,8 +3781,7 @@ static void new_subtitle_stream(AVFormatContext *oc, int file_idx)
    if(!subtitle_stream_copy){
        if (subtitle_codec_name) {
-           codec_id = find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 1,
-                                        avcodec_opts[AVMEDIA_TYPE_SUBTITLE]->strict_std_compliance);
+           codec_id = find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 1);
            codec = avcodec_find_encoder_by_name(subtitle_codec_name);
        } else {
            codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_SUBTITLE);
@@ -3799,13 +3802,11 @@
    if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
        subtitle_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
-       avcodec_opts[AVMEDIA_TYPE_SUBTITLE]->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    if (subtitle_stream_copy) {
        st->stream_copy = 1;
    } else {
        subtitle_enc->codec_id = codec_id;
-       set_context_opts(avcodec_opts[AVMEDIA_TYPE_SUBTITLE], subtitle_enc, AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_ENCODING_PARAM, codec);
    }

    if (subtitle_language) {
@@ -4416,8 +4417,6 @@ static const OptionDef options[] = {
    { "copyinkf", OPT_BOOL | OPT_EXPERT, {(void*)&copy_initial_nonkeyframes}, "copy initial non-keyframes" },

    /* video options */
-   { "b", HAS_ARG | OPT_VIDEO, {(void*)opt_bitrate}, "set bitrate (in bits/s)", "bitrate" },
-   { "vb", HAS_ARG | OPT_VIDEO, {(void*)opt_bitrate}, "set bitrate (in bits/s)", "bitrate" },
    { "vframes", OPT_INT | HAS_ARG | OPT_VIDEO, {(void*)&max_frames[AVMEDIA_TYPE_VIDEO]}, "set the number of video frames to record", "number" },
    { "r", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_rate}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
@@ -4465,7 +4464,6 @@ static const OptionDef options[] = {
    { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void *)&forced_key_frames}, "force key frames at specified timestamps", "timestamps" },

    /* audio options */
-   { "ab", HAS_ARG | OPT_AUDIO, {(void*)opt_bitrate}, "set bitrate (in bits/s)", "bitrate" },
    { "aframes", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&max_frames[AVMEDIA_TYPE_AUDIO]}, "set the number of audio frames to record", "number" },
    { "aq", OPT_FLOAT | HAS_ARG | OPT_AUDIO, {(void*)&audio_qscale}, "set audio quality (codec-specific)", "quality", },
    { "ar", HAS_ARG | OPT_AUDIO, {(void*)opt_audio_rate}, "set audio sampling rate (in Hz)", "rate" },


@@ -2111,11 +2111,15 @@ static int stream_component_open(VideoState *is, int stream_index)
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;
+   AVDictionary *opts;
+   AVDictionaryEntry *t = NULL;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;
+   opts = filter_codec_opts(codec_opts, avctx->codec_id, 0);

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (avctx->channels > 0) {
@@ -2141,13 +2145,16 @@ static int stream_component_open(VideoState *is, int stream_index)
    avctx->error_concealment= error_concealment;
    avctx->thread_count= thread_count;
-   set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if(codec->capabilities & CODEC_CAP_DR1)
        avctx->flags |= CODEC_FLAG_EMU_EDGE;

-   if (avcodec_open(avctx, codec) < 0)
+   if (!codec ||
+       avcodec_open2(avctx, codec, &opts) < 0)
        return -1;
+   if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
+       av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
+       return AVERROR_OPTION_NOT_FOUND;
+   }

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
@@ -2301,6 +2308,8 @@ static int read_thread(void *arg)
    int eof=0;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;
+   AVDictionary **opts;
+   int orig_nb_streams;

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
@@ -2326,12 +2335,19 @@
    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

-   err = av_find_stream_info(ic);
+   opts = setup_find_stream_info_opts(ic);
+   orig_nb_streams = ic->nb_streams;
+
+   err = avformat_find_stream_info(ic, opts);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
+   for (i = 0; i < orig_nb_streams; i++)
+       av_dict_free(&opts[i]);
+   av_freep(&opts);
+
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end


@@ -69,19 +69,19 @@ static const int huff_iid[] = {

static VLC vlc_ps[10];

-/**
- * Read Inter-channel Intensity Difference/Inter-Channel Coherence/
- * Inter-channel Phase Difference/Overall Phase Difference parameters from the
- * bitstream.
- *
- * @param avctx contains the current codec context
- * @param gb pointer to the input bitstream
- * @param ps pointer to the Parametric Stereo context
- * @param PAR pointer to the parameter to be read
- * @param e envelope to decode
- * @param dt 1: time delta-coded, 0: frequency delta-coded
- */
#define READ_PAR_DATA(PAR, OFFSET, MASK, ERR_CONDITION) \
+/** \
+ * Read Inter-channel Intensity Difference/Inter-Channel Coherence/ \
+ * Inter-channel Phase Difference/Overall Phase Difference parameters from the \
+ * bitstream. \
+ * \
+ * @param avctx contains the current codec context \
+ * @param gb pointer to the input bitstream \
+ * @param ps pointer to the Parametric Stereo context \
+ * @param PAR pointer to the parameter to be read \
+ * @param e envelope to decode \
+ * @param dt 1: time delta-coded, 0: frequency delta-coded \
+ */ \
static int read_ ## PAR ## _data(AVCodecContext *avctx, GetBitContext *gb, PSContext *ps, \
                                 int8_t (*PAR)[PS_MAX_NR_IIDICC], int table_idx, int e, int dt) \
{ \
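The two doxygen items in the commit message refer to the pattern shown above: when a function is generated by a macro, its documentation has to live inside the macro, with each comment line ending in a backslash, and the macro name must be listed under EXPAND_AS_DEFINED in the Doxyfile (as done for READ_PAR_DATA in the first hunk of this merge) so doxygen expands it. A reduced, hypothetical example of the same technique; DECLARE_GETTER and struct Context are made up for illustration:

/* Hypothetical macro-generated accessors, documented inside the macro.
 * Comments inside the #define disappear during preprocessing, but doxygen
 * sees them once the macro is listed under EXPAND_AS_DEFINED. */
#define DECLARE_GETTER(name, type, field)              \
/**                                                    \
 * Return the requested field of the given context.   \
 *                                                     \
 * @param ctx context to query                         \
 * @return current value of the field                  \
 */                                                    \
static type get_ ## name(const struct Context *ctx)   \
{                                                      \
    return ctx->field;                                 \
}

struct Context { int width, height; };

DECLARE_GETTER(width,  int, width)
DECLARE_GETTER(height, int, height)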


@@ -770,424 +770,6 @@ static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale){
    return h->pps.chroma_qp_table[t][qscale];
}
static void fill_decode_neighbors(H264Context *h, int mb_type){
MpegEncContext * const s = &h->s;
const int mb_xy= h->mb_xy;
int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
static const uint8_t left_block_options[4][32]={
{0,1,2,3,7,10,8,11,3+0*4, 3+1*4, 3+2*4, 3+3*4, 1+4*4, 1+8*4, 1+5*4, 1+9*4},
{2,2,3,3,8,11,8,11,3+2*4, 3+2*4, 3+3*4, 3+3*4, 1+5*4, 1+9*4, 1+5*4, 1+9*4},
{0,0,1,1,7,10,7,10,3+0*4, 3+0*4, 3+1*4, 3+1*4, 1+4*4, 1+8*4, 1+4*4, 1+8*4},
{0,2,0,2,7,10,7,10,3+0*4, 3+2*4, 3+0*4, 3+2*4, 1+4*4, 1+8*4, 1+4*4, 1+8*4}
};
h->topleft_partition= -1;
top_xy = mb_xy - (s->mb_stride << MB_FIELD);
/* Wow, what a mess, why didn't they simplify the interlacing & intra
* stuff, I can't imagine that these complex rules are worth it. */
topleft_xy = top_xy - 1;
topright_xy= top_xy + 1;
left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
h->left_block = left_block_options[0];
if(FRAME_MBAFF){
const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
if(s->mb_y&1){
if (left_mb_field_flag != curr_mb_field_flag) {
left_xy[LBOT] = left_xy[LTOP] = mb_xy - s->mb_stride - 1;
if (curr_mb_field_flag) {
left_xy[LBOT] += s->mb_stride;
h->left_block = left_block_options[3];
} else {
topleft_xy += s->mb_stride;
// take top left mv from the middle of the mb, as opposed to all other modes which use the bottom right partition
h->topleft_partition = 0;
h->left_block = left_block_options[1];
}
}
}else{
if(curr_mb_field_flag){
topleft_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy - 1] >> 7) & 1) - 1);
topright_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy + 1] >> 7) & 1) - 1);
top_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy ] >> 7) & 1) - 1);
}
if (left_mb_field_flag != curr_mb_field_flag) {
if (curr_mb_field_flag) {
left_xy[LBOT] += s->mb_stride;
h->left_block = left_block_options[3];
} else {
h->left_block = left_block_options[2];
}
}
}
}
h->topleft_mb_xy = topleft_xy;
h->top_mb_xy = top_xy;
h->topright_mb_xy= topright_xy;
h->left_mb_xy[LTOP] = left_xy[LTOP];
h->left_mb_xy[LBOT] = left_xy[LBOT];
//FIXME do we need all in the context?
h->topleft_type = s->current_picture.f.mb_type[topleft_xy];
h->top_type = s->current_picture.f.mb_type[top_xy];
h->topright_type = s->current_picture.f.mb_type[topright_xy];
h->left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
h->left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];
if(FMO){
if(h->slice_table[topleft_xy ] != h->slice_num) h->topleft_type = 0;
if(h->slice_table[top_xy ] != h->slice_num) h->top_type = 0;
if(h->slice_table[left_xy[LTOP] ] != h->slice_num) h->left_type[LTOP] = h->left_type[LBOT] = 0;
}else{
if(h->slice_table[topleft_xy ] != h->slice_num){
h->topleft_type = 0;
if(h->slice_table[top_xy ] != h->slice_num) h->top_type = 0;
if(h->slice_table[left_xy[LTOP] ] != h->slice_num) h->left_type[LTOP] = h->left_type[LBOT] = 0;
}
}
if(h->slice_table[topright_xy] != h->slice_num) h->topright_type= 0;
}
static void fill_decode_caches(H264Context *h, int mb_type){
MpegEncContext * const s = &h->s;
int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
int topleft_type, top_type, topright_type, left_type[LEFT_MBS];
const uint8_t * left_block= h->left_block;
int i;
uint8_t *nnz;
uint8_t *nnz_cache;
topleft_xy = h->topleft_mb_xy;
top_xy = h->top_mb_xy;
topright_xy = h->topright_mb_xy;
left_xy[LTOP] = h->left_mb_xy[LTOP];
left_xy[LBOT] = h->left_mb_xy[LBOT];
topleft_type = h->topleft_type;
top_type = h->top_type;
topright_type = h->topright_type;
left_type[LTOP]= h->left_type[LTOP];
left_type[LBOT]= h->left_type[LBOT];
if(!IS_SKIP(mb_type)){
if(IS_INTRA(mb_type)){
int type_mask= h->pps.constrained_intra_pred ? IS_INTRA(-1) : -1;
h->topleft_samples_available=
h->top_samples_available=
h->left_samples_available= 0xFFFF;
h->topright_samples_available= 0xEEEA;
if(!(top_type & type_mask)){
h->topleft_samples_available= 0xB3FF;
h->top_samples_available= 0x33FF;
h->topright_samples_available= 0x26EA;
}
if(IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[LTOP])){
if(IS_INTERLACED(mb_type)){
if(!(left_type[LTOP] & type_mask)){
h->topleft_samples_available&= 0xDFFF;
h->left_samples_available&= 0x5FFF;
}
if(!(left_type[LBOT] & type_mask)){
h->topleft_samples_available&= 0xFF5F;
h->left_samples_available&= 0xFF5F;
}
}else{
int left_typei = s->current_picture.f.mb_type[left_xy[LTOP] + s->mb_stride];
assert(left_xy[LTOP] == left_xy[LBOT]);
if(!((left_typei & type_mask) && (left_type[LTOP] & type_mask))){
h->topleft_samples_available&= 0xDF5F;
h->left_samples_available&= 0x5F5F;
}
}
}else{
if(!(left_type[LTOP] & type_mask)){
h->topleft_samples_available&= 0xDF5F;
h->left_samples_available&= 0x5F5F;
}
}
if(!(topleft_type & type_mask))
h->topleft_samples_available&= 0x7FFF;
if(!(topright_type & type_mask))
h->topright_samples_available&= 0xFBFF;
if(IS_INTRA4x4(mb_type)){
if(IS_INTRA4x4(top_type)){
AV_COPY32(h->intra4x4_pred_mode_cache+4+8*0, h->intra4x4_pred_mode + h->mb2br_xy[top_xy]);
}else{
h->intra4x4_pred_mode_cache[4+8*0]=
h->intra4x4_pred_mode_cache[5+8*0]=
h->intra4x4_pred_mode_cache[6+8*0]=
h->intra4x4_pred_mode_cache[7+8*0]= 2 - 3*!(top_type & type_mask);
}
for(i=0; i<2; i++){
if(IS_INTRA4x4(left_type[LEFT(i)])){
int8_t *mode= h->intra4x4_pred_mode + h->mb2br_xy[left_xy[LEFT(i)]];
h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= mode[6-left_block[0+2*i]];
h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= mode[6-left_block[1+2*i]];
}else{
h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]=
h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= 2 - 3*!(left_type[LEFT(i)] & type_mask);
}
}
}
}
/*
0 . T T. T T T T
1 L . .L . . . .
2 L . .L . . . .
3 . T TL . . . .
4 L . .L . . . .
5 L . .. . . . .
*/
//FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec)
nnz_cache = h->non_zero_count_cache;
if(top_type){
nnz = h->non_zero_count[top_xy];
AV_COPY32(&nnz_cache[4+8* 0], &nnz[4*3]);
if(CHROMA444){
AV_COPY32(&nnz_cache[4+8* 5], &nnz[4* 7]);
AV_COPY32(&nnz_cache[4+8*10], &nnz[4*11]);
}else{
AV_COPY32(&nnz_cache[4+8* 5], &nnz[4* 5]);
AV_COPY32(&nnz_cache[4+8*10], &nnz[4* 9]);
}
}else{
uint32_t top_empty = CABAC && !IS_INTRA(mb_type) ? 0 : 0x40404040;
AV_WN32A(&nnz_cache[4+8* 0], top_empty);
AV_WN32A(&nnz_cache[4+8* 5], top_empty);
AV_WN32A(&nnz_cache[4+8*10], top_empty);
}
for (i=0; i<2; i++) {
if(left_type[LEFT(i)]){
nnz = h->non_zero_count[left_xy[LEFT(i)]];
nnz_cache[3+8* 1 + 2*8*i]= nnz[left_block[8+0+2*i]];
nnz_cache[3+8* 2 + 2*8*i]= nnz[left_block[8+1+2*i]];
if(CHROMA444){
nnz_cache[3+8* 6 + 2*8*i]= nnz[left_block[8+0+2*i]+4*4];
nnz_cache[3+8* 7 + 2*8*i]= nnz[left_block[8+1+2*i]+4*4];
nnz_cache[3+8*11 + 2*8*i]= nnz[left_block[8+0+2*i]+8*4];
nnz_cache[3+8*12 + 2*8*i]= nnz[left_block[8+1+2*i]+8*4];
}else{
nnz_cache[3+8* 6 + 8*i]= nnz[left_block[8+4+2*i]];
nnz_cache[3+8*11 + 8*i]= nnz[left_block[8+5+2*i]];
}
}else{
nnz_cache[3+8* 1 + 2*8*i]=
nnz_cache[3+8* 2 + 2*8*i]=
nnz_cache[3+8* 6 + 2*8*i]=
nnz_cache[3+8* 7 + 2*8*i]=
nnz_cache[3+8*11 + 2*8*i]=
nnz_cache[3+8*12 + 2*8*i]= CABAC && !IS_INTRA(mb_type) ? 0 : 64;
}
}
if( CABAC ) {
// top_cbp
if(top_type) {
h->top_cbp = h->cbp_table[top_xy];
} else {
h->top_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
}
// left_cbp
if (left_type[LTOP]) {
h->left_cbp = (h->cbp_table[left_xy[LTOP]] & 0x7F0)
| ((h->cbp_table[left_xy[LTOP]]>>(left_block[0]&(~1)))&2)
| (((h->cbp_table[left_xy[LBOT]]>>(left_block[2]&(~1)))&2) << 2);
} else {
h->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
}
}
}
if(IS_INTER(mb_type) || (IS_DIRECT(mb_type) && h->direct_spatial_mv_pred)){
int list;
int b_stride = h->b_stride;
for(list=0; list<h->list_count; list++){
int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
int8_t *ref = s->current_picture.f.ref_index[list];
int16_t (*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
int16_t (*mv)[2] = s->current_picture.f.motion_val[list];
if(!USES_LIST(mb_type, list)){
continue;
}
assert(!(IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred));
if(USES_LIST(top_type, list)){
const int b_xy= h->mb2b_xy[top_xy] + 3*b_stride;
AV_COPY128(mv_cache[0 - 1*8], mv[b_xy + 0]);
ref_cache[0 - 1*8]=
ref_cache[1 - 1*8]= ref[4*top_xy + 2];
ref_cache[2 - 1*8]=
ref_cache[3 - 1*8]= ref[4*top_xy + 3];
}else{
AV_ZERO128(mv_cache[0 - 1*8]);
AV_WN32A(&ref_cache[0 - 1*8], ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101);
}
if(mb_type & (MB_TYPE_16x8|MB_TYPE_8x8)){
for(i=0; i<2; i++){
int cache_idx = -1 + i*2*8;
if(USES_LIST(left_type[LEFT(i)], list)){
const int b_xy= h->mb2b_xy[left_xy[LEFT(i)]] + 3;
const int b8_xy= 4*left_xy[LEFT(i)] + 1;
AV_COPY32(mv_cache[cache_idx ], mv[b_xy + b_stride*left_block[0+i*2]]);
AV_COPY32(mv_cache[cache_idx+8], mv[b_xy + b_stride*left_block[1+i*2]]);
ref_cache[cache_idx ]= ref[b8_xy + (left_block[0+i*2]&~1)];
ref_cache[cache_idx+8]= ref[b8_xy + (left_block[1+i*2]&~1)];
}else{
AV_ZERO32(mv_cache[cache_idx ]);
AV_ZERO32(mv_cache[cache_idx+8]);
ref_cache[cache_idx ]=
ref_cache[cache_idx+8]= (left_type[LEFT(i)]) ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
}
}else{
if(USES_LIST(left_type[LTOP], list)){
const int b_xy= h->mb2b_xy[left_xy[LTOP]] + 3;
const int b8_xy= 4*left_xy[LTOP] + 1;
AV_COPY32(mv_cache[-1], mv[b_xy + b_stride*left_block[0]]);
ref_cache[-1]= ref[b8_xy + (left_block[0]&~1)];
}else{
AV_ZERO32(mv_cache[-1]);
ref_cache[-1]= left_type[LTOP] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
}
if(USES_LIST(topright_type, list)){
const int b_xy= h->mb2b_xy[topright_xy] + 3*b_stride;
AV_COPY32(mv_cache[4 - 1*8], mv[b_xy]);
ref_cache[4 - 1*8]= ref[4*topright_xy + 2];
}else{
AV_ZERO32(mv_cache[4 - 1*8]);
ref_cache[4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
if(ref_cache[4 - 1*8] < 0){
if(USES_LIST(topleft_type, list)){
const int b_xy = h->mb2b_xy[topleft_xy] + 3 + b_stride + (h->topleft_partition & 2*b_stride);
const int b8_xy= 4*topleft_xy + 1 + (h->topleft_partition & 2);
AV_COPY32(mv_cache[-1 - 1*8], mv[b_xy]);
ref_cache[-1 - 1*8]= ref[b8_xy];
}else{
AV_ZERO32(mv_cache[-1 - 1*8]);
ref_cache[-1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
}
if((mb_type&(MB_TYPE_SKIP|MB_TYPE_DIRECT2)) && !FRAME_MBAFF)
continue;
if(!(mb_type&(MB_TYPE_SKIP|MB_TYPE_DIRECT2))){
uint8_t (*mvd_cache)[2] = &h->mvd_cache[list][scan8[0]];
uint8_t (*mvd)[2] = h->mvd_table[list];
ref_cache[2+8*0] =
ref_cache[2+8*2] = PART_NOT_AVAILABLE;
AV_ZERO32(mv_cache[2+8*0]);
AV_ZERO32(mv_cache[2+8*2]);
if( CABAC ) {
if(USES_LIST(top_type, list)){
const int b_xy= h->mb2br_xy[top_xy];
AV_COPY64(mvd_cache[0 - 1*8], mvd[b_xy + 0]);
}else{
AV_ZERO64(mvd_cache[0 - 1*8]);
}
if(USES_LIST(left_type[LTOP], list)){
const int b_xy= h->mb2br_xy[left_xy[LTOP]] + 6;
AV_COPY16(mvd_cache[-1 + 0*8], mvd[b_xy - left_block[0]]);
AV_COPY16(mvd_cache[-1 + 1*8], mvd[b_xy - left_block[1]]);
}else{
AV_ZERO16(mvd_cache[-1 + 0*8]);
AV_ZERO16(mvd_cache[-1 + 1*8]);
}
if(USES_LIST(left_type[LBOT], list)){
const int b_xy= h->mb2br_xy[left_xy[LBOT]] + 6;
AV_COPY16(mvd_cache[-1 + 2*8], mvd[b_xy - left_block[2]]);
AV_COPY16(mvd_cache[-1 + 3*8], mvd[b_xy - left_block[3]]);
}else{
AV_ZERO16(mvd_cache[-1 + 2*8]);
AV_ZERO16(mvd_cache[-1 + 3*8]);
}
AV_ZERO16(mvd_cache[2+8*0]);
AV_ZERO16(mvd_cache[2+8*2]);
if(h->slice_type_nos == AV_PICTURE_TYPE_B){
uint8_t *direct_cache = &h->direct_cache[scan8[0]];
uint8_t *direct_table = h->direct_table;
fill_rectangle(direct_cache, 4, 4, 8, MB_TYPE_16x16>>1, 1);
if(IS_DIRECT(top_type)){
AV_WN32A(&direct_cache[-1*8], 0x01010101u*(MB_TYPE_DIRECT2>>1));
}else if(IS_8X8(top_type)){
int b8_xy = 4*top_xy;
direct_cache[0 - 1*8]= direct_table[b8_xy + 2];
direct_cache[2 - 1*8]= direct_table[b8_xy + 3];
}else{
AV_WN32A(&direct_cache[-1*8], 0x01010101*(MB_TYPE_16x16>>1));
}
if(IS_DIRECT(left_type[LTOP]))
direct_cache[-1 + 0*8]= MB_TYPE_DIRECT2>>1;
else if(IS_8X8(left_type[LTOP]))
direct_cache[-1 + 0*8]= direct_table[4*left_xy[LTOP] + 1 + (left_block[0]&~1)];
else
direct_cache[-1 + 0*8]= MB_TYPE_16x16>>1;
if(IS_DIRECT(left_type[LBOT]))
direct_cache[-1 + 2*8]= MB_TYPE_DIRECT2>>1;
else if(IS_8X8(left_type[LBOT]))
direct_cache[-1 + 2*8]= direct_table[4*left_xy[LBOT] + 1 + (left_block[2]&~1)];
else
direct_cache[-1 + 2*8]= MB_TYPE_16x16>>1;
}
}
}
if(FRAME_MBAFF){
#define MAP_MVS\
MAP_F2F(scan8[0] - 1 - 1*8, topleft_type)\
MAP_F2F(scan8[0] + 0 - 1*8, top_type)\
MAP_F2F(scan8[0] + 1 - 1*8, top_type)\
MAP_F2F(scan8[0] + 2 - 1*8, top_type)\
MAP_F2F(scan8[0] + 3 - 1*8, top_type)\
MAP_F2F(scan8[0] + 4 - 1*8, topright_type)\
MAP_F2F(scan8[0] - 1 + 0*8, left_type[LTOP])\
MAP_F2F(scan8[0] - 1 + 1*8, left_type[LTOP])\
MAP_F2F(scan8[0] - 1 + 2*8, left_type[LBOT])\
MAP_F2F(scan8[0] - 1 + 3*8, left_type[LBOT])
if(MB_FIELD){
#define MAP_F2F(idx, mb_type)\
if(!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
h->ref_cache[list][idx] <<= 1;\
h->mv_cache[list][idx][1] /= 2;\
h->mvd_cache[list][idx][1] >>=1;\
}
MAP_MVS
#undef MAP_F2F
}else{
#define MAP_F2F(idx, mb_type)\
if(IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
h->ref_cache[list][idx] >>= 1;\
h->mv_cache[list][idx][1] <<= 1;\
h->mvd_cache[list][idx][1] <<= 1;\
}
MAP_MVS
#undef MAP_F2F
}
}
}
}
h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[LTOP]);
}
/**
 * gets the predicted intra4x4 prediction mode.
 */


@@ -327,6 +327,424 @@ zeromv:
    return;
}
static void fill_decode_neighbors(H264Context *h, int mb_type){
MpegEncContext * const s = &h->s;
const int mb_xy= h->mb_xy;
int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
static const uint8_t left_block_options[4][32]={
{0,1,2,3,7,10,8,11,3+0*4, 3+1*4, 3+2*4, 3+3*4, 1+4*4, 1+8*4, 1+5*4, 1+9*4},
{2,2,3,3,8,11,8,11,3+2*4, 3+2*4, 3+3*4, 3+3*4, 1+5*4, 1+9*4, 1+5*4, 1+9*4},
{0,0,1,1,7,10,7,10,3+0*4, 3+0*4, 3+1*4, 3+1*4, 1+4*4, 1+8*4, 1+4*4, 1+8*4},
{0,2,0,2,7,10,7,10,3+0*4, 3+2*4, 3+0*4, 3+2*4, 1+4*4, 1+8*4, 1+4*4, 1+8*4}
};
h->topleft_partition= -1;
top_xy = mb_xy - (s->mb_stride << MB_FIELD);
/* Wow, what a mess, why didn't they simplify the interlacing & intra
* stuff, I can't imagine that these complex rules are worth it. */
topleft_xy = top_xy - 1;
topright_xy= top_xy + 1;
left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
h->left_block = left_block_options[0];
if(FRAME_MBAFF){
const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
if(s->mb_y&1){
if (left_mb_field_flag != curr_mb_field_flag) {
left_xy[LBOT] = left_xy[LTOP] = mb_xy - s->mb_stride - 1;
if (curr_mb_field_flag) {
left_xy[LBOT] += s->mb_stride;
h->left_block = left_block_options[3];
} else {
topleft_xy += s->mb_stride;
// take top left mv from the middle of the mb, as opposed to all other modes which use the bottom right partition
h->topleft_partition = 0;
h->left_block = left_block_options[1];
}
}
}else{
if(curr_mb_field_flag){
topleft_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy - 1] >> 7) & 1) - 1);
topright_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy + 1] >> 7) & 1) - 1);
top_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy ] >> 7) & 1) - 1);
}
if (left_mb_field_flag != curr_mb_field_flag) {
if (curr_mb_field_flag) {
left_xy[LBOT] += s->mb_stride;
h->left_block = left_block_options[3];
} else {
h->left_block = left_block_options[2];
}
}
}
}
h->topleft_mb_xy = topleft_xy;
h->top_mb_xy = top_xy;
h->topright_mb_xy= topright_xy;
h->left_mb_xy[LTOP] = left_xy[LTOP];
h->left_mb_xy[LBOT] = left_xy[LBOT];
//FIXME do we need all in the context?
h->topleft_type = s->current_picture.f.mb_type[topleft_xy];
h->top_type = s->current_picture.f.mb_type[top_xy];
h->topright_type = s->current_picture.f.mb_type[topright_xy];
h->left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
h->left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];
if(FMO){
if(h->slice_table[topleft_xy ] != h->slice_num) h->topleft_type = 0;
if(h->slice_table[top_xy ] != h->slice_num) h->top_type = 0;
if(h->slice_table[left_xy[LTOP] ] != h->slice_num) h->left_type[LTOP] = h->left_type[LBOT] = 0;
}else{
if(h->slice_table[topleft_xy ] != h->slice_num){
h->topleft_type = 0;
if(h->slice_table[top_xy ] != h->slice_num) h->top_type = 0;
if(h->slice_table[left_xy[LTOP] ] != h->slice_num) h->left_type[LTOP] = h->left_type[LBOT] = 0;
}
}
if(h->slice_table[topright_xy] != h->slice_num) h->topright_type= 0;
}
static void fill_decode_caches(H264Context *h, int mb_type){
MpegEncContext * const s = &h->s;
int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
int topleft_type, top_type, topright_type, left_type[LEFT_MBS];
const uint8_t * left_block= h->left_block;
int i;
uint8_t *nnz;
uint8_t *nnz_cache;
topleft_xy = h->topleft_mb_xy;
top_xy = h->top_mb_xy;
topright_xy = h->topright_mb_xy;
left_xy[LTOP] = h->left_mb_xy[LTOP];
left_xy[LBOT] = h->left_mb_xy[LBOT];
topleft_type = h->topleft_type;
top_type = h->top_type;
topright_type = h->topright_type;
left_type[LTOP]= h->left_type[LTOP];
left_type[LBOT]= h->left_type[LBOT];
if(!IS_SKIP(mb_type)){
if(IS_INTRA(mb_type)){
int type_mask= h->pps.constrained_intra_pred ? IS_INTRA(-1) : -1;
h->topleft_samples_available=
h->top_samples_available=
h->left_samples_available= 0xFFFF;
h->topright_samples_available= 0xEEEA;
if(!(top_type & type_mask)){
h->topleft_samples_available= 0xB3FF;
h->top_samples_available= 0x33FF;
h->topright_samples_available= 0x26EA;
}
if(IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[LTOP])){
if(IS_INTERLACED(mb_type)){
if(!(left_type[LTOP] & type_mask)){
h->topleft_samples_available&= 0xDFFF;
h->left_samples_available&= 0x5FFF;
}
if(!(left_type[LBOT] & type_mask)){
h->topleft_samples_available&= 0xFF5F;
h->left_samples_available&= 0xFF5F;
}
}else{
int left_typei = s->current_picture.f.mb_type[left_xy[LTOP] + s->mb_stride];
assert(left_xy[LTOP] == left_xy[LBOT]);
if(!((left_typei & type_mask) && (left_type[LTOP] & type_mask))){
h->topleft_samples_available&= 0xDF5F;
h->left_samples_available&= 0x5F5F;
}
}
}else{
if(!(left_type[LTOP] & type_mask)){
h->topleft_samples_available&= 0xDF5F;
h->left_samples_available&= 0x5F5F;
}
}
if(!(topleft_type & type_mask))
h->topleft_samples_available&= 0x7FFF;
if(!(topright_type & type_mask))
h->topright_samples_available&= 0xFBFF;
if(IS_INTRA4x4(mb_type)){
if(IS_INTRA4x4(top_type)){
AV_COPY32(h->intra4x4_pred_mode_cache+4+8*0, h->intra4x4_pred_mode + h->mb2br_xy[top_xy]);
}else{
h->intra4x4_pred_mode_cache[4+8*0]=
h->intra4x4_pred_mode_cache[5+8*0]=
h->intra4x4_pred_mode_cache[6+8*0]=
h->intra4x4_pred_mode_cache[7+8*0]= 2 - 3*!(top_type & type_mask);
}
for(i=0; i<2; i++){
if(IS_INTRA4x4(left_type[LEFT(i)])){
int8_t *mode= h->intra4x4_pred_mode + h->mb2br_xy[left_xy[LEFT(i)]];
h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= mode[6-left_block[0+2*i]];
h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= mode[6-left_block[1+2*i]];
}else{
h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]=
h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= 2 - 3*!(left_type[LEFT(i)] & type_mask);
}
}
}
}
/*
0 . T T. T T T T
1 L . .L . . . .
2 L . .L . . . .
3 . T TL . . . .
4 L . .L . . . .
5 L . .. . . . .
*/
//FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec)
nnz_cache = h->non_zero_count_cache;
if(top_type){
nnz = h->non_zero_count[top_xy];
AV_COPY32(&nnz_cache[4+8* 0], &nnz[4*3]);
if(CHROMA444){
AV_COPY32(&nnz_cache[4+8* 5], &nnz[4* 7]);
AV_COPY32(&nnz_cache[4+8*10], &nnz[4*11]);
}else{
AV_COPY32(&nnz_cache[4+8* 5], &nnz[4* 5]);
AV_COPY32(&nnz_cache[4+8*10], &nnz[4* 9]);
}
}else{
uint32_t top_empty = CABAC && !IS_INTRA(mb_type) ? 0 : 0x40404040;
AV_WN32A(&nnz_cache[4+8* 0], top_empty);
AV_WN32A(&nnz_cache[4+8* 5], top_empty);
AV_WN32A(&nnz_cache[4+8*10], top_empty);
}
for (i=0; i<2; i++) {
if(left_type[LEFT(i)]){
nnz = h->non_zero_count[left_xy[LEFT(i)]];
nnz_cache[3+8* 1 + 2*8*i]= nnz[left_block[8+0+2*i]];
nnz_cache[3+8* 2 + 2*8*i]= nnz[left_block[8+1+2*i]];
if(CHROMA444){
nnz_cache[3+8* 6 + 2*8*i]= nnz[left_block[8+0+2*i]+4*4];
nnz_cache[3+8* 7 + 2*8*i]= nnz[left_block[8+1+2*i]+4*4];
nnz_cache[3+8*11 + 2*8*i]= nnz[left_block[8+0+2*i]+8*4];
nnz_cache[3+8*12 + 2*8*i]= nnz[left_block[8+1+2*i]+8*4];
}else{
nnz_cache[3+8* 6 + 8*i]= nnz[left_block[8+4+2*i]];
nnz_cache[3+8*11 + 8*i]= nnz[left_block[8+5+2*i]];
}
}else{
nnz_cache[3+8* 1 + 2*8*i]=
nnz_cache[3+8* 2 + 2*8*i]=
nnz_cache[3+8* 6 + 2*8*i]=
nnz_cache[3+8* 7 + 2*8*i]=
nnz_cache[3+8*11 + 2*8*i]=
nnz_cache[3+8*12 + 2*8*i]= CABAC && !IS_INTRA(mb_type) ? 0 : 64;
}
}
if( CABAC ) {
// top_cbp
if(top_type) {
h->top_cbp = h->cbp_table[top_xy];
} else {
h->top_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
}
// left_cbp
if (left_type[LTOP]) {
h->left_cbp = (h->cbp_table[left_xy[LTOP]] & 0x7F0)
| ((h->cbp_table[left_xy[LTOP]]>>(left_block[0]&(~1)))&2)
| (((h->cbp_table[left_xy[LBOT]]>>(left_block[2]&(~1)))&2) << 2);
} else {
h->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
}
}
}
if(IS_INTER(mb_type) || (IS_DIRECT(mb_type) && h->direct_spatial_mv_pred)){
int list;
int b_stride = h->b_stride;
for(list=0; list<h->list_count; list++){
int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
int8_t *ref = s->current_picture.f.ref_index[list];
int16_t (*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
int16_t (*mv)[2] = s->current_picture.f.motion_val[list];
if(!USES_LIST(mb_type, list)){
continue;
}
assert(!(IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred));
if(USES_LIST(top_type, list)){
const int b_xy= h->mb2b_xy[top_xy] + 3*b_stride;
AV_COPY128(mv_cache[0 - 1*8], mv[b_xy + 0]);
ref_cache[0 - 1*8]=
ref_cache[1 - 1*8]= ref[4*top_xy + 2];
ref_cache[2 - 1*8]=
ref_cache[3 - 1*8]= ref[4*top_xy + 3];
}else{
AV_ZERO128(mv_cache[0 - 1*8]);
AV_WN32A(&ref_cache[0 - 1*8], ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101);
}
if(mb_type & (MB_TYPE_16x8|MB_TYPE_8x8)){
for(i=0; i<2; i++){
int cache_idx = -1 + i*2*8;
if(USES_LIST(left_type[LEFT(i)], list)){
const int b_xy= h->mb2b_xy[left_xy[LEFT(i)]] + 3;
const int b8_xy= 4*left_xy[LEFT(i)] + 1;
AV_COPY32(mv_cache[cache_idx ], mv[b_xy + b_stride*left_block[0+i*2]]);
AV_COPY32(mv_cache[cache_idx+8], mv[b_xy + b_stride*left_block[1+i*2]]);
ref_cache[cache_idx ]= ref[b8_xy + (left_block[0+i*2]&~1)];
ref_cache[cache_idx+8]= ref[b8_xy + (left_block[1+i*2]&~1)];
}else{
AV_ZERO32(mv_cache[cache_idx ]);
AV_ZERO32(mv_cache[cache_idx+8]);
ref_cache[cache_idx ]=
ref_cache[cache_idx+8]= (left_type[LEFT(i)]) ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
}
}else{
if(USES_LIST(left_type[LTOP], list)){
const int b_xy= h->mb2b_xy[left_xy[LTOP]] + 3;
const int b8_xy= 4*left_xy[LTOP] + 1;
AV_COPY32(mv_cache[-1], mv[b_xy + b_stride*left_block[0]]);
ref_cache[-1]= ref[b8_xy + (left_block[0]&~1)];
}else{
AV_ZERO32(mv_cache[-1]);
ref_cache[-1]= left_type[LTOP] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
}
if(USES_LIST(topright_type, list)){
const int b_xy= h->mb2b_xy[topright_xy] + 3*b_stride;
AV_COPY32(mv_cache[4 - 1*8], mv[b_xy]);
ref_cache[4 - 1*8]= ref[4*topright_xy + 2];
}else{
AV_ZERO32(mv_cache[4 - 1*8]);
ref_cache[4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
if(ref_cache[4 - 1*8] < 0){
if(USES_LIST(topleft_type, list)){
const int b_xy = h->mb2b_xy[topleft_xy] + 3 + b_stride + (h->topleft_partition & 2*b_stride);
const int b8_xy= 4*topleft_xy + 1 + (h->topleft_partition & 2);
AV_COPY32(mv_cache[-1 - 1*8], mv[b_xy]);
ref_cache[-1 - 1*8]= ref[b8_xy];
}else{
AV_ZERO32(mv_cache[-1 - 1*8]);
ref_cache[-1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
}
}
if((mb_type&(MB_TYPE_SKIP|MB_TYPE_DIRECT2)) && !FRAME_MBAFF)
continue;
if(!(mb_type&(MB_TYPE_SKIP|MB_TYPE_DIRECT2))){
uint8_t (*mvd_cache)[2] = &h->mvd_cache[list][scan8[0]];
uint8_t (*mvd)[2] = h->mvd_table[list];
ref_cache[2+8*0] =
ref_cache[2+8*2] = PART_NOT_AVAILABLE;
AV_ZERO32(mv_cache[2+8*0]);
AV_ZERO32(mv_cache[2+8*2]);
if( CABAC ) {
if(USES_LIST(top_type, list)){
const int b_xy= h->mb2br_xy[top_xy];
AV_COPY64(mvd_cache[0 - 1*8], mvd[b_xy + 0]);
}else{
AV_ZERO64(mvd_cache[0 - 1*8]);
}
if(USES_LIST(left_type[LTOP], list)){
const int b_xy= h->mb2br_xy[left_xy[LTOP]] + 6;
AV_COPY16(mvd_cache[-1 + 0*8], mvd[b_xy - left_block[0]]);
AV_COPY16(mvd_cache[-1 + 1*8], mvd[b_xy - left_block[1]]);
}else{
AV_ZERO16(mvd_cache[-1 + 0*8]);
AV_ZERO16(mvd_cache[-1 + 1*8]);
}
if(USES_LIST(left_type[LBOT], list)){
const int b_xy= h->mb2br_xy[left_xy[LBOT]] + 6;
AV_COPY16(mvd_cache[-1 + 2*8], mvd[b_xy - left_block[2]]);
AV_COPY16(mvd_cache[-1 + 3*8], mvd[b_xy - left_block[3]]);
}else{
AV_ZERO16(mvd_cache[-1 + 2*8]);
AV_ZERO16(mvd_cache[-1 + 3*8]);
}
AV_ZERO16(mvd_cache[2+8*0]);
AV_ZERO16(mvd_cache[2+8*2]);
if(h->slice_type_nos == AV_PICTURE_TYPE_B){
uint8_t *direct_cache = &h->direct_cache[scan8[0]];
uint8_t *direct_table = h->direct_table;
fill_rectangle(direct_cache, 4, 4, 8, MB_TYPE_16x16>>1, 1);
if(IS_DIRECT(top_type)){
AV_WN32A(&direct_cache[-1*8], 0x01010101u*(MB_TYPE_DIRECT2>>1));
}else if(IS_8X8(top_type)){
int b8_xy = 4*top_xy;
direct_cache[0 - 1*8]= direct_table[b8_xy + 2];
direct_cache[2 - 1*8]= direct_table[b8_xy + 3];
}else{
AV_WN32A(&direct_cache[-1*8], 0x01010101*(MB_TYPE_16x16>>1));
}
if(IS_DIRECT(left_type[LTOP]))
direct_cache[-1 + 0*8]= MB_TYPE_DIRECT2>>1;
else if(IS_8X8(left_type[LTOP]))
direct_cache[-1 + 0*8]= direct_table[4*left_xy[LTOP] + 1 + (left_block[0]&~1)];
else
direct_cache[-1 + 0*8]= MB_TYPE_16x16>>1;
if(IS_DIRECT(left_type[LBOT]))
direct_cache[-1 + 2*8]= MB_TYPE_DIRECT2>>1;
else if(IS_8X8(left_type[LBOT]))
direct_cache[-1 + 2*8]= direct_table[4*left_xy[LBOT] + 1 + (left_block[2]&~1)];
else
direct_cache[-1 + 2*8]= MB_TYPE_16x16>>1;
}
}
}
if(FRAME_MBAFF){
#define MAP_MVS\
MAP_F2F(scan8[0] - 1 - 1*8, topleft_type)\
MAP_F2F(scan8[0] + 0 - 1*8, top_type)\
MAP_F2F(scan8[0] + 1 - 1*8, top_type)\
MAP_F2F(scan8[0] + 2 - 1*8, top_type)\
MAP_F2F(scan8[0] + 3 - 1*8, top_type)\
MAP_F2F(scan8[0] + 4 - 1*8, topright_type)\
MAP_F2F(scan8[0] - 1 + 0*8, left_type[LTOP])\
MAP_F2F(scan8[0] - 1 + 1*8, left_type[LTOP])\
MAP_F2F(scan8[0] - 1 + 2*8, left_type[LBOT])\
MAP_F2F(scan8[0] - 1 + 3*8, left_type[LBOT])
if(MB_FIELD){
#define MAP_F2F(idx, mb_type)\
if(!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
h->ref_cache[list][idx] <<= 1;\
h->mv_cache[list][idx][1] /= 2;\
h->mvd_cache[list][idx][1] >>=1;\
}
MAP_MVS
#undef MAP_F2F
}else{
#define MAP_F2F(idx, mb_type)\
if(IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
h->ref_cache[list][idx] >>= 1;\
h->mv_cache[list][idx][1] <<= 1;\
h->mvd_cache[list][idx][1] <<= 1;\
}
MAP_MVS
#undef MAP_F2F
}
}
}
}
h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[LTOP]);
}
/** /**
* decodes a P_SKIP or B_SKIP macroblock * decodes a P_SKIP or B_SKIP macroblock
*/ */
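For orientation, the neighbour addressing performed above collapses to simple raster arithmetic once MBAFF is out of the picture; a minimal C sketch of that progressive-only case (the function and parameter names here are illustrative, not from the tree):

/* Progressive-frame neighbour addressing, as implied by the non-MBAFF path
 * above: mb_xy is the raster index of the current macroblock, mb_stride
 * the number of macroblocks per row. */
static void fill_neighbours_progressive(int mb_xy, int mb_stride,
                                        int *topleft_xy, int *top_xy,
                                        int *topright_xy, int *left_xy)
{
    *top_xy      = mb_xy - mb_stride; /* macroblock directly above */
    *topleft_xy  = *top_xy - 1;       /* above and to the left     */
    *topright_xy = *top_xy + 1;       /* above and to the right    */
    *left_xy     = mb_xy - 1;         /* immediately to the left   */
}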

View File

@ -149,10 +149,8 @@ static int estimate_best_order(double *ref, int min_order, int max_order)
/** /**
* Calculate LPC coefficients for multiple orders * Calculate LPC coefficients for multiple orders
* *
* @param lpc_type LPC method for determining coefficients * @param lpc_type LPC method for determining coefficients,
* 0 = LPC with fixed pre-defined coeffs * see #FFLPCType for details
* 1 = LPC with coeffs determined by Levinson-Durbin recursion
* 2+ = LPC with coeffs determined by Cholesky factorization using (use_lpc-1) passes.
*/ */
int ff_lpc_calc_coefs(LPCContext *s, int ff_lpc_calc_coefs(LPCContext *s,
const int32_t *samples, int blocksize, int min_order, const int32_t *samples, int blocksize, int min_order,
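The #FFLPCType reference replaces the numeric list that used to be spelled out here; from memory the enum in lpc.h looks roughly like the sketch below, so treat the exact member list as an assumption to check against the header:

/* Sketch of FFLPCType (lpc.h); values mirror the old numeric lpc_type codes. */
enum FFLPCType {
    FF_LPC_TYPE_DEFAULT  = -1, /* let the encoder pick a method   */
    FF_LPC_TYPE_NONE     =  0, /* no LPC                          */
    FF_LPC_TYPE_FIXED    =  1, /* fixed, pre-defined coefficients */
    FF_LPC_TYPE_LEVINSON =  2, /* Levinson-Durbin recursion       */
    FF_LPC_TYPE_CHOLESKY =  3, /* Cholesky factorization          */
    FF_LPC_TYPE_NB,            /* number of entries               */
};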

View File

@ -1892,7 +1892,7 @@ AVCodec ff_msmpeg4v1_decoder = {
NULL, NULL,
ff_h263_decode_end, ff_h263_decode_end,
ff_h263_decode_frame, ff_h263_decode_frame,
CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_EXPERIMENTAL, CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
.max_lowres= 3, .max_lowres= 3,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"), .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"),
.pix_fmts= ff_pixfmt_list_420, .pix_fmts= ff_pixfmt_list_420,

View File

@ -21,7 +21,7 @@
#define AVCODEC_VERSION_H #define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 53 #define LIBAVCODEC_VERSION_MAJOR 53
#define LIBAVCODEC_VERSION_MINOR 7 #define LIBAVCODEC_VERSION_MINOR 8
#define LIBAVCODEC_VERSION_MICRO 0 #define LIBAVCODEC_VERSION_MICRO 0
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \

View File

@ -29,11 +29,19 @@ SECTION_RODATA
SECTION .text SECTION .text
cextern pw_16
cextern pw_8 cextern pw_8
cextern pw_4 cextern pw_4
cextern pw_2 cextern pw_2
cextern pw_1 cextern pw_1
pw_m32101234: dw -3, -2, -1, 0, 1, 2, 3, 4
pw_m3: times 8 dw -3
pw_pixel_max: times 8 dw ((1 << 10)-1)
pw_512: times 8 dw 512
pd_17: times 4 dd 17
pd_16: times 4 dd 16
; dest, left, right, src ; dest, left, right, src
; output: %1 = (t[n-1] + t[n]*2 + t[n+1] + 2) >> 2 ; output: %1 = (t[n-1] + t[n]*2 + t[n+1] + 2) >> 2
%macro PRED4x4_LOWPASS 4 %macro PRED4x4_LOWPASS 4
@ -464,7 +472,92 @@ PRED8x8_TOP_DC mmxext, pshufw
INIT_XMM INIT_XMM
PRED8x8_TOP_DC sse2 , pshuflw PRED8x8_TOP_DC sse2 , pshuflw
;-----------------------------------------------------------------------------
; void pred8x8_plane(pixel *src, int stride)
;-----------------------------------------------------------------------------
INIT_XMM
cglobal pred8x8_plane_10_sse2, 2,7,7
sub r0, r1
lea r2, [r1+r1*2]
lea r3, [r0+r1*4]
mova m2, [r0]
pmaddwd m2, [pw_m32101234]
HADDD m2, m1
movd m0, [r0-4]
psrld m0, 14
psubw m2, m0 ; H
movd m0, [r3+r1*4-4]
movd m1, [r0+12]
paddw m0, m1
psllw m0, 4 ; 16*(src[7*stride-1] + src[-stride+7])
movzx r4d, word [r3+r1*1-2] ; src[4*stride-1]
movzx r5d, word [r0+r2*1-2] ; src[2*stride-1]
sub r4d, r5d
movzx r6d, word [r3+r1*2-2] ; src[5*stride-1]
movzx r5d, word [r0+r1*2-2] ; src[1*stride-1]
sub r6d, r5d
lea r4d, [r4+r6*2]
movzx r5d, word [r3+r2*1-2] ; src[6*stride-1]
movzx r6d, word [r0+r1*1-2] ; src[0*stride-1]
sub r5d, r6d
lea r5d, [r5+r5*2]
add r4d, r5d
movzx r6d, word [r3+r1*4-2] ; src[7*stride-1]
movzx r5d, word [r0+r1*0-2] ; src[ -stride-1]
sub r6d, r5d
lea r4d, [r4+r6*4]
movd m3, r4d ; V
punpckldq m2, m3
pmaddwd m2, [pd_17]
paddd m2, [pd_16]
psrad m2, 5 ; b, c
mova m3, [pw_pixel_max]
pxor m1, m1
SPLATW m0, m0, 1
SPLATW m4, m2, 2
SPLATW m2, m2, 0
pmullw m2, [pw_m32101234] ; b
pmullw m5, m4, [pw_m3] ; c
paddw m5, [pw_16]
mov r2d, 8
add r0, r1
.loop:
paddsw m6, m2, m5
paddsw m6, m0
psraw m6, 5
CLIPW m6, m1, m3
mova [r0], m6
paddw m5, m4
add r0, r1
dec r2d
jg .loop
REP_RET
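The routine above is a 10-bit SSE2 version of the standard H.264 8x8 plane predictor; as a plain-C sketch of the same arithmetic (written straight from the spec formula, with illustrative names and a 10-bit sample maximum):

#include <stdint.h>

#define PIXEL_MAX_10 ((1 << 10) - 1)

static inline uint16_t clip_pixel_10(int v)
{
    return v < 0 ? 0 : v > PIXEL_MAX_10 ? PIXEL_MAX_10 : v;
}

/* 8x8 plane prediction, 10-bit; src points at the top-left sample of the
 * block, stride is in samples.  H/V are the weighted horizontal/vertical
 * gradients of the edge samples, a/b/c the resulting plane parameters. */
static void pred8x8_plane_10_c(uint16_t *src, int stride)
{
    const uint16_t *top  = src - stride; /* row above the block      */
    const uint16_t *left = src - 1;      /* column left of the block */
    int H = 0, V = 0, x, y, a, b, c;

    for (x = 0; x < 4; x++) {
        H += (x + 1) * (top[4 + x]             - top[2 - x]);
        V += (x + 1) * (left[(4 + x) * stride] - left[(2 - x) * stride]);
    }
    a = 16 * (left[7 * stride] + top[7]);
    b = (17 * H + 16) >> 5;
    c = (17 * V + 16) >> 5;

    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            src[y * stride + x] =
                clip_pixel_10((a + b * (x - 3) + c * (y - 3) + 16) >> 5);
}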
;-----------------------------------------------------------------------------
; void pred8x8l_128_dc(pixel *src, int has_topleft, int has_topright, int stride)
;-----------------------------------------------------------------------------
%macro PRED8x8L_128_DC 1
cglobal pred8x8l_128_dc_10_%1, 4,4
mova m0, [pw_512]
lea r1, [r3+r3*2]
lea r2, [r0+r3*4]
MOV8 r0+r3*0, m0, m0
MOV8 r0+r3*1, m0, m0
MOV8 r0+r3*2, m0, m0
MOV8 r0+r1*1, m0, m0
MOV8 r2+r3*0, m0, m0
MOV8 r2+r3*1, m0, m0
MOV8 r2+r3*2, m0, m0
MOV8 r2+r1*1, m0, m0
RET
%endmacro
INIT_MMX
PRED8x8L_128_DC mmxext
INIT_XMM
PRED8x8L_128_DC sse2
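pw_512 is simply the mid-grey value for 10-bit samples, 1 << (bit_depth - 1); what the routine does is, in rough plain C:

/* DC_128 prediction, 10-bit: no usable neighbours, so the whole 8x8 block
 * is filled with 1 << (10 - 1) == 512.  has_topleft/has_topright are kept
 * only to match the pred8x8l prototype above; sample type is a sketch. */
static void pred8x8l_128_dc_10_c(uint16_t *src, int has_topleft,
                                 int has_topright, int stride)
{
    int x, y;
    (void)has_topleft;
    (void)has_topright;
    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            src[y * stride + x] = 1 << (10 - 1);
}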
;----------------------------------------------------------------------------- ;-----------------------------------------------------------------------------
; void pred8x8l_top_dc(pixel *src, int has_topleft, int has_topright, int stride) ; void pred8x8l_top_dc(pixel *src, int has_topleft, int has_topright, int stride)
@ -1258,7 +1351,7 @@ cglobal pred16x16_horizontal_10_%1, 2,3
MOV16 r0+r1*1, m1, m1, m1, m1 MOV16 r0+r1*1, m1, m1, m1, m1
lea r0, [r0+r1*2] lea r0, [r0+r1*2]
dec r2 dec r2
jge .vloop jg .vloop
REP_RET REP_RET
%endmacro %endmacro
@ -1266,3 +1359,139 @@ INIT_MMX
PRED16x16_HORIZONTAL mmxext PRED16x16_HORIZONTAL mmxext
INIT_XMM INIT_XMM
PRED16x16_HORIZONTAL sse2 PRED16x16_HORIZONTAL sse2
;-----------------------------------------------------------------------------
; void pred16x16_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro PRED16x16_DC 1
cglobal pred16x16_dc_10_%1, 2,7
mov r4, r0
sub r0, r1
mova m0, [r0+0]
paddw m0, [r0+mmsize]
%if mmsize==8
paddw m0, [r0+16]
paddw m0, [r0+24]
%endif
HADDW m0, m2
sub r0, 2
movzx r3d, word [r0+r1*1]
movzx r5d, word [r0+r1*2]
%rep 7
lea r0, [r0+r1*2]
movzx r2d, word [r0+r1*1]
add r3d, r2d
movzx r2d, word [r0+r1*2]
add r5d, r2d
%endrep
lea r3d, [r3+r5+16]
movd m1, r3d
paddw m0, m1
psrlw m0, 5
SPLATW m0, m0
mov r3d, 8
.loop:
MOV16 r4+r1*0, m0, m0, m0, m0
MOV16 r4+r1*1, m0, m0, m0, m0
lea r4, [r4+r1*2]
dec r3d
jg .loop
REP_RET
%endmacro
INIT_MMX
PRED16x16_DC mmxext
INIT_XMM
PRED16x16_DC sse2
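The 16x16 DC routine sums the sixteen samples above and the sixteen to the left, rounds, and fills the block; a scalar sketch of that arithmetic (the top_dc/left_dc/128_dc variants below simply drop the unavailable side and adjust the rounding shift):

/* 16x16 DC prediction, 10-bit: dc = (sum of 16 top + 16 left samples + 16) >> 5. */
static void pred16x16_dc_10_c(uint16_t *src, int stride)
{
    int sum = 16, i, x, y; /* +16 rounds the final >> 5 */
    uint16_t dc;

    for (i = 0; i < 16; i++) {
        sum += src[i - stride];     /* row above          */
        sum += src[i * stride - 1]; /* column to the left */
    }
    dc = sum >> 5;

    for (y = 0; y < 16; y++)
        for (x = 0; x < 16; x++)
            src[y * stride + x] = dc;
}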
;-----------------------------------------------------------------------------
; void pred16x16_top_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro PRED16x16_TOP_DC 1
cglobal pred16x16_top_dc_10_%1, 2,3
sub r0, r1
mova m0, [r0+0]
paddw m0, [r0+mmsize]
%if mmsize==8
paddw m0, [r0+16]
paddw m0, [r0+24]
%endif
HADDW m0, m2
SPLATW m0, m0
paddw m0, [pw_8]
psrlw m0, 4
mov r2d, 8
.loop:
MOV16 r0+r1*1, m0, m0, m0, m0
MOV16 r0+r1*2, m0, m0, m0, m0
lea r0, [r0+r1*2]
dec r2d
jg .loop
REP_RET
%endmacro
INIT_MMX
PRED16x16_TOP_DC mmxext
INIT_XMM
PRED16x16_TOP_DC sse2
;-----------------------------------------------------------------------------
; void pred16x16_left_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro PRED16x16_LEFT_DC 1
cglobal pred16x16_left_dc_10_%1, 2,7
mov r4, r0
sub r0, 2
movzx r5d, word [r0+r1*0]
movzx r6d, word [r0+r1*1]
%rep 7
lea r0, [r0+r1*2]
movzx r2d, word [r0+r1*0]
movzx r3d, word [r0+r1*1]
add r5d, r2d
add r6d, r3d
%endrep
lea r2d, [r5+r6+8]
shr r2d, 4
movd m0, r2d
SPLATW m0, m0
mov r3d, 8
.loop:
MOV16 r4+r1*0, m0, m0, m0, m0
MOV16 r4+r1*1, m0, m0, m0, m0
lea r4, [r4+r1*2]
dec r3d
jg .loop
REP_RET
%endmacro
INIT_MMX
PRED16x16_LEFT_DC mmxext
INIT_XMM
PRED16x16_LEFT_DC sse2
;-----------------------------------------------------------------------------
; void pred16x16_128_dc(pixel *src, int stride)
;-----------------------------------------------------------------------------
%macro PRED16x16_128_DC 1
cglobal pred16x16_128_dc_10_%1, 2,3
mova m0, [pw_512]
mov r2d, 8
.loop:
MOV16 r0+r1*0, m0, m0, m0, m0
MOV16 r0+r1*1, m0, m0, m0, m0
lea r0, [r0+r1*2]
dec r2d
jg .loop
REP_RET
%endmacro
INIT_MMX
PRED16x16_128_DC mmxext
INIT_XMM
PRED16x16_128_DC sse2

View File

@ -47,6 +47,7 @@ PRED8x8(dc, 10, mmxext)
PRED8x8(dc, 10, sse2) PRED8x8(dc, 10, sse2)
PRED8x8(top_dc, 10, mmxext) PRED8x8(top_dc, 10, mmxext)
PRED8x8(top_dc, 10, sse2) PRED8x8(top_dc, 10, sse2)
PRED8x8(plane, 10, sse2)
PRED8x8(vertical, 10, sse2) PRED8x8(vertical, 10, sse2)
PRED8x8(horizontal, 10, sse2) PRED8x8(horizontal, 10, sse2)
@ -55,6 +56,8 @@ void ff_pred8x8l_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, int has_tople
PRED8x8L(dc, 10, sse2) PRED8x8L(dc, 10, sse2)
PRED8x8L(dc, 10, ssse3) PRED8x8L(dc, 10, ssse3)
PRED8x8L(128_dc, 10, mmxext)
PRED8x8L(128_dc, 10, sse2)
PRED8x8L(top_dc, 10, sse2) PRED8x8L(top_dc, 10, sse2)
PRED8x8L(top_dc, 10, ssse3) PRED8x8L(top_dc, 10, ssse3)
PRED8x8L(vertical, 10, sse2) PRED8x8L(vertical, 10, sse2)
@ -73,6 +76,14 @@ PRED8x8L(horizontal_up, 10, ssse3)
#define PRED16x16(TYPE, DEPTH, OPT)\ #define PRED16x16(TYPE, DEPTH, OPT)\
void ff_pred16x16_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, int stride); void ff_pred16x16_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, int stride);
PRED16x16(dc, 10, mmxext)
PRED16x16(dc, 10, sse2)
PRED16x16(top_dc, 10, mmxext)
PRED16x16(top_dc, 10, sse2)
PRED16x16(128_dc, 10, mmxext)
PRED16x16(128_dc, 10, sse2)
PRED16x16(left_dc, 10, mmxext)
PRED16x16(left_dc, 10, sse2)
PRED16x16(vertical, 10, mmxext) PRED16x16(vertical, 10, mmxext)
PRED16x16(vertical, 10, sse2) PRED16x16(vertical, 10, sse2)
PRED16x16(horizontal, 10, mmxext) PRED16x16(horizontal, 10, mmxext)
@ -289,6 +300,12 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_mmxext; h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_mmxext;
h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_10_mmxext; h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_10_mmxext;
h->pred8x8l[DC_128_PRED ] = ff_pred8x8l_128_dc_10_mmxext;
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_10_mmxext;
h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_10_mmxext;
h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_10_mmxext;
h->pred16x16[LEFT_DC_PRED8x8 ] = ff_pred16x16_left_dc_10_mmxext;
h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_10_mmxext; h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_10_mmxext;
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_10_mmxext; h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_10_mmxext;
} }
@ -301,18 +318,24 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_sse2; h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_sse2;
h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_10_sse2; h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_10_sse2;
h->pred8x8[PLANE_PRED8x8 ] = ff_pred8x8_plane_10_sse2;
h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vertical_10_sse2; h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vertical_10_sse2;
h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_horizontal_10_sse2; h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_horizontal_10_sse2;
h->pred8x8l[VERT_PRED ] = ff_pred8x8l_vertical_10_sse2; h->pred8x8l[VERT_PRED ] = ff_pred8x8l_vertical_10_sse2;
h->pred8x8l[HOR_PRED ] = ff_pred8x8l_horizontal_10_sse2; h->pred8x8l[HOR_PRED ] = ff_pred8x8l_horizontal_10_sse2;
h->pred8x8l[DC_PRED ] = ff_pred8x8l_dc_10_sse2; h->pred8x8l[DC_PRED ] = ff_pred8x8l_dc_10_sse2;
h->pred8x8l[DC_128_PRED ] = ff_pred8x8l_128_dc_10_sse2;
h->pred8x8l[TOP_DC_PRED ] = ff_pred8x8l_top_dc_10_sse2; h->pred8x8l[TOP_DC_PRED ] = ff_pred8x8l_top_dc_10_sse2;
h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_sse2; h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_sse2;
h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_sse2; h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_sse2;
h->pred8x8l[VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_10_sse2; h->pred8x8l[VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_10_sse2;
h->pred8x8l[HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_10_sse2; h->pred8x8l[HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_10_sse2;
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_10_sse2;
h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_10_sse2;
h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_10_sse2;
h->pred16x16[LEFT_DC_PRED8x8 ] = ff_pred16x16_left_dc_10_sse2;
h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_10_sse2; h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_10_sse2;
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_10_sse2; h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_10_sse2;
} }

View File

@ -141,6 +141,8 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind
* corresponding filter instance which is added to graph with * corresponding filter instance which is added to graph with
* create_filter(). * create_filter().
* *
* @param filt_ctx Pointer that is set to the created and configured filter
* context on success, set to NULL on failure.
* @param filt_ctx put here a pointer to the created filter context on * @param filt_ctx put here a pointer to the created filter context on
* success, NULL otherwise * success, NULL otherwise
* @param buf pointer to the buffer to parse, *buf will be updated to * @param buf pointer to the buffer to parse, *buf will be updated to

View File

@ -80,16 +80,16 @@ void ff_rdt_subscribe_rule(char *cmd, int size,
* *
* @param buf input buffer * @param buf input buffer
* @param len length of input buffer * @param len length of input buffer
* @param set_id will be set to the set ID this packet belongs to * @param pset_id will be set to the set ID this packet belongs to
* @param seq_no will be set to the sequence number of the packet * @param pseq_no will be set to the sequence number of the packet
* @param stream_id will be set to the stream ID this packet belongs to * @param pstream_id will be set to the stream ID this packet belongs to
* @param is_keyframe will be whether this packet belongs to a keyframe * @param pis_keyframe will be whether this packet belongs to a keyframe
* @param timestamp will be set to the timestamp of the packet * @param ptimestamp will be set to the timestamp of the packet
* @return the amount of bytes consumed, or negative on error * @return the amount of bytes consumed, or negative on error
*/ */
int ff_rdt_parse_header(const uint8_t *buf, int len, int ff_rdt_parse_header(const uint8_t *buf, int len,
int *set_id, int *seq_no, int *stream_id, int *pset_id, int *pseq_no, int *pstream_id,
int *is_keyframe, uint32_t *timestamp); int *pis_keyframe, uint32_t *ptimestamp);
/** /**
* Parse RDT-style packet data (header + media data). * Parse RDT-style packet data (header + media data).

View File

@ -488,9 +488,9 @@ void ff_rtsp_close_streams(AVFormatContext *s);
/** /**
* Close all connection handles within the RTSP (de)muxer * Close all connection handles within the RTSP (de)muxer
* *
* @param rt RTSP (de)muxer context * @param s RTSP (de)muxer context
*/ */
void ff_rtsp_close_connections(AVFormatContext *rt); void ff_rtsp_close_connections(AVFormatContext *s);
/** /**
* Get the description of the stream and set up the RTSPStream child * Get the description of the stream and set up the RTSPStream child

View File

@ -74,12 +74,12 @@ typedef struct URLProtocol {
* @return 0 in case of success, a negative value corresponding to an * @return 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure * AVERROR code in case of failure
*/ */
int ffurl_alloc(URLContext **h, const char *url, int flags); int ffurl_alloc(URLContext **puc, const char *filename, int flags);
/** /**
* Connect an URLContext that has been allocated by ffurl_alloc * Connect an URLContext that has been allocated by ffurl_alloc
*/ */
int ffurl_connect(URLContext *h); int ffurl_connect(URLContext *uc);
/** /**
* Create an URLContext for accessing to the resource indicated by * Create an URLContext for accessing to the resource indicated by
@ -92,7 +92,7 @@ int ffurl_connect(URLContext *h);
* @return 0 in case of success, a negative value corresponding to an * @return 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure * AVERROR code in case of failure
*/ */
int ffurl_open(URLContext **h, const char *url, int flags); int ffurl_open(URLContext **puc, const char *filename, int flags);
/** /**
* Read up to size bytes from the resource accessed by h, and store * Read up to size bytes from the resource accessed by h, and store
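A minimal internal-API sketch of the ffurl_*() sequence these prototypes belong to; AVIO_FLAG_READ, ffurl_read() and ffurl_close() are assumed to come from the same header, and the helper name is made up:

/* Hypothetical helper: open a resource, read up to one buffer, close.
 * Error handling trimmed to the minimum. */
static int read_head(const char *filename, unsigned char *buf, int size)
{
    URLContext *uc = NULL;
    int ret = ffurl_open(&uc, filename, AVIO_FLAG_READ); /* flag name assumed */
    if (ret < 0)
        return ret;
    ret = ffurl_read(uc, buf, size); /* bytes read, or a negative AVERROR */
    ffurl_close(uc);
    return ret;
}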

View File

@ -2438,7 +2438,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
least one frame of codec data, this makes sure the codec initializes least one frame of codec data, this makes sure the codec initializes
the channel configuration and does not only trust the values from the container. the channel configuration and does not only trust the values from the container.
*/ */
try_decode_frame(st, pkt, (options && i <= orig_nb_streams )? &options[i] : NULL); try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i] : NULL);
st->codec_info_nb_frames++; st->codec_info_nb_frames++;
count++; count++;

View File

@ -24,7 +24,7 @@
#include "libavutil/avutil.h" #include "libavutil/avutil.h"
#define LIBAVFORMAT_VERSION_MAJOR 53 #define LIBAVFORMAT_VERSION_MAJOR 53
#define LIBAVFORMAT_VERSION_MINOR 5 #define LIBAVFORMAT_VERSION_MINOR 6
#define LIBAVFORMAT_VERSION_MICRO 0 #define LIBAVFORMAT_VERSION_MICRO 0
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \

View File

@ -484,7 +484,7 @@ static time_t mktimegm(struct tm *tm)
return t; return t;
} }
int av_parse_time(int64_t *timeval, const char *datestr, int duration) int av_parse_time(int64_t *timeval, const char *timestr, int duration)
{ {
const char *p; const char *p;
int64_t t; int64_t t;
@ -506,19 +506,19 @@ int av_parse_time(int64_t *timeval, const char *datestr, int duration)
#undef time #undef time
time_t now = time(0); time_t now = time(0);
len = strlen(datestr); len = strlen(timestr);
if (len > 0) if (len > 0)
lastch = datestr[len - 1]; lastch = timestr[len - 1];
else else
lastch = '\0'; lastch = '\0';
is_utc = (lastch == 'z' || lastch == 'Z'); is_utc = (lastch == 'z' || lastch == 'Z');
memset(&dt, 0, sizeof(dt)); memset(&dt, 0, sizeof(dt));
p = datestr; p = timestr;
q = NULL; q = NULL;
if (!duration) { if (!duration) {
if (!strncasecmp(datestr, "now", len)) { if (!strncasecmp(timestr, "now", len)) {
*timeval = (int64_t) now * 1000000; *timeval = (int64_t) now * 1000000;
return 0; return 0;
} }
@ -555,15 +555,15 @@ int av_parse_time(int64_t *timeval, const char *datestr, int duration)
} }
} }
} else { } else {
/* parse datestr as a duration */ /* parse timestr as a duration */
if (p[0] == '-') { if (p[0] == '-') {
negative = 1; negative = 1;
++p; ++p;
} }
/* parse datestr as HH:MM:SS */ /* parse timestr as HH:MM:SS */
q = small_strptime(p, time_fmt[0], &dt); q = small_strptime(p, time_fmt[0], &dt);
if (!q) { if (!q) {
/* parse datestr as S+ */ /* parse timestr as S+ */
dt.tm_sec = strtol(p, (char **)&q, 10); dt.tm_sec = strtol(p, (char **)&q, 10);
if (q == p) { if (q == p) {
/* the parsing didn't succeed */ /* the parsing didn't succeed */
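For the renamed timestr parameter, the two accepted forms look like this in use (a small sketch; the helper is illustrative):

#include "libavutil/parseutils.h"

/* av_parse_time() fills *timeval with microseconds: since the Unix epoch
 * for a date (duration = 0), or as a plain duration (duration = 1). */
static int parse_time_examples(void)
{
    int64_t when, dur;

    if (av_parse_time(&when, "2011-07-14 20:44:58", 0) < 0) /* date form     */
        return -1;
    if (av_parse_time(&dur, "00:00:01.5", 1) < 0)           /* duration form */
        return -1;
    /* dur is now 1500000 (1.5 seconds in microseconds) */
    return 0;
}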

View File

@ -83,7 +83,7 @@ int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,
* January, 1970 up to the time of the parsed date. If timestr cannot * January, 1970 up to the time of the parsed date. If timestr cannot
* be successfully parsed, set *time to INT64_MIN. * be successfully parsed, set *time to INT64_MIN.
* @param datestr a string representing a date or a duration. * @param timestr a string representing a date or a duration.
* - If a date the syntax is: * - If a date the syntax is:
* @code * @code
* [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH[:MM[:SS[.m...]]]}|{HH[MM[SS[.m...]]]}}[Z] * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH[:MM[:SS[.m...]]]}|{HH[MM[SS[.m...]]]}}[Z]

View File

@ -218,7 +218,7 @@ struct SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat
* top-bottom or bottom-top order. If slices are provided in * top-bottom or bottom-top order. If slices are provided in
* non-sequential order the behavior of the function is undefined. * non-sequential order the behavior of the function is undefined.
* *
* @param context the scaling context previously created with * @param c the scaling context previously created with
* sws_getContext() * sws_getContext()
* @param srcSlice the array containing the pointers to the planes of * @param srcSlice the array containing the pointers to the planes of
* the source slice * the source slice
@ -235,8 +235,9 @@ struct SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat
* the destination image * the destination image
* @return the height of the output slice * @return the height of the output slice
*/ */
int sws_scale(struct SwsContext *context, const uint8_t* const srcSlice[], const int srcStride[], int sws_scale(struct SwsContext *c, const uint8_t* const srcSlice[],
int srcSliceY, int srcSliceH, uint8_t* const dst[], const int dstStride[]); const int srcStride[], int srcSliceY, int srcSliceH,
uint8_t* const dst[], const int dstStride[]);
#if LIBSWSCALE_VERSION_MAJOR < 1 #if LIBSWSCALE_VERSION_MAJOR < 1
/** /**
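A minimal sws_getContext()/sws_scale() call sequence matching the prototype above (the sizes, pixel formats and SWS_BILINEAR flag are just example choices):

#include "libswscale/swscale.h"

/* Convert one 640x480 YUV420P frame to RGB24 in a single full-height slice. */
static void convert_frame(const uint8_t *src_data[4], const int src_stride[4],
                          uint8_t *dst_data[4], const int dst_stride[4])
{
    struct SwsContext *c = sws_getContext(640, 480, PIX_FMT_YUV420P,
                                          640, 480, PIX_FMT_RGB24,
                                          SWS_BILINEAR, NULL, NULL, NULL);
    if (!c)
        return;
    /* whole frame at once: srcSliceY = 0, srcSliceH = full source height */
    sws_scale(c, src_data, src_stride, 0, 480, dst_data, dst_stride);
    sws_freeContext(c);
}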

View File

@ -686,18 +686,19 @@ static int check_image_pointers(uint8_t *data[4], enum PixelFormat pix_fmt,
* swscale wrapper, so we don't need to export the SwsContext. * swscale wrapper, so we don't need to export the SwsContext.
* Assumes planar YUV to be in YUV order instead of YVU. * Assumes planar YUV to be in YUV order instead of YVU.
*/ */
int sws_scale(SwsContext *c, const uint8_t* const src[], const int srcStride[], int srcSliceY, int sws_scale(struct SwsContext *c, const uint8_t* const srcSlice[],
int srcSliceH, uint8_t* const dst[], const int dstStride[]) const int srcStride[], int srcSliceY, int srcSliceH,
uint8_t* const dst[], const int dstStride[])
{ {
int i; int i;
const uint8_t* src2[4]= {src[0], src[1], src[2], src[3]}; const uint8_t* src2[4]= {srcSlice[0], srcSlice[1], srcSlice[2], srcSlice[3]};
uint8_t* dst2[4]= {dst[0], dst[1], dst[2], dst[3]}; uint8_t* dst2[4]= {dst[0], dst[1], dst[2], dst[3]};
// do not mess up sliceDir if we have a "trailing" 0-size slice // do not mess up sliceDir if we have a "trailing" 0-size slice
if (srcSliceH == 0) if (srcSliceH == 0)
return 0; return 0;
if (!check_image_pointers(src, c->srcFormat, srcStride)) { if (!check_image_pointers(srcSlice, c->srcFormat, srcStride)) {
av_log(c, AV_LOG_ERROR, "bad src image pointers\n"); av_log(c, AV_LOG_ERROR, "bad src image pointers\n");
return 0; return 0;
} }
@ -718,7 +719,7 @@ int sws_scale(SwsContext *c, const uint8_t* const src[], const int srcStride[],
for (i=0; i<256; i++) { for (i=0; i<256; i++) {
int p, r, g, b, y, u, v, a = 0xff; int p, r, g, b, y, u, v, a = 0xff;
if(c->srcFormat == PIX_FMT_PAL8) { if(c->srcFormat == PIX_FMT_PAL8) {
p=((const uint32_t*)(src[1]))[i]; p=((const uint32_t*)(srcSlice[1]))[i];
a= (p>>24)&0xFF; a= (p>>24)&0xFF;
r= (p>>16)&0xFF; r= (p>>16)&0xFF;
g= (p>> 8)&0xFF; g= (p>> 8)&0xFF;

View File

@ -717,7 +717,9 @@ static void getSubSampleFactors(int *h, int *v, enum PixelFormat format)
*v = av_pix_fmt_descriptors[format].log2_chroma_h; *v = av_pix_fmt_descriptors[format].log2_chroma_h;
} }
int sws_setColorspaceDetails(SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation) int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
int srcRange, const int table[4], int dstRange,
int brightness, int contrast, int saturation)
{ {
memcpy(c->srcColorspaceTable, inv_table, sizeof(int)*4); memcpy(c->srcColorspaceTable, inv_table, sizeof(int)*4);
memcpy(c->dstColorspaceTable, table, sizeof(int)*4); memcpy(c->dstColorspaceTable, table, sizeof(int)*4);
@ -740,7 +742,9 @@ int sws_setColorspaceDetails(SwsContext *c, const int inv_table[4], int srcRange
return 0; return 0;
} }
int sws_getColorspaceDetails(SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation) int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,
int *srcRange, int **table, int *dstRange,
int *brightness, int *contrast, int *saturation)
{ {
if (!c || isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1; if (!c || isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;