Mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2024-12-23 12:43:46 +02:00)
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  doxygen: misc consistency, spelling and wording fixes
  vcr1: drop unnecessary emms_c() calls without MMX code
  Replace all uses of av_close_input_file() with avformat_close_input().
  lavf: add avformat_close_input().
  lavf: deprecate av_close_input_stream().
  lavf doxy: add some basic demuxing documentation.
  lavf doxy: add some general lavf information.
  lavf doxy: add misc utility functions to a group.
  lavf doxy: add av_guess_codec/format to the encoding group.
  lavf doxy: add core functions to a doxy group.
  Add basic libavdevice documentation.
  lavc: convert error_recognition to err_recognition.
  avconv: update -map option help text
  x86: Require 7 registers for the cabac asm
  x86: bswap: remove test for bswap instruction
  bswap: make generic implementation more compiler-friendly
  h264: remove useless cast
  proresdec: fix decode_slice() prototype

Conflicts:
  configure
  doc/APIchanges
  ffprobe.c
  libavcodec/avcodec.h
  libavcodec/celp_math.h
  libavcodec/h264.c
  libavfilter/src_movie.c
  libavformat/anm.c
  libavformat/avformat.h
  libavformat/version.h
  libavutil/avstring.h
  libavutil/bswap.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 8bc7fe4daf
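The central API change carried by this merge is the replacement of av_close_input_file() with avformat_close_input(), which takes the address of the AVFormatContext pointer and resets it to NULL. A minimal sketch of the migration, assuming a typical open/probe/close sequence (the function and file name are only illustrative):

    #include <libavformat/avformat.h>

    int open_and_close(const char *filename)
    {
        AVFormatContext *ic = NULL;
        int ret;

        av_register_all();                       /* still required with this era of lavf */
        ret = avformat_open_input(&ic, filename, NULL, NULL);
        if (ret < 0)
            return ret;
        ret = avformat_find_stream_info(ic, NULL);
        if (ret < 0) {
            /* before: av_close_input_file(ic); the caller had to NULL the pointer itself */
            avformat_close_input(&ic);           /* after: pass &ic; ic is reset to NULL  */
            return ret;
        }
        /* ... demux with av_read_frame() ... */
        avformat_close_input(&ic);
        return 0;
    }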
avconv.c — 6 changed lines

@@ -610,7 +610,7 @@ void exit_program(int ret)
av_dict_free(&output_files[i].opts);
}
for(i=0;i<nb_input_files;i++) {
-av_close_input_file(input_files[i].ctx);
+avformat_close_input(&input_files[i].ctx);
}
for (i = 0; i < nb_input_streams; i++) {
av_freep(&input_streams[i].decoded_frame);
@@ -3140,7 +3140,7 @@ static int opt_input_file(OptionsContext *o, const char *opt, const char *filena
ret = avformat_find_stream_info(ic, opts);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
-av_close_input_file(ic);
+avformat_close_input(&ic);
exit_program(1);
}

@@ -4230,7 +4230,7 @@ static const OptionDef options[] = {
{ "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
{ "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
{ "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
-{ "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "file.stream[:syncfile.syncstream]" },
+{ "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
{ "map_metadata", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map_metadata}, "set metadata information of outfile from infile",
"outfile[,metadata]:infile[,metadata]" },
{ "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
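The updated -map help string above documents the then-new stream-specifier syntax, [-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]. As an illustration only (file names invented), selecting the first video stream of input 0 and the first audio stream of input 1 looks like:

    ffmpeg -i video.mkv -i audio.mka -map 0:v:0 -map 1:a:0 -c copy out.mkv

Per the leading "[-]" in the help text, prefixing a map argument with "-" excludes the matching streams instead of selecting them.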
configure — 3 changed lines (vendored)

@@ -1139,7 +1139,6 @@ HAVE_LIST="
asm_mod_y
attribute_may_alias
attribute_packed
-bswap
cbrtf
closesocket
cmov
@@ -2884,8 +2883,6 @@ EOF
enabled ssse3 && check_asm ssse3 '"pabsw %xmm0, %xmm0"'
enabled mmx2 && check_asm mmx2 '"pmaxub %mm0, %mm1"'

-check_asm bswap '"bswap %%eax" ::: "%eax"'
-
if ! disabled_any asm mmx yasm; then
if check_cmd $yasmexe --version; then
enabled x86_64 && yasm_extra="-m amd64"
@@ -31,6 +31,10 @@ API changes, most recent first:
2011-10-20 - b35e9e1 - lavu 51.22.0
Add av_strtok() to avstring.h.

+2011-xx-xx - xxxxxxx - lavf 53.17.0
+Add avformat_open_input().
+Deprecate av_close_input_file() and av_close_input_stream().
+
2011-xx-xx - xxxxxxx - lavc 53.25.0
Add nb_samples and extended_data fields to AVFrame.
Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
ffmpeg.c — 6 changed lines

@@ -657,7 +657,7 @@ void av_noreturn exit_program(int ret)
av_dict_free(&output_files[i].opts);
}
for(i=0;i<nb_input_files;i++) {
-av_close_input_file(input_files[i].ctx);
+avformat_close_input(&input_files[i].ctx);
}
for (i = 0; i < nb_input_streams; i++) {
av_freep(&input_streams[i].decoded_frame);
@@ -3423,7 +3423,7 @@ static int opt_input_file(OptionsContext *o, const char *opt, const char *filena
ret = avformat_find_stream_info(ic, opts);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
-av_close_input_file(ic);
+avformat_close_input(&ic);
exit_program(1);
}

@@ -4667,7 +4667,7 @@ static const OptionDef options[] = {
{ "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
{ "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
{ "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
-{ "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "file.stream[:syncfile.syncstream]" },
+{ "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
{ "map_channel", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map_channel}, "map an audio channel from one stream to another", "file.stream.channel[:syncfile.syncstream]" },
{ "map_meta_data", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map_meta_data}, "DEPRECATED set meta data information of outfile from infile",
"outfile[,metadata]:infile[,metadata]" },
ffplay.c — 3 changed lines

@@ -2679,8 +2679,7 @@ static int read_thread(void *arg)
if (is->subtitle_stream >= 0)
stream_component_close(is, is->subtitle_stream);
if (is->ic) {
-av_close_input_file(is->ic);
-is->ic = NULL; /* safety */
+avformat_close_input(&is->ic);
}
avio_set_interrupt_cb(NULL);
@@ -1160,7 +1160,7 @@ static int probe_file(const char *filename)
PRINT_CHAPTER(format);
writer_print_footer(wctx);

-av_close_input_file(fmt_ctx);
+avformat_close_input(&fmt_ctx);
writer_close(&wctx);

end:
ffserver.c — 17 changed lines

@@ -849,7 +849,7 @@ static void close_connection(HTTPContext *c)
if (st->codec->codec)
avcodec_close(st->codec);
}
-av_close_input_file(c->fmt_in);
+avformat_close_input(&c->fmt_in);
}

/* free RTP output streams if any */
@@ -2169,7 +2169,7 @@ static int open_input_stream(HTTPContext *c, const char *info)
c->fmt_in = s;
if (strcmp(s->iformat->name, "ffm") && avformat_find_stream_info(c->fmt_in, NULL) < 0) {
http_log("Could not find stream info '%s'\n", input_filename);
-av_close_input_file(s);
+avformat_close_input(&s);
return -1;
}

@@ -2311,8 +2311,7 @@ static int http_prepare_data(HTTPContext *c)
return 0;
} else {
if (c->stream->loop) {
-av_close_input_file(c->fmt_in);
-c->fmt_in = NULL;
+avformat_close_input(&c->fmt_in);
if (open_input_stream(c, "") < 0)
goto no_loop;
goto redo;
@@ -2736,7 +2735,7 @@ static int http_receive_data(HTTPContext *c)

/* Now we have the actual streams */
if (s->nb_streams != feed->nb_streams) {
-av_close_input_stream(s);
+avformat_close_input(&s);
av_free(pb);
http_log("Feed '%s' stream number does not match registered feed\n",
c->stream->feed_filename);
@@ -2749,7 +2748,7 @@ static int http_receive_data(HTTPContext *c)
avcodec_copy_context(fst->codec, st->codec);
}

-av_close_input_stream(s);
+avformat_close_input(&s);
av_free(pb);
}
c->buffer_ptr = c->buffer;
@@ -3629,7 +3628,7 @@ static void build_file_streams(void)
if (avformat_find_stream_info(infile, NULL) < 0) {
http_log("Could not find codec parameters from '%s'\n",
stream->feed_filename);
-av_close_input_file(infile);
+avformat_close_input(&infile);
goto fail;
}
extract_mpeg4_header(infile);
@@ -3637,7 +3636,7 @@ static void build_file_streams(void)
for(i=0;i<infile->nb_streams;i++)
add_av_stream1(stream, infile->streams[i]->codec, 1);

-av_close_input_file(infile);
+avformat_close_input(&infile);
}
}
}
@@ -3727,7 +3726,7 @@ static void build_feed_streams(void)
http_log("Deleting feed file '%s' as stream counts differ (%d != %d)\n",
feed->feed_filename, s->nb_streams, feed->nb_streams);

-av_close_input_file(s);
+avformat_close_input(&s);
} else
http_log("Deleting feed file '%s' as it appears to be corrupt\n",
feed->feed_filename);
@@ -216,7 +216,7 @@ static const float psy_fir_coeffs[] = {
};

/**
-* calculates the attack threshold for ABR from the above table for the LAME psy model
+* Calculate the ABR attack threshold from the above LAME psymodel table.
*/
static float lame_calc_attack_threshold(int bitrate)
{
@@ -111,7 +111,7 @@ static av_cold int amrwb_decode_init(AVCodecContext *avctx)

/**
* Decode the frame header in the "MIME/storage" format. This format
-* is simpler and does not carry the auxiliary information of the frame
+* is simpler and does not carry the auxiliary frame information.
*
* @param[in] ctx The Context
* @param[in] buf Pointer to the input buffer
@@ -133,7 +133,7 @@ static int decode_mime_header(AMRWBContext *ctx, const uint8_t *buf)
}

/**
-* Decodes quantized ISF vectors using 36-bit indexes (6K60 mode only)
+* Decode quantized ISF vectors using 36-bit indexes (6K60 mode only).
*
* @param[in] ind Array of 5 indexes
* @param[out] isf_q Buffer for isf_q[LP_ORDER]
@@ -160,7 +160,7 @@ static void decode_isf_indices_36b(uint16_t *ind, float *isf_q)
}

/**
-* Decodes quantized ISF vectors using 46-bit indexes (except 6K60 mode)
+* Decode quantized ISF vectors using 46-bit indexes (except 6K60 mode).
*
* @param[in] ind Array of 7 indexes
* @param[out] isf_q Buffer for isf_q[LP_ORDER]
@@ -193,8 +193,8 @@ static void decode_isf_indices_46b(uint16_t *ind, float *isf_q)
}

/**
-* Apply mean and past ISF values using the prediction factor
-* Updates past ISF vector
+* Apply mean and past ISF values using the prediction factor.
+* Updates past ISF vector.
*
* @param[in,out] isf_q Current quantized ISF
* @param[in,out] isf_past Past quantized ISF
@@ -215,7 +215,7 @@ static void isf_add_mean_and_past(float *isf_q, float *isf_past)

/**
* Interpolate the fourth ISP vector from current and past frames
-* to obtain a ISP vector for each subframe
+* to obtain an ISP vector for each subframe.
*
* @param[in,out] isp_q ISPs for each subframe
* @param[in] isp4_past Past ISP for subframe 4
@@ -232,9 +232,9 @@ static void interpolate_isp(double isp_q[4][LP_ORDER], const double *isp4_past)
}

/**
-* Decode an adaptive codebook index into pitch lag (except 6k60, 8k85 modes)
-* Calculate integer lag and fractional lag always using 1/4 resolution
-* In 1st and 3rd subframes the index is relative to last subframe integer lag
+* Decode an adaptive codebook index into pitch lag (except 6k60, 8k85 modes).
+* Calculate integer lag and fractional lag always using 1/4 resolution.
+* In 1st and 3rd subframes the index is relative to last subframe integer lag.
*
* @param[out] lag_int Decoded integer pitch lag
* @param[out] lag_frac Decoded fractional pitch lag
@@ -271,9 +271,9 @@ static void decode_pitch_lag_high(int *lag_int, int *lag_frac, int pitch_index,
}

/**
-* Decode a adaptive codebook index into pitch lag for 8k85 and 6k60 modes
-* Description is analogous to decode_pitch_lag_high, but in 6k60 relative
-* index is used for all subframes except the first
+* Decode an adaptive codebook index into pitch lag for 8k85 and 6k60 modes.
+* The description is analogous to decode_pitch_lag_high, but in 6k60 the
+* relative index is used for all subframes except the first.
*/
static void decode_pitch_lag_low(int *lag_int, int *lag_frac, int pitch_index,
uint8_t *base_lag_int, int subframe, enum Mode mode)
@@ -298,7 +298,7 @@ static void decode_pitch_lag_low(int *lag_int, int *lag_frac, int pitch_index,

/**
* Find the pitch vector by interpolating the past excitation at the
-* pitch delay, which is obtained in this function
+* pitch delay, which is obtained in this function.
*
* @param[in,out] ctx The context
* @param[in] amr_subframe Current subframe data
@@ -351,10 +351,10 @@ static void decode_pitch_vector(AMRWBContext *ctx,
/**
* The next six functions decode_[i]p_track decode exactly i pulses
* positions and amplitudes (-1 or 1) in a subframe track using
-* an encoded pulse indexing (TS 26.190 section 5.8.2)
+* an encoded pulse indexing (TS 26.190 section 5.8.2).
*
* The results are given in out[], in which a negative number means
-* amplitude -1 and vice versa (i.e., ampl(x) = x / abs(x) )
+* amplitude -1 and vice versa (i.e., ampl(x) = x / abs(x) ).
*
* @param[out] out Output buffer (writes i elements)
* @param[in] code Pulse index (no. of bits varies, see below)
@@ -470,7 +470,7 @@ static void decode_6p_track(int *out, int code, int m, int off) ///code: 6m-2 bi

/**
* Decode the algebraic codebook index to pulse positions and signs,
-* then construct the algebraic codebook vector
+* then construct the algebraic codebook vector.
*
* @param[out] fixed_vector Buffer for the fixed codebook excitation
* @param[in] pulse_hi MSBs part of the pulse index array (higher modes only)
@@ -541,7 +541,7 @@ static void decode_fixed_vector(float *fixed_vector, const uint16_t *pulse_hi,
}

/**
-* Decode pitch gain and fixed gain correction factor
+* Decode pitch gain and fixed gain correction factor.
*
* @param[in] vq_gain Vector-quantized index for gains
* @param[in] mode Mode of the current frame
@@ -559,7 +559,7 @@ static void decode_gains(const uint8_t vq_gain, const enum Mode mode,
}

/**
-* Apply pitch sharpening filters to the fixed codebook vector
+* Apply pitch sharpening filters to the fixed codebook vector.
*
* @param[in] ctx The context
* @param[in,out] fixed_vector Fixed codebook excitation
@@ -580,7 +580,7 @@ static void pitch_sharpening(AMRWBContext *ctx, float *fixed_vector)
}

/**
-* Calculate the voicing factor (-1.0 = unvoiced to 1.0 = voiced)
+* Calculate the voicing factor (-1.0 = unvoiced to 1.0 = voiced).
*
* @param[in] p_vector, f_vector Pitch and fixed excitation vectors
* @param[in] p_gain, f_gain Pitch and fixed gains
@@ -599,8 +599,8 @@ static float voice_factor(float *p_vector, float p_gain,
}

/**
-* Reduce fixed vector sparseness by smoothing with one of three IR filters
-* Also known as "adaptive phase dispersion"
+* Reduce fixed vector sparseness by smoothing with one of three IR filters,
+* also known as "adaptive phase dispersion".
*
* @param[in] ctx The context
* @param[in,out] fixed_vector Unfiltered fixed vector
@@ -670,7 +670,7 @@ static float *anti_sparseness(AMRWBContext *ctx,

/**
* Calculate a stability factor {teta} based on distance between
-* current and past isf. A value of 1 shows maximum signal stability
+* current and past isf. A value of 1 shows maximum signal stability.
*/
static float stability_factor(const float *isf, const float *isf_past)
{
@@ -687,7 +687,7 @@ static float stability_factor(const float *isf, const float *isf_past)

/**
* Apply a non-linear fixed gain smoothing in order to reduce
-* fluctuation in the energy of excitation
+* fluctuation in the energy of excitation.
*
* @param[in] fixed_gain Unsmoothed fixed gain
* @param[in,out] prev_tr_gain Previous threshold gain (updated)
@@ -718,7 +718,7 @@ static float noise_enhancer(float fixed_gain, float *prev_tr_gain,
}

/**
-* Filter the fixed_vector to emphasize the higher frequencies
+* Filter the fixed_vector to emphasize the higher frequencies.
*
* @param[in,out] fixed_vector Fixed codebook vector
* @param[in] voice_fac Frame voicing factor
@@ -742,7 +742,7 @@ static void pitch_enhancer(float *fixed_vector, float voice_fac)
}

/**
-* Conduct 16th order linear predictive coding synthesis from excitation
+* Conduct 16th order linear predictive coding synthesis from excitation.
*
* @param[in] ctx Pointer to the AMRWBContext
* @param[in] lpc Pointer to the LPC coefficients
@@ -802,7 +802,7 @@ static void de_emphasis(float *out, float *in, float m, float mem[1])

/**
* Upsample a signal by 5/4 ratio (from 12.8kHz to 16kHz) using
-* a FIR interpolation filter. Uses past data from before *in address
+* a FIR interpolation filter. Uses past data from before *in address.
*
* @param[out] out Buffer for interpolated signal
* @param[in] in Current signal data (length 0.8*o_size)
@@ -832,7 +832,7 @@ static void upsample_5_4(float *out, const float *in, int o_size)

/**
* Calculate the high-band gain based on encoded index (23k85 mode) or
-* on the low-band speech signal and the Voice Activity Detection flag
+* on the low-band speech signal and the Voice Activity Detection flag.
*
* @param[in] ctx The context
* @param[in] synth LB speech synthesis at 12.8k
@@ -857,7 +857,7 @@ static float find_hb_gain(AMRWBContext *ctx, const float *synth,

/**
* Generate the high-band excitation with the same energy from the lower
-* one and scaled by the given gain
+* one and scaled by the given gain.
*
* @param[in] ctx The context
* @param[out] hb_exc Buffer for the excitation
@@ -880,7 +880,7 @@ static void scaled_hb_excitation(AMRWBContext *ctx, float *hb_exc,
}

/**
-* Calculate the auto-correlation for the ISF difference vector
+* Calculate the auto-correlation for the ISF difference vector.
*/
static float auto_correlation(float *diff_isf, float mean, int lag)
{
@@ -896,7 +896,7 @@ static float auto_correlation(float *diff_isf, float mean, int lag)

/**
* Extrapolate a ISF vector to the 16kHz range (20th order LP)
-* used at mode 6k60 LP filter for the high frequency band
+* used at mode 6k60 LP filter for the high frequency band.
*
* @param[out] out Buffer for extrapolated isf
* @param[in] isf Input isf vector
@@ -981,7 +981,7 @@ static void lpc_weighting(float *out, const float *lpc, float gamma, int size)

/**
* Conduct 20th order linear predictive coding synthesis for the high
-* frequency band excitation at 16kHz
+* frequency band excitation at 16kHz.
*
* @param[in] ctx The context
* @param[in] subframe Current subframe index (0 to 3)
@@ -1019,8 +1019,8 @@ static void hb_synthesis(AMRWBContext *ctx, int subframe, float *samples,
}

/**
-* Apply to high-band samples a 15th order filter
-* The filter characteristic depends on the given coefficients
+* Apply a 15th order filter to high-band samples.
+* The filter characteristic depends on the given coefficients.
*
* @param[out] out Buffer for filtered output
* @param[in] fir_coef Filter coefficients
@@ -1048,7 +1048,7 @@ static void hb_fir_filter(float *out, const float fir_coef[HB_FIR_SIZE + 1],
}

/**
-* Update context state before the next subframe
+* Update context state before the next subframe.
*/
static void update_sub_state(AMRWBContext *ctx)
{
@@ -2667,7 +2667,7 @@ typedef struct AVCodecContext {

#if FF_API_X264_GLOBAL_OPTS
/**
-* Influences how often B-frames are used.
+* Influence how often B-frames are used.
* - encoding: Set by user.
* - decoding: unused
*/
@@ -2748,7 +2748,7 @@ typedef struct AVCodecContext {
int mv0_threshold;

/**
-* Adjusts sensitivity of b_frame_strategy 1.
+* Adjust sensitivity of b_frame_strategy 1.
* - encoding: Set by user.
* - decoding: unused
*/
@@ -3032,7 +3032,7 @@ typedef struct AVCodecContext {

#if FF_API_FLAC_GLOBAL_OPTS
/**
-* Determines which LPC analysis algorithm to use.
+* Determine which LPC analysis algorithm to use.
* - encoding: Set by user
* - decoding: unused
*/
@@ -4263,7 +4263,7 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
AVPacket *avpkt);

/**
-* Frees all allocated data in the given subtitle struct.
+* Free all allocated data in the given subtitle struct.
*
* @param sub AVSubtitle to free.
*/
@@ -4628,8 +4628,16 @@ int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
unsigned int av_xiphlacing(unsigned char *s, unsigned int v);

/**
+<<<<<<< HEAD
* Logs a generic warning message about a missing feature. This function is
* intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
+||||||| merged common ancestors
+* Logs a generic warning message about a missing feature. This function is
+* intended to be used internally by Libav (libavcodec, libavformat, etc.)
+=======
+* Log a generic warning message about a missing feature. This function is
+* intended to be used internally by Libav (libavcodec, libavformat, etc.)
+>>>>>>> qatar/master
* only, and would normally not be used by applications.
* @param[in] avc a pointer to an arbitrary struct of which the first field is
* a pointer to an AVClass struct
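The hunk above is rendered with diff3-style conflict markers for this merge: the HEAD (FFmpeg) text, the merged common ancestor, and the qatar/master text. For reference, the general layout of such a conflict block is (the content lines here are purely illustrative):

    <<<<<<< HEAD
    our side of the change
    ||||||| merged common ancestors
    the version both sides started from
    =======
    their side of the change
    >>>>>>> qatar/master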
@@ -30,7 +30,7 @@


/**
-* finds the end of the current frame in the bitstream.
+* Find the end of the current frame in the bitstream.
* @return the position of the first byte of the next frame, or -1
*/
static int cavs_find_frame_end(ParseContext *pc, const uint8_t *buf,
@@ -74,7 +74,7 @@ static inline int bidir_sal(int value, int offset)
int64_t ff_dot_product(const int16_t *a, const int16_t *b, int length);

/**
-* returns the dot product.
+* Return the dot product.
* @param a input data array
* @param b input data array
* @param length number of elements
@@ -39,7 +39,7 @@ typedef struct DCAParseContext {
|| state == DCA_MARKER_RAW_LE || state == DCA_MARKER_RAW_BE || state == DCA_HD_MARKER)

/**
-* finds the end of the current frame in the bitstream.
+* Find the end of the current frame in the bitstream.
* @return the position of the first byte of the next frame, or -1
*/
static int dca_find_frame_end(DCAParseContext * pc1, const uint8_t * buf,
@@ -1824,7 +1824,7 @@ static void add_8x8basis_c(int16_t rem[64], int16_t basis[64], int scale){
}

/**
-* permutes an 8x8 block.
+* Permute an 8x8 block.
* @param block the block which will be permuted according to the given permutation vector
* @param permutation the permutation vector
* @param last the last non zero coefficient in scantable order, used to speed the permutation up
@@ -80,7 +80,7 @@ static void set_mv_strides(MpegEncContext *s, int *mv_step, int *stride){
}

/**
-* replaces the current MB with a flat dc only version.
+* Replace the current MB with a flat dc-only version.
*/
static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y)
{
@@ -714,7 +714,7 @@ static int is_intra_more_likely(MpegEncContext *s){
}

void ff_er_frame_start(MpegEncContext *s){
-if(!s->error_recognition) return;
+if(!s->err_recognition) return;

memset(s->error_status_table, MV_ERROR|AC_ERROR|DC_ERROR|VP_START|AC_END|DC_END|MV_END, s->mb_stride*s->mb_height*sizeof(uint8_t));
s->error_count= 3*s->mb_num;
@@ -722,7 +722,7 @@ void ff_er_frame_start(MpegEncContext *s){
}

/**
-* adds a slice.
+* Add a slice.
* @param endx x component of the last macroblock, can be -1 for the last of the previous line
* @param status the status at the end (MV_END, AC_ERROR, ...), it is assumed that no earlier end or
* error of the same type occurred
@@ -742,7 +742,7 @@ void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int en
return;
}

-if(!s->error_recognition) return;
+if(!s->err_recognition) return;

mask &= ~VP_START;
if(status & (AC_ERROR|AC_END)){
@@ -798,7 +798,7 @@ void ff_er_frame_end(MpegEncContext *s){
int size = s->b8_stride * 2 * s->mb_height;
Picture *pic= s->current_picture_ptr;

-if(!s->error_recognition || s->error_count==0 || s->avctx->lowres ||
+if(!s->err_recognition || s->error_count==0 || s->avctx->lowres ||
s->avctx->hwaccel ||
s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
s->picture_structure != PICT_FRAME || // we do not support ER of field pictures yet, though it should not crash if enabled
@@ -872,7 +872,7 @@ void ff_er_frame_end(MpegEncContext *s){
}
#endif
/* handle missing slices */
-if(s->error_recognition>=4){
+if(s->err_recognition&AV_EF_EXPLODE){
int end_ok=1;

for(i=s->mb_num-2; i>=s->mb_width+100; i--){ //FIXME +100 hack
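The err_recognition conversion above replaces the old numeric error_recognition levels with a bit-mask of AV_EF_* flags, as seen in the AV_EF_EXPLODE / AV_EF_BITSTREAM / AV_EF_BUFFER tests throughout this patch. A hedged sketch of how an application would request strict error checking with the new field (the particular flag combination is chosen only for illustration):

    #include <libavcodec/avcodec.h>

    void enable_strict_error_checks(AVCodecContext *avctx)
    {
        /* bit-mask replaces the old avctx->error_recognition level */
        avctx->err_recognition = AV_EF_BITSTREAM   /* flag bitstream spec violations   */
                               | AV_EF_BUFFER      /* flag improper padding/truncation */
                               | AV_EF_EXPLODE;    /* abort decoding on minor errors   */
    }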
@@ -85,13 +85,13 @@ gb
getbitcontext

OPEN_READER(name, gb)
-loads gb into local variables
+load gb into local variables

CLOSE_READER(name, gb)
-stores local vars in gb
+store local vars in gb

UPDATE_CACHE(name, gb)
-refills the internal cache from the bitstream
+refill the internal cache from the bitstream
after this call at least MIN_CACHE_BITS will be available,

GET_CACHE(name, gb)
@@ -290,7 +290,7 @@ static inline unsigned int get_bits(GetBitContext *s, int n){
}

/**
-* Shows 1-25 bits.
+* Show 1-25 bits.
*/
static inline unsigned int show_bits(GetBitContext *s, int n){
register int tmp;
@@ -337,7 +337,7 @@ static inline void skip_bits1(GetBitContext *s){
}

/**
-* reads 0-32 bits.
+* Read 0-32 bits.
*/
static inline unsigned int get_bits_long(GetBitContext *s, int n){
if (n <= MIN_CACHE_BITS) return get_bits(s, n);
@@ -353,14 +353,14 @@ static inline unsigned int get_bits_long(GetBitContext *s, int n){
}

/**
-* reads 0-32 bits as a signed integer.
+* Read 0-32 bits as a signed integer.
*/
static inline int get_sbits_long(GetBitContext *s, int n) {
return sign_extend(get_bits_long(s, n), n);
}

/**
-* shows 0-32 bits.
+* Show 0-32 bits.
*/
static inline unsigned int show_bits_long(GetBitContext *s, int n){
if (n <= MIN_CACHE_BITS) return show_bits(s, n);
@@ -380,7 +380,7 @@ static inline int check_marker(GetBitContext *s, const char *msg)
}

/**
-* init GetBitContext.
+* Inititalize GetBitContext.
* @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes larger than the actual read bits
* because some optimized bitstream readers read 32 or 64 bit at once and could read over the end
* @param bit_size the size of the buffer in bits
@@ -442,7 +442,6 @@ void free_vlc(VLC *vlc);


/**
-*
* If the vlc code is invalid and max_depth=1, then no bits will be removed.
* If the vlc code is invalid and max_depth>1, then the number of bits removed
* is undefined.
@@ -504,7 +503,7 @@


/**
-* parses a vlc code, faster than get_vlc()
+* Parse a vlc code, faster than get_vlc().
* @param bits is the number of bits which will be read at once, must be
* identical to nb_bits in init_vlc()
* @param max_depth is the number of times bits bits must be read to completely
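The get_bits.h doc-comment changes above cover lavc's internal bit reader. As a reminder of how the functions named in these hunks fit together, here is a minimal sketch (internal API; the buffer contents and field widths are made up for illustration):

    #include "get_bits.h"   /* libavcodec internal header */

    static int read_example_fields(const uint8_t *buf, int buf_size)
    {
        GetBitContext gb;

        init_get_bits(&gb, buf, buf_size * 8);  /* bit_size is given in bits       */
        int version = get_bits(&gb, 4);         /* get_bits(): 1-25 bits at a time */
        int flags   = get_bits(&gb, 8);
        int payload = get_bits_long(&gb, 32);   /* get_bits_long(): 0-32 bits      */
        return version + flags + payload;
    }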
@@ -97,7 +97,7 @@ static av_cold int h261_decode_init(AVCodecContext *avctx){
}

/**
-* decodes the group of blocks header or slice header.
+* Decode the group of blocks header or slice header.
* @return <0 if an error occurred
*/
static int h261_decode_gob_header(H261Context *h){
@@ -150,7 +150,7 @@ static int h261_decode_gob_header(H261Context *h){
}

/**
-* decodes the group of blocks / video packet header.
+* Decode the group of blocks / video packet header.
* @return <0 if no resync found
*/
static int ff_h261_resync(H261Context *h){
@@ -191,7 +191,7 @@ static int ff_h261_resync(H261Context *h){
}

/**
-* decodes skipped macroblocks
+* Decode skipped macroblocks.
* @return 0
*/
static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 )
@@ -355,7 +355,7 @@ intra:
}

/**
-* decodes a macroblock
+* Decode a macroblock.
* @return <0 if an error occurred
*/
static int h261_decode_block(H261Context * h, DCTELEM * block,
@@ -437,7 +437,7 @@ static int h261_decode_block(H261Context * h, DCTELEM * block,
}

/**
-* decodes the H261 picture header.
+* Decode the H.261 picture header.
* @return <0 if no startcode found
*/
static int h261_decode_picture_header(H261Context *h){
@@ -251,7 +251,7 @@ void ff_h261_encode_init(MpegEncContext *s){


/**
-* encodes a 8x8 block.
+* Encode an 8x8 block.
* @param block the 8x8 block
* @param n block index (0-3 are luma, 4-5 are chroma)
*/
@@ -127,7 +127,7 @@ av_cold int ff_h263_decode_end(AVCodecContext *avctx)
}

/**
-* returns the number of bytes consumed for building the current frame
+* Return the number of bytes consumed for building the current frame.
*/
static int get_consumed_bytes(MpegEncContext *s, int buf_size){
int pos= (get_bits_count(&s->gb)+7)>>3;
@@ -310,7 +310,7 @@ static int decode_slice(MpegEncContext *s){
max_extra+= 17;

/* buggy padding but the frame should still end approximately at the bitstream end */
-if((s->workaround_bugs&FF_BUG_NO_PADDING) && s->error_recognition>=3)
+if((s->workaround_bugs&FF_BUG_NO_PADDING) && (s->err_recognition&AV_EF_BUFFER))
max_extra+= 48;
else if((s->workaround_bugs&FF_BUG_NO_PADDING))
max_extra+= 256*256*256*64;
@@ -63,7 +63,8 @@ static const enum PixelFormat hwaccel_pixfmt_list_h264_jpeg_420[] = {
};

/**
-* checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
+* Check if the top & left blocks are available if needed and
+* change the dc mode so it only uses the available blocks.
*/
int ff_h264_check_intra4x4_pred_mode(H264Context *h){
MpegEncContext * const s = &h->s;
@@ -2597,7 +2598,7 @@ static void clone_slice(H264Context *dst, H264Context *src)
}

/**
-* computes profile from profile_idc and constraint_set?_flags
+* Compute profile from profile_idc and constraint_set?_flags.
*
* @param sps SPS
*
@@ -2624,7 +2625,7 @@ int ff_h264_get_profile(SPS *sps)
}

/**
-* decodes a slice header.
+* Decode a slice header.
* This will also call MPV_common_init() and frame_start() as needed.
*
* @param h h264context
@@ -3743,12 +3744,12 @@ static int execute_decode_slices(H264Context *h, int context_count){
} else {
for(i = 1; i < context_count; i++) {
hx = h->thread_context[i];
-hx->s.error_recognition = avctx->error_recognition;
+hx->s.err_recognition = avctx->err_recognition;
hx->s.error_count = 0;
hx->x264_build= h->x264_build;
}

-avctx->execute(avctx, (void *)decode_slice,
+avctx->execute(avctx, decode_slice,
h->thread_context, NULL, context_count, sizeof(void*));

/* pull back stuff from slices to master context */
@@ -4024,7 +4025,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
}

/**
-* returns the number of bytes consumed for building the current frame
+* Return the number of bytes consumed for building the current frame.
*/
static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size){
if(pos==0) pos=1; //avoid infinite loops (i doubt that is needed but ...)
@@ -784,14 +784,14 @@ static av_always_inline uint16_t pack8to16(int a, int b){
}

/**
-* gets the chroma qp.
+* Get the chroma qp.
*/
static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale){
return h->pps.chroma_qp_table[t][qscale];
}

/**
-* gets the predicted intra4x4 prediction mode.
+* Get the predicted intra4x4 prediction mode.
*/
static av_always_inline int pred_intra_mode(H264Context *h, int n){
const int index8= scan8[n];
@@ -1863,7 +1863,7 @@ static av_always_inline void decode_cabac_luma_residual( H264Context *h, const u
}

/**
-* decodes a macroblock
+* Decode a macroblock.
* @return 0 if OK, AC_ERROR / DC_ERROR / MV_ERROR if an error is noticed
*/
int ff_h264_decode_mb_cabac(H264Context *h) {
@@ -281,7 +281,7 @@ static int8_t cavlc_level_tab[7][1<<LEVEL_TAB_BITS][2];
#define RUN7_VLC_BITS 6

/**
-* gets the predicted number of non-zero coefficients.
+* Get the predicted number of non-zero coefficients.
* @param n block index
*/
static inline int pred_non_zero_count(H264Context *h, int n){
@@ -436,7 +436,7 @@ static inline int get_level_prefix(GetBitContext *gb){
}

/**
-* decodes a residual block.
+* Decode a residual block.
* @param n block index
* @param scantable scantable
* @param max_coeff number of coefficients in the block
@@ -86,7 +86,7 @@ static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C,
}

/**
-* gets the predicted MV.
+* Get the predicted MV.
* @param n the block index
* @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
* @param mx the x component of the predicted motion vector
@@ -142,7 +142,7 @@ static av_always_inline void pred_motion(H264Context * const h, int n, int part_
}

/**
-* gets the directionally predicted 16x8 MV.
+* Get the directionally predicted 16x8 MV.
* @param n the block index
* @param mx the x component of the predicted motion vector
* @param my the y component of the predicted motion vector
@@ -177,7 +177,7 @@ static av_always_inline void pred_16x8_motion(H264Context * const h, int n, int
}

/**
-* gets the directionally predicted 8x16 MV.
+* Get the directionally predicted 8x16 MV.
* @param n the block index
* @param mx the x component of the predicted motion vector
* @param my the y component of the predicted motion vector
@@ -148,7 +148,7 @@ int ff_h263_decode_mba(MpegEncContext *s)
}

/**
-* decodes the group of blocks header or slice header.
+* Decode the group of blocks header or slice header.
* @return <0 if an error occurred
*/
static int h263_decode_gob_header(MpegEncContext *s)
@@ -203,7 +203,7 @@ static int h263_decode_gob_header(MpegEncContext *s)
}

/**
-* finds the next resync_marker
+* Find the next resync_marker.
* @param p pointer to buffer to scan
* @param end pointer to the end of the buffer
* @return pointer to the next resync_marker, or end if none was found
@@ -224,7 +224,7 @@ const uint8_t *ff_h263_find_resync_marker(const uint8_t *restrict p, const uint8
}

/**
-* decodes the group of blocks / video packet header.
+* Decode the group of blocks / video packet header.
* @return bit position of the resync_marker, or <0 if none was found
*/
int ff_h263_resync(MpegEncContext *s){
@@ -306,7 +306,7 @@ int h263_decode_motion(MpegEncContext * s, int pred, int f_code)
}


-/* Decodes RVLC of H.263+ UMV */
+/* Decode RVLC of H.263+ UMV */
static int h263p_decode_umotion(MpegEncContext * s, int pred)
{
int code = 0, sign;
@@ -484,7 +484,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
level = get_bits(&s->gb, 8);
if((level&0x7F) == 0){
av_log(s->avctx, AV_LOG_ERROR, "illegal dc %d at %d %d\n", level, s->mb_x, s->mb_y);
-if(s->error_recognition >= FF_ER_COMPLIANT)
+if(s->err_recognition & AV_EF_BITSTREAM)
return -1;
}
if (level == 255)
@@ -302,7 +302,7 @@ void ff_clean_h263_qscales(MpegEncContext *s){
static const int dquant_code[5]= {1,0,9,2,3};

/**
-* encodes a 8x8 block.
+* Encode an 8x8 block.
* @param block the 8x8 block
* @param n block index (0-3 are luma, 4-5 are chroma)
*/
@@ -36,7 +36,7 @@ typedef struct LATMParseContext{
} LATMParseContext;

/**
-* finds the end of the current frame in the bitstream.
+* Find the end of the current frame in the bitstream.
* @return the position of the first byte of the next frame, or -1
*/
static int latm_find_frame_end(AVCodecParserContext *s1, const uint8_t *buf,
@@ -34,7 +34,7 @@ typedef struct MJPEGParserContext{
}MJPEGParserContext;

/**
-* finds the end of the current frame in the bitstream.
+* Find the end of the current frame in the bitstream.
* @return the position of the first byte of the next frame, or -1
*/
static int find_frame_end(MJPEGParserContext *m, const uint8_t *buf, int buf_size){
|
@ -1653,9 +1653,10 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
|
|||||||
#define DECODE_SLICE_OK 0
|
#define DECODE_SLICE_OK 0
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* decodes a slice. MpegEncContext.mb_y must be set to the MB row from the startcode
|
* Decode a slice.
|
||||||
* @return DECODE_SLICE_ERROR if the slice is damaged<br>
|
* MpegEncContext.mb_y must be set to the MB row from the startcode.
|
||||||
* DECODE_SLICE_OK if this slice is ok<br>
|
* @return DECODE_SLICE_ERROR if the slice is damaged,
|
||||||
|
* DECODE_SLICE_OK if this slice is OK
|
||||||
*/
|
*/
|
||||||
static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
|
static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
|
||||||
const uint8_t **buf, int buf_size)
|
const uint8_t **buf, int buf_size)
|
||||||
|
@ -119,7 +119,7 @@ extern uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3];
|
|||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* predicts the dc.
|
* Predict the dc.
|
||||||
* encoding quantized level -> quantized diff
|
* encoding quantized level -> quantized diff
|
||||||
* decoding quantized diff -> quantized level
|
* decoding quantized diff -> quantized level
|
||||||
* @param n block index (0-3 are luma, 4-5 are chroma)
|
* @param n block index (0-3 are luma, 4-5 are chroma)
|
||||||
@ -174,7 +174,7 @@ static inline int ff_mpeg4_pred_dc(MpegEncContext * s, int n, int level, int *di
|
|||||||
}else{
|
}else{
|
||||||
level += pred;
|
level += pred;
|
||||||
ret= level;
|
ret= level;
|
||||||
if(s->error_recognition>=3){
|
if(s->err_recognition&AV_EF_BITSTREAM){
|
||||||
if(level<0){
|
if(level<0){
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "dc<0 at %dx%d\n", s->mb_x, s->mb_y);
|
av_log(s->avctx, AV_LOG_ERROR, "dc<0 at %dx%d\n", s->mb_x, s->mb_y);
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -26,7 +26,7 @@
|
|||||||
#include "parser.h"
|
#include "parser.h"
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* finds the end of the current frame in the bitstream.
|
* Find the end of the current frame in the bitstream.
|
||||||
* @return the position of the first byte of the next frame, or -1
|
* @return the position of the first byte of the next frame, or -1
|
||||||
*/
|
*/
|
||||||
int ff_mpeg4_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size);
|
int ff_mpeg4_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size);
|
||||||
|
@ -47,7 +47,7 @@ static const int mb_type_b_map[4]= {
|
|||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* predicts the ac.
|
* Predict the ac.
|
||||||
* @param n block index (0-3 are luma, 4-5 are chroma)
|
* @param n block index (0-3 are luma, 4-5 are chroma)
|
||||||
* @param dir the ac prediction direction
|
* @param dir the ac prediction direction
|
||||||
*/
|
*/
|
||||||
@ -349,7 +349,7 @@ static void mpeg4_decode_sprite_trajectory(MpegEncContext * s, GetBitContext *gb
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* decodes the next video packet.
|
* Decode the next video packet.
|
||||||
* @return <0 if something went wrong
|
* @return <0 if something went wrong
|
||||||
*/
|
*/
|
||||||
int mpeg4_decode_video_packet_header(MpegEncContext *s)
|
int mpeg4_decode_video_packet_header(MpegEncContext *s)
|
||||||
@ -435,7 +435,7 @@ int mpeg4_decode_video_packet_header(MpegEncContext *s)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gets the average motion vector for a GMC MB.
|
* Get the average motion vector for a GMC MB.
|
||||||
* @param n either 0 for the x component or 1 for y
|
* @param n either 0 for the x component or 1 for y
|
||||||
* @return the average MV for a GMC MB
|
* @return the average MV for a GMC MB
|
||||||
*/
|
*/
|
||||||
@ -481,7 +481,7 @@ static inline int get_amv(MpegEncContext *s, int n){
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* decodes the dc value.
|
* Decode the dc value.
|
||||||
* @param n block index (0-3 are luma, 4-5 are chroma)
|
* @param n block index (0-3 are luma, 4-5 are chroma)
|
||||||
* @param dir_ptr the prediction direction will be stored here
|
* @param dir_ptr the prediction direction will be stored here
|
||||||
* @return the quantized dc
|
* @return the quantized dc
|
||||||
@ -516,7 +516,7 @@ static inline int mpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr)
|
|||||||
|
|
||||||
if (code > 8){
|
if (code > 8){
|
||||||
if(get_bits1(&s->gb)==0){ /* marker */
|
if(get_bits1(&s->gb)==0){ /* marker */
|
||||||
if(s->error_recognition>=2){
|
if(s->err_recognition&AV_EF_BITSTREAM){
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "dc marker bit missing\n");
|
av_log(s->avctx, AV_LOG_ERROR, "dc marker bit missing\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
@ -528,7 +528,7 @@ static inline int mpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* decodes first partition.
|
* Decode first partition.
|
||||||
* @return number of MBs decoded or <0 if an error occurred
|
* @return number of MBs decoded or <0 if an error occurred
|
||||||
*/
|
*/
|
||||||
static int mpeg4_decode_partition_a(MpegEncContext *s){
|
static int mpeg4_decode_partition_a(MpegEncContext *s){
|
||||||
@ -780,7 +780,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* decodes the first & second partition
|
* Decode the first and second partition.
|
||||||
* @return <0 if error (and sets error type in the error_status_table)
|
* @return <0 if error (and sets error type in the error_status_table)
|
||||||
*/
|
*/
|
||||||
int ff_mpeg4_decode_partitions(MpegEncContext *s)
|
int ff_mpeg4_decode_partitions(MpegEncContext *s)
|
||||||
@ -833,7 +833,7 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* decodes a block.
|
* Decode a block.
|
||||||
* @return <0 if an error occurred
|
* @return <0 if an error occurred
|
||||||
*/
|
*/
|
||||||
static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
|
static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
|
||||||
@ -1012,7 +1012,7 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
|
|||||||
else level= level * qmul - qadd;
|
else level= level * qmul - qadd;
|
||||||
|
|
||||||
if((unsigned)(level + 2048) > 4095){
|
if((unsigned)(level + 2048) > 4095){
|
||||||
if(s->error_recognition > FF_ER_COMPLIANT){
|
if(s->err_recognition & AV_EF_BITSTREAM){
|
||||||
if(level > 2560 || level<-2560){
|
if(level > 2560 || level<-2560){
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "|level| overflow in 3. esc, qp=%d\n", s->qscale);
|
av_log(s->avctx, AV_LOG_ERROR, "|level| overflow in 3. esc, qp=%d\n", s->qscale);
|
||||||
return -1;
|
return -1;
|
||||||
@ -1848,7 +1848,7 @@ no_cplx_est:
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* decodes the user data stuff in the header.
|
* Decode the user data stuff in the header.
|
||||||
* Also initializes divx/xvid/lavc_version/build.
|
* Also initializes divx/xvid/lavc_version/build.
|
||||||
*/
|
*/
|
||||||
static int decode_user_data(MpegEncContext *s, GetBitContext *gb){
|
static int decode_user_data(MpegEncContext *s, GetBitContext *gb){
|
||||||
@ -2118,7 +2118,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* decode mpeg4 headers
|
* Decode mpeg4 headers.
|
||||||
* @return <0 if no VOP found (or a damaged one)
|
* @return <0 if no VOP found (or a damaged one)
|
||||||
* FRAME_SKIPPED if a not coded VOP is found
|
* FRAME_SKIPPED if a not coded VOP is found
|
||||||
* 0 if a VOP is found
|
* 0 if a VOP is found
|
||||||
|
@ -238,7 +238,7 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){
|
|||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* encodes the dc value.
|
* Encode the dc value.
|
||||||
* @param n block index (0-3 are luma, 4-5 are chroma)
|
* @param n block index (0-3 are luma, 4-5 are chroma)
|
||||||
*/
|
*/
|
||||||
static inline void mpeg4_encode_dc(PutBitContext * s, int level, int n)
|
static inline void mpeg4_encode_dc(PutBitContext * s, int level, int n)
|
||||||
@ -291,7 +291,7 @@ static inline int mpeg4_get_dc_length(int level, int n){
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* encodes a 8x8 block
|
* Encode an 8x8 block.
|
||||||
* @param n block index (0-3 are luma, 4-5 are chroma)
|
* @param n block index (0-3 are luma, 4-5 are chroma)
|
||||||
*/
|
*/
|
||||||
static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n, int intra_dc,
|
static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n, int intra_dc,
|
||||||
|
@ -289,7 +289,7 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* allocates a Picture
|
* Allocate a Picture.
|
||||||
* The pixels are allocated/set by calling get_buffer() if shared = 0
|
* The pixels are allocated/set by calling get_buffer() if shared = 0
|
||||||
*/
|
*/
|
||||||
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
|
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
|
||||||
@ -388,7 +388,7 @@ fail: // for the FF_ALLOCZ_OR_GOTO macro
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* deallocates a picture
|
* Deallocate a picture.
|
||||||
*/
|
*/
|
||||||
static void free_picture(MpegEncContext *s, Picture *pic)
|
static void free_picture(MpegEncContext *s, Picture *pic)
|
||||||
{
|
{
|
||||||
@ -625,9 +625,9 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* sets the given MpegEncContext to common defaults
|
* Set the given MpegEncContext to common defaults
|
||||||
* (same for encoding and decoding).
|
* (same for encoding and decoding).
|
||||||
* the changed fields will not depend upon the
|
* The changed fields will not depend upon the
|
||||||
* prior state of the MpegEncContext.
|
* prior state of the MpegEncContext.
|
||||||
*/
|
*/
|
||||||
void MPV_common_defaults(MpegEncContext *s)
|
void MPV_common_defaults(MpegEncContext *s)
|
||||||
@ -653,7 +653,7 @@ void MPV_common_defaults(MpegEncContext *s)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* sets the given MpegEncContext to defaults for decoding.
|
* Set the given MpegEncContext to defaults for decoding.
|
||||||
* the changed fields will not depend upon
|
* the changed fields will not depend upon
|
||||||
* the prior state of the MpegEncContext.
|
* the prior state of the MpegEncContext.
|
||||||
*/
|
*/
|
||||||
@ -1264,7 +1264,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
s->error_recognition= avctx->error_recognition;
|
s->err_recognition = avctx->err_recognition;
|
||||||
|
|
||||||
/* set dequantizer, we can't do it during init as it might change for mpeg4
|
/* set dequantizer, we can't do it during init as it might change for mpeg4
|
||||||
and we can't do it in the header decode as init is not called for mpeg4 there yet */
|
and we can't do it in the header decode as init is not called for mpeg4 there yet */
|
||||||
@ -1359,7 +1359,7 @@ void MPV_frame_end(MpegEncContext *s)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* draws an line from (ex, ey) -> (sx, sy).
|
* Draw a line from (ex, ey) -> (sx, sy).
|
||||||
* @param w width of the image
|
* @param w width of the image
|
||||||
* @param h height of the image
|
* @param h height of the image
|
||||||
* @param stride stride/linesize of the image
|
* @param stride stride/linesize of the image
|
||||||
@ -1408,7 +1408,7 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* draws an arrow from (ex, ey) -> (sx, sy).
|
* Draw an arrow from (ex, ey) -> (sx, sy).
|
||||||
* @param w width of the image
|
* @param w width of the image
|
||||||
* @param h height of the image
|
* @param h height of the image
|
||||||
* @param stride stride/linesize of the image
|
* @param stride stride/linesize of the image
|
||||||
@ -1441,7 +1441,7 @@ static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* prints debuging info for the given picture.
|
* Print debuging info for the given picture.
|
||||||
*/
|
*/
|
||||||
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
|
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
|
||||||
|
|
||||||
@ -2099,7 +2099,7 @@ static inline void add_dequant_dct(MpegEncContext *s,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* cleans dc, ac, coded_block for the current non intra MB
|
* Clean dc, ac, coded_block for the current non-intra MB.
|
||||||
*/
|
*/
|
||||||
void ff_clean_intra_table_entries(MpegEncContext *s)
|
void ff_clean_intra_table_entries(MpegEncContext *s)
|
||||||
{
|
{
|
||||||
@ -2404,7 +2404,6 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
|
||||||
* @param h is the normal height, this will be reduced automatically if needed for the last row
|
* @param h is the normal height, this will be reduced automatically if needed for the last row
|
||||||
*/
|
*/
|
||||||
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
|
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
|
||||||
|
@ -492,7 +492,7 @@ typedef struct MpegEncContext {
|
|||||||
GetBitContext last_resync_gb; ///< used to search for the next resync marker
|
GetBitContext last_resync_gb; ///< used to search for the next resync marker
|
||||||
int mb_num_left; ///< number of MBs left in this video packet (for partitioned Slices only)
|
int mb_num_left; ///< number of MBs left in this video packet (for partitioned Slices only)
|
||||||
int next_p_frame_damaged; ///< set if the next p frame is damaged, to avoid showing trashed b frames
|
int next_p_frame_damaged; ///< set if the next p frame is damaged, to avoid showing trashed b frames
|
||||||
int error_recognition;
|
int err_recognition;
|
||||||
|
|
||||||
ParseContext parse_context;
|
ParseContext parse_context;
|
||||||
|
|
||||||
@ -735,8 +735,8 @@ void ff_init_block_index(MpegEncContext *s);
|
|||||||
void ff_copy_picture(Picture *dst, Picture *src);
|
void ff_copy_picture(Picture *dst, Picture *src);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* allocates a Picture
|
* Allocate a Picture.
|
||||||
* The pixels are allocated/set by calling get_buffer() if shared=0
|
* The pixels are allocated/set by calling get_buffer() if shared = 0.
|
||||||
*/
|
*/
|
||||||
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared);
|
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared);
|
||||||
|
|
||||||
|
@ -42,14 +42,14 @@
|
|||||||
int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
|
int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* allocates a Picture
|
* Allocate a Picture.
|
||||||
* The pixels are allocated/set by calling get_buffer() if shared=0
|
* The pixels are allocated/set by calling get_buffer() if shared = 0.
|
||||||
*/
|
*/
|
||||||
int alloc_picture(MpegEncContext *s, Picture *pic, int shared);
|
int alloc_picture(MpegEncContext *s, Picture *pic, int shared);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* sets the given MpegEncContext to common defaults (same for encoding and decoding).
|
* Set the given MpegEncContext to common defaults (same for encoding and decoding).
|
||||||
* the changed fields will not depend upon the prior state of the MpegEncContext.
|
* The changed fields will not depend upon the prior state of the MpegEncContext.
|
||||||
*/
|
*/
|
||||||
void MPV_common_defaults(MpegEncContext *s);
|
void MPV_common_defaults(MpegEncContext *s);
|
||||||
|
|
||||||
|
@ -228,7 +228,7 @@ static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContex
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* sets the given MpegEncContext to defaults for encoding.
|
* Set the given MpegEncContext to defaults for encoding.
|
||||||
* the changed fields will not depend upon the prior state of the MpegEncContext.
|
* the changed fields will not depend upon the prior state of the MpegEncContext.
|
||||||
*/
|
*/
|
||||||
static void MPV_encode_defaults(MpegEncContext *s){
|
static void MPV_encode_defaults(MpegEncContext *s){
|
||||||
|
@ -1810,7 +1810,7 @@ int ff_msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
|
|||||||
i-= 192;
|
i-= 192;
|
||||||
if(i&(~63)){
|
if(i&(~63)){
|
||||||
const int left= get_bits_left(&s->gb);
|
const int left= get_bits_left(&s->gb);
|
||||||
if(((i+192 == 64 && level/qmul==-1) || s->error_recognition<=1) && left>=0){
|
if(((i+192 == 64 && level/qmul==-1) || !(s->err_recognition&AV_EF_BITSTREAM)) && left>=0){
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "ignoring overflow at %d %d\n", s->mb_x, s->mb_y);
|
av_log(s->avctx, AV_LOG_ERROR, "ignoring overflow at %d %d\n", s->mb_x, s->mb_y);
|
||||||
break;
|
break;
|
||||||
}else{
|
}else{
|
||||||
|
@ -214,7 +214,7 @@ void av_parser_close(AVCodecParserContext *s)
|
|||||||
/*****************************************************/
|
/*****************************************************/
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* combines the (truncated) bitstream to a complete frame
|
* Combine the (truncated) bitstream to a complete frame.
|
||||||
* @return -1 if no complete frame could be created, AVERROR(ENOMEM) if there was a memory allocation error
|
* @return -1 if no complete frame could be created, AVERROR(ENOMEM) if there was a memory allocation error
|
||||||
*/
|
*/
|
||||||
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
|
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
|
||||||
|
@@ -499,8 +499,9 @@ static void decode_slice_plane(ProresContext *ctx, ProresThreadData *td,
 }


-static int decode_slice(AVCodecContext *avctx, ProresThreadData *td)
+static int decode_slice(AVCodecContext *avctx, void *tdata)
 {
+    ProresThreadData *td = tdata;
     ProresContext *ctx = avctx->priv_data;
     int mb_x_pos = td->x_pos;
     int mb_y_pos = td->y_pos;
@@ -621,7 +622,7 @@ static int decode_picture(ProresContext *ctx, int pic_num,
         }
     }

-    return avctx->execute(avctx, (void *) decode_slice,
+    return avctx->execute(avctx, decode_slice,
                           ctx->slice_data, NULL, slice_num,
                           sizeof(ctx->slice_data[0]));
 }
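The prototype fix above exists so that decode_slice() matches the callback type expected by AVCodecContext.execute(), which is why the (void *) cast at the call site can be dropped. A simplified sketch of that pattern (names are illustrative, not the actual ProRes structures):

#include "libavcodec/avcodec.h"

typedef struct JobData { int first_mb, last_mb; } JobData;  /* hypothetical */

/* execute() callbacks take an opaque void *, so the per-job context is
 * recovered inside the callback, exactly as in the hunk above. */
static int decode_job(AVCodecContext *avctx, void *arg)
{
    JobData *jd = arg;
    /* ... decode macroblocks jd->first_mb .. jd->last_mb ... */
    return 0;
}

static int run_jobs(AVCodecContext *avctx, JobData *jobs, int nb_jobs)
{
    /* Fans the jobs out over the slice threads, if any are configured. */
    return avctx->execute(avctx, decode_job, jobs, NULL, nb_jobs, sizeof(*jobs));
}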
@ -324,7 +324,7 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Updates the next thread's AVCodecContext with values from the reference thread's context.
|
* Update the next thread's AVCodecContext with values from the reference thread's context.
|
||||||
*
|
*
|
||||||
* @param dst The destination context.
|
* @param dst The destination context.
|
||||||
* @param src The source context.
|
* @param src The source context.
|
||||||
|
@ -300,7 +300,7 @@ int ff_vbv_update(MpegEncContext *s, int frame_size){
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* modifies the bitrate curve from pass1 for one frame
|
* Modify the bitrate curve from pass1 for one frame.
|
||||||
*/
|
*/
|
||||||
static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_factor, int frame_num){
|
static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_factor, int frame_num){
|
||||||
RateControlContext *rcc= &s->rc_context;
|
RateControlContext *rcc= &s->rc_context;
|
||||||
@ -404,7 +404,7 @@ static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, doubl
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gets the qmin & qmax for pict_type
|
* Get the qmin & qmax for pict_type.
|
||||||
*/
|
*/
|
||||||
static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pict_type){
|
static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pict_type){
|
||||||
int qmin= s->avctx->lmin;
|
int qmin= s->avctx->lmin;
|
||||||
|
@ -90,7 +90,7 @@ static double bessel(double x){
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* builds a polyphase filterbank.
|
* Build a polyphase filterbank.
|
||||||
* @param factor resampling factor
|
* @param factor resampling factor
|
||||||
* @param scale wanted sum of coefficients for each filter
|
* @param scale wanted sum of coefficients for each filter
|
||||||
* @param type 0->cubic, 1->blackman nuttall windowed sinc, 2..16->kaiser windowed sinc beta=2..16
|
* @param type 0->cubic, 1->blackman nuttall windowed sinc, 2..16->kaiser windowed sinc beta=2..16
|
||||||
|
@@ -31,15 +31,15 @@
 #include "avcodec.h"

 /**
- * Waits for decoding threads to finish and resets internal
- * state. Called by avcodec_flush_buffers().
+ * Wait for decoding threads to finish and reset internal state.
+ * Called by avcodec_flush_buffers().
  *
  * @param avctx The context.
  */
 void ff_thread_flush(AVCodecContext *avctx);

 /**
- * Submits a new frame to a decoding thread.
+ * Submit a new frame to a decoding thread.
  * Returns the next available frame in picture. *got_picture_ptr
  * will be 0 if none is available.
  * The return value on success is the size of the consumed packet for
@@ -62,8 +62,7 @@ int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture,
 void ff_thread_finish_setup(AVCodecContext *avctx);

 /**
- * Notifies later decoding threads when part of their reference picture
- * is ready.
+ * Notify later decoding threads when part of their reference picture is ready.
  * Call this when some part of the picture is finished decoding.
  * Later calls with lower values of progress have no effect.
  *
@@ -75,7 +74,7 @@ void ff_thread_finish_setup(AVCodecContext *avctx);
 void ff_thread_report_progress(AVFrame *f, int progress, int field);

 /**
- * Waits for earlier decoding threads to finish reference pictures
+ * Wait for earlier decoding threads to finish reference pictures.
  * Call this before accessing some part of a picture, with a given
  * value for progress, and it will return after the responsible decoding
  * thread calls ff_thread_report_progress() with the same or
@@ -96,7 +96,7 @@ static void vc1_extract_headers(AVCodecParserContext *s, AVCodecContext *avctx,
 }

 /**
- * finds the end of the current frame in the bitstream.
+ * Find the end of the current frame in the bitstream.
  * @return the position of the first byte of the next frame, or -1
  */
 static int vc1_find_frame_end(ParseContext *pc, const uint8_t *buf,
@@ -114,8 +114,6 @@ static int decode_frame(AVCodecContext *avctx,
     *picture= *(AVFrame*)&a->picture;
     *data_size = sizeof(AVPicture);

-    emms_c();
-
     return buf_size;
 }

@@ -130,8 +128,6 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
     p->pict_type= AV_PICTURE_TYPE_I;
     p->key_frame= 1;

-    emms_c();
-
     avpriv_align_put_bits(&a->pb);
     while(get_bit_count(&a->pb)&31)
         put_bits(&a->pb, 8, 0);
@@ -19,6 +19,28 @@
 #ifndef AVDEVICE_AVDEVICE_H
 #define AVDEVICE_AVDEVICE_H

+/**
+ * @file
+ * @ingroup lavd
+ * Main libavdevice API header
+ */
+
+/**
+ * @defgroup lavd Special devices muxing/demuxing library
+ * @{
+ * Libavdevice is a complementary library to @ref libavf "libavformat". It
+ * provides various "special" platform-specific muxers and demuxers, e.g. for
+ * grabbing devices, audio capture and playback etc. As a consequence, the
+ * (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own
+ * I/O functions). The filename passed to avformat_open_input() often does not
+ * refer to an actually existing file, but has some special device-specific
+ * meaning - e.g. for the x11grab device it is the display name.
+ *
+ * To use libavdevice, simply call avdevice_register_all() to register all
+ * compiled muxers and demuxers. They all use standard libavformat API.
+ * @}
+ */
+
 #include "libavutil/avutil.h"
 #include "libavformat/avformat.h"

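A minimal usage sketch for the libavdevice documentation added above (hedged: device availability and the ":0.0" display name depend on the build and the system):

#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>

/* Open an X11 screen grab; as the doc block explains, the "filename"
 * is the display name, not a real file. */
static AVFormatContext *open_x11grab(void)
{
    AVFormatContext *ctx  = NULL;
    AVInputFormat   *grab;

    av_register_all();
    avdevice_register_all();

    grab = av_find_input_format("x11grab");   /* NULL if not compiled in */
    if (!grab || avformat_open_input(&ctx, ":0.0", grab, NULL) < 0)
        return NULL;
    return ctx;
}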
@@ -188,7 +188,7 @@ static av_cold void movie_common_uninit(AVFilterContext *ctx)
     if (movie->codec_ctx)
         avcodec_close(movie->codec_ctx);
     if (movie->format_ctx)
-        av_close_input_file(movie->format_ctx);
+        avformat_close_input(&movie->format_ctx);

     avfilter_unref_buffer(movie->picref);
     av_freep(&movie->frame);
@@ -132,7 +132,7 @@ static void free_variant_list(AppleHTTPContext *c)
         ffurl_close(var->input);
         if (var->ctx) {
             var->ctx->pb = NULL;
-            av_close_input_file(var->ctx);
+            avformat_close_input(&var->ctx);
         }
         av_free(var);
     }
@@ -31,8 +31,81 @@
  * @defgroup libavf I/O and Muxing/Demuxing Library
  * @{
  *
+ * Libavformat (lavf) is a library for dealing with various media container
+ * formats. Its main two purposes are demuxing - i.e. splitting a media file
+ * into component streams, and the reverse process of muxing - writing supplied
+ * data in a specified container format. It also has an @ref lavf_io
+ * "I/O module" which supports a number of protocols for accessing the data (e.g.
+ * file, tcp, http and others). Before using lavf, you need to call
+ * av_register_all() to register all compiled muxers, demuxers and protocols.
+ * Unless you are absolutely sure you won't use libavformat's network
+ * capabilities, you should also call avformat_network_init().
+ *
+ * A supported input format is described by an AVInputFormat struct, conversely
+ * an output format is described by AVOutputFormat. You can iterate over all
+ * registered input/output formats using the av_iformat_next() /
+ * av_oformat_next() functions. The protocols layer is not part of the public
+ * API, so you can only get the names of supported protocols with the
+ * avio_enum_protocols() function.
+ *
+ * Main lavf structure used for both muxing and demuxing is AVFormatContext,
+ * which exports all information about the file being read or written. As with
+ * most Libav structures, its size is not part of public ABI, so it cannot be
+ * allocated on stack or directly with av_malloc(). To create an
+ * AVFormatContext, use avformat_alloc_context() (some functions, like
+ * avformat_open_input() might do that for you).
+ *
+ * Most importantly an AVFormatContext contains:
+ * @li the @ref AVFormatContext.iformat "input" or @ref AVFormatContext.oformat
+ * "output" format. It is either autodetected or set by user for input;
+ * always set by user for output.
+ * @li an @ref AVFormatContext.streams "array" of AVStreams, which describe all
+ * elementary streams stored in the file. AVStreams are typically referred to
+ * using their index in this array.
+ * @li an @ref AVFormatContext.pb "I/O context". It is either opened by lavf or
+ * set by user for input, always set by user for output (unless you are dealing
+ * with an AVFMT_NOFILE format).
+ *
  * @defgroup lavf_decoding Demuxing
  * @{
+ * Demuxers read a media file and split it into chunks of data (@em packets). A
+ * @ref AVPacket "packet" contains one or more frames which belong a single
+ * elementary stream. In lavf API this process is represented by the
+ * avformat_open_input() function for opening a file, av_read_frame() for
+ * reading a single packet and finally avformat_close_input(), which does the
+ * cleanup.
+ *
+ * @section lavf_decoding_open Opening a media file
+ * The minimum information required to open a file is its URL or filename, which
+ * is passed to avformat_open_input(), as in the following code:
+ * @code
+ * const char *url = "in.mp3";
+ * AVFormatContext *s = NULL;
+ * int ret = avformat_open_input(&s, url, NULL, NULL);
+ * if (ret < 0)
+ *     abort();
+ * @endcode
+ * The above code attempts to allocate an AVFormatContext, open the
+ * specified file (autodetecting the format) and read the header, exporting the
+ * information stored there into s. Some formats do not have a header or do not
+ * store enough information there, so it is recommended that you call the
+ * avformat_find_stream_info() function which tries to read and decode a few
+ * frames to find missing information.
+ *
+ * In some cases you might want to preallocate an AVFormatContext yourself with
+ * avformat_alloc_context() and do some tweaking on it before passing it to
+ * avformat_open_input(). One such case is when you want to use custom functions
+ * for reading input data instead of lavf internal I/O layer.
+ * To do that, create your own AVIOContext with avio_alloc_context(), passing
+ * your reading callbacks to it. Then set the @em pb field of your
+ * AVFormatContext to newly created AVIOContext.
+ *
+ * After you have finished reading the file, you must close it with
+ * avformat_close_input(). It will free everything associated with the file.
+ *
+ * @section lavf_decoding_read Reading from an opened file
+ *
+ * @section lavf_decoding_seek Seeking
  * @}
  *
  * @defgroup lavf_encoding Muxing
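The demuxing overview above stops at a stub "Reading from an opened file" section; the following is a hedged end-to-end sketch built only from the calls this documentation names:

#include <libavformat/avformat.h>

/* Sketch: open a file, drain its packets, clean up. Error handling kept
 * minimal; "in.mp3" is just a placeholder input. */
int main(void)
{
    AVFormatContext *s = NULL;
    AVPacket pkt;

    av_register_all();
    avformat_network_init();          /* optional unless network protocols are used */

    if (avformat_open_input(&s, "in.mp3", NULL, NULL) < 0)
        return 1;
    if (avformat_find_stream_info(s, NULL) < 0) {
        avformat_close_input(&s);
        return 1;
    }

    while (av_read_frame(s, &pkt) >= 0) {
        /* pkt.stream_index identifies the AVStream this packet belongs to */
        av_free_packet(&pkt);
    }

    avformat_close_input(&s);         /* frees everything and sets s to NULL */
    avformat_network_deinit();
    return 0;
}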
@@ -62,21 +135,6 @@
 *
 */

-/**
- * Return the LIBAVFORMAT_VERSION_INT constant.
- */
-unsigned avformat_version(void);
-
-/**
- * Return the libavformat build-time configuration.
- */
-const char *avformat_configuration(void);
-
-/**
- * Return the libavformat license.
- */
-const char *avformat_license(void);
-
 #include <time.h>
 #include <stdio.h> /* FILE */
 #include "libavcodec/avcodec.h"
@@ -463,7 +521,7 @@ typedef struct AVInputFormat {
                     int stream_index, int64_t timestamp, int flags);
 #endif
     /**
-     * Gets the next timestamp in stream[stream_index].time_base units.
+     * Get the next timestamp in stream[stream_index].time_base units.
      * @return the timestamp or AV_NOPTS_VALUE if an error occurred
      */
     int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index,
@ -1176,6 +1234,60 @@ typedef struct AVPacketList {
|
|||||||
struct AVPacketList *next;
|
struct AVPacketList *next;
|
||||||
} AVPacketList;
|
} AVPacketList;
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @defgroup lavf_core Core functions
|
||||||
|
* @ingroup libavf
|
||||||
|
*
|
||||||
|
* Functions for querying libavformat capabilities, allocating core structures,
|
||||||
|
* etc.
|
||||||
|
* @{
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return the LIBAVFORMAT_VERSION_INT constant.
|
||||||
|
*/
|
||||||
|
unsigned avformat_version(void);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return the libavformat build-time configuration.
|
||||||
|
*/
|
||||||
|
const char *avformat_configuration(void);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return the libavformat license.
|
||||||
|
*/
|
||||||
|
const char *avformat_license(void);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize libavformat and register all the muxers, demuxers and
|
||||||
|
* protocols. If you do not call this function, then you can select
|
||||||
|
* exactly which formats you want to support.
|
||||||
|
*
|
||||||
|
* @see av_register_input_format()
|
||||||
|
* @see av_register_output_format()
|
||||||
|
* @see av_register_protocol()
|
||||||
|
*/
|
||||||
|
void av_register_all(void);
|
||||||
|
|
||||||
|
void av_register_input_format(AVInputFormat *format);
|
||||||
|
void av_register_output_format(AVOutputFormat *format);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Do global initialization of network components. This is optional,
|
||||||
|
* but recommended, since it avoids the overhead of implicitly
|
||||||
|
* doing the setup for each session.
|
||||||
|
*
|
||||||
|
* Calling this function will become mandatory if using network
|
||||||
|
* protocols at some major version bump.
|
||||||
|
*/
|
||||||
|
int avformat_network_init(void);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Undo the initialization done by avformat_network_init.
|
||||||
|
*/
|
||||||
|
int avformat_network_deinit(void);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If f is NULL, returns the first registered input format,
|
* If f is NULL, returns the first registered input format,
|
||||||
* if f is non-NULL, returns the next registered input format after f
|
* if f is non-NULL, returns the next registered input format after f
|
||||||
@ -1190,131 +1302,61 @@ AVInputFormat *av_iformat_next(AVInputFormat *f);
|
|||||||
*/
|
*/
|
||||||
AVOutputFormat *av_oformat_next(AVOutputFormat *f);
|
AVOutputFormat *av_oformat_next(AVOutputFormat *f);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Allocate an AVFormatContext.
|
||||||
|
* avformat_free_context() can be used to free the context and everything
|
||||||
|
* allocated by the framework within it.
|
||||||
|
*/
|
||||||
|
AVFormatContext *avformat_alloc_context(void);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Free an AVFormatContext and all its streams.
|
||||||
|
* @param s context to free
|
||||||
|
*/
|
||||||
|
void avformat_free_context(AVFormatContext *s);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the AVClass for AVFormatContext. It can be used in combination with
|
||||||
|
* AV_OPT_SEARCH_FAKE_OBJ for examining options.
|
||||||
|
*
|
||||||
|
* @see av_opt_find().
|
||||||
|
*/
|
||||||
|
const AVClass *avformat_get_class(void);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add a new stream to a media file.
|
||||||
|
*
|
||||||
|
* When demuxing, it is called by the demuxer in read_header(). If the
|
||||||
|
* flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also
|
||||||
|
* be called in read_packet().
|
||||||
|
*
|
||||||
|
* When muxing, should be called by the user before avformat_write_header().
|
||||||
|
*
|
||||||
|
* @param c If non-NULL, the AVCodecContext corresponding to the new stream
|
||||||
|
* will be initialized to use this codec. This is needed for e.g. codec-specific
|
||||||
|
* defaults to be set, so codec should be provided if it is known.
|
||||||
|
*
|
||||||
|
* @return newly created stream or NULL on error.
|
||||||
|
*/
|
||||||
|
AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c);
|
||||||
|
|
||||||
|
AVProgram *av_new_program(AVFormatContext *s, int id);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @}
|
||||||
|
*/
|
||||||
|
|
||||||
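avformat_new_stream(), documented in the hunk above, is the muxing-side entry point for adding streams. A hedged sketch of the usual pattern (the codec argument only seeds codec-specific defaults; names are illustrative):

#include <libavformat/avformat.h>

/* Sketch: create an output context and add one stream to it. */
static AVStream *add_stream(AVFormatContext **out, const char *filename,
                            enum CodecID codec_id)
{
    AVFormatContext *oc = avformat_alloc_context();
    AVCodec *enc;

    if (!oc)
        return NULL;
    oc->oformat = av_guess_format(NULL, filename, NULL);
    enc         = avcodec_find_encoder(codec_id);
    *out        = oc;
    return avformat_new_stream(oc, enc);   /* NULL on error */
}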
|
|
||||||
#if FF_API_GUESS_IMG2_CODEC
|
#if FF_API_GUESS_IMG2_CODEC
|
||||||
attribute_deprecated enum CodecID av_guess_image2_codec(const char *filename);
|
attribute_deprecated enum CodecID av_guess_image2_codec(const char *filename);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* XXX: Use automatic init with either ELF sections or C file parser */
|
|
||||||
/* modules. */
|
|
||||||
|
|
||||||
/* utils.c */
|
|
||||||
void av_register_input_format(AVInputFormat *format);
|
|
||||||
void av_register_output_format(AVOutputFormat *format);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Return the output format in the list of registered output formats
|
|
||||||
* which best matches the provided parameters, or return NULL if
|
|
||||||
* there is no match.
|
|
||||||
*
|
|
||||||
* @param short_name if non-NULL checks if short_name matches with the
|
|
||||||
* names of the registered formats
|
|
||||||
* @param filename if non-NULL checks if filename terminates with the
|
|
||||||
* extensions of the registered formats
|
|
||||||
* @param mime_type if non-NULL checks if mime_type matches with the
|
|
||||||
* MIME type of the registered formats
|
|
||||||
*/
|
|
||||||
AVOutputFormat *av_guess_format(const char *short_name,
|
|
||||||
const char *filename,
|
|
||||||
const char *mime_type);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Guess the codec ID based upon muxer and filename.
|
|
||||||
*/
|
|
||||||
enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
|
|
||||||
const char *filename, const char *mime_type,
|
|
||||||
enum AVMediaType type);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Send a nice hexadecimal dump of a buffer to the specified file stream.
|
|
||||||
*
|
|
||||||
* @param f The file stream pointer where the dump should be sent to.
|
|
||||||
* @param buf buffer
|
|
||||||
* @param size buffer size
|
|
||||||
*
|
|
||||||
* @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2
|
|
||||||
*/
|
|
||||||
void av_hex_dump(FILE *f, uint8_t *buf, int size);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Send a nice hexadecimal dump of a buffer to the log.
|
|
||||||
*
|
|
||||||
* @param avcl A pointer to an arbitrary struct of which the first field is a
|
|
||||||
* pointer to an AVClass struct.
|
|
||||||
* @param level The importance level of the message, lower values signifying
|
|
||||||
* higher importance.
|
|
||||||
* @param buf buffer
|
|
||||||
* @param size buffer size
|
|
||||||
*
|
|
||||||
* @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2
|
|
||||||
*/
|
|
||||||
void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Send a nice dump of a packet to the specified file stream.
|
|
||||||
*
|
|
||||||
* @param f The file stream pointer where the dump should be sent to.
|
|
||||||
* @param pkt packet to dump
|
|
||||||
* @param dump_payload True if the payload must be displayed, too.
|
|
||||||
* @param st AVStream that the packet belongs to
|
|
||||||
*/
|
|
||||||
void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st);
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Send a nice dump of a packet to the log.
|
|
||||||
*
|
|
||||||
* @param avcl A pointer to an arbitrary struct of which the first field is a
|
|
||||||
* pointer to an AVClass struct.
|
|
||||||
* @param level The importance level of the message, lower values signifying
|
|
||||||
* higher importance.
|
|
||||||
* @param pkt packet to dump
|
|
||||||
* @param dump_payload True if the payload must be displayed, too.
|
|
||||||
* @param st AVStream that the packet belongs to
|
|
||||||
*/
|
|
||||||
void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
|
|
||||||
AVStream *st);
|
|
||||||
|
|
||||||
#if FF_API_PKT_DUMP
|
#if FF_API_PKT_DUMP
|
||||||
attribute_deprecated void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload);
|
attribute_deprecated void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload);
|
||||||
attribute_deprecated void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt,
|
attribute_deprecated void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt,
|
||||||
int dump_payload);
|
int dump_payload);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/**
|
|
||||||
* Initialize libavformat and register all the muxers, demuxers and
|
|
||||||
* protocols. If you do not call this function, then you can select
|
|
||||||
* exactly which formats you want to support.
|
|
||||||
*
|
|
||||||
* @see av_register_input_format()
|
|
||||||
* @see av_register_output_format()
|
|
||||||
* @see av_register_protocol()
|
|
||||||
*/
|
|
||||||
void av_register_all(void);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get the CodecID for the given codec tag tag.
|
|
||||||
* If no codec id is found returns CODEC_ID_NONE.
|
|
||||||
*
|
|
||||||
* @param tags list of supported codec_id-codec_tag pairs, as stored
|
|
||||||
* in AVInputFormat.codec_tag and AVOutputFormat.codec_tag
|
|
||||||
*/
|
|
||||||
enum CodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get the codec tag for the given codec id id.
|
|
||||||
* If no codec tag is found returns 0.
|
|
||||||
*
|
|
||||||
* @param tags list of supported codec_id-codec_tag pairs, as stored
|
|
||||||
* in AVInputFormat.codec_tag and AVOutputFormat.codec_tag
|
|
||||||
*/
|
|
||||||
unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum CodecID id);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Allocate an AVFormatContext.
|
|
||||||
* avformat_free_context() can be used to free the context and everything
|
|
||||||
* allocated by the framework within it.
|
|
||||||
*/
|
|
||||||
AVFormatContext *avformat_alloc_context(void);
|
|
||||||
|
|
||||||
#if FF_API_ALLOC_OUTPUT_CONTEXT
|
#if FF_API_ALLOC_OUTPUT_CONTEXT
|
||||||
/**
|
/**
|
||||||
@@ -1636,28 +1678,36 @@ int av_read_play(AVFormatContext *s);
 */
int av_read_pause(AVFormatContext *s);

+#if FF_API_FORMAT_PARAMETERS
 /**
  * Free a AVFormatContext allocated by av_open_input_stream.
  * @param s context to free
+ * @deprecated use av_close_input_file()
  */
+attribute_deprecated
 void av_close_input_stream(AVFormatContext *s);
+#endif

+#if FF_API_CLOSE_INPUT_FILE
 /**
+ * @deprecated use avformat_close_input()
  * Close a media file (but not its codecs).
  *
  * @param s media file handle
  */
+attribute_deprecated
 void av_close_input_file(AVFormatContext *s);
+#endif

+/**
+ * Close an opened input AVFormatContext. Free it and all its contents
+ * and set *s to NULL.
+ */
+void avformat_close_input(AVFormatContext **s);
 /**
  * @}
  */
-
-/**
- * Free an AVFormatContext and all its streams.
- * @param s context to free
- */
-void avformat_free_context(AVFormatContext *s);

 #if FF_API_NEW_STREAM
 /**
  * Add a new stream to a media file.
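This is the headline libavformat change of the merge: av_close_input_stream() and av_close_input_file() become deprecated and avformat_close_input() replaces them. The migration mirrors the call-site updates elsewhere in this diff:

/* Before (now deprecated): the caller must remember to clear the pointer. */
av_close_input_file(ic);
ic = NULL;

/* After: pass the address of the pointer; the context is freed and *s is
 * set to NULL, so a stale pointer cannot be reused by accident. */
avformat_close_input(&ic);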
@ -1673,25 +1723,6 @@ attribute_deprecated
|
|||||||
AVStream *av_new_stream(AVFormatContext *s, int id);
|
AVStream *av_new_stream(AVFormatContext *s, int id);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/**
|
|
||||||
* Add a new stream to a media file.
|
|
||||||
*
|
|
||||||
* When demuxing, it is called by the demuxer in read_header(). If the
|
|
||||||
* flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also
|
|
||||||
* be called in read_packet().
|
|
||||||
*
|
|
||||||
* When muxing, should be called by the user before avformat_write_header().
|
|
||||||
*
|
|
||||||
* @param c If non-NULL, the AVCodecContext corresponding to the new stream
|
|
||||||
* will be initialized to use this codec. This is needed for e.g. codec-specific
|
|
||||||
* defaults to be set, so codec should be provided if it is known.
|
|
||||||
*
|
|
||||||
* @return newly created stream or NULL on error.
|
|
||||||
*/
|
|
||||||
AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c);
|
|
||||||
|
|
||||||
AVProgram *av_new_program(AVFormatContext *s, int id);
|
|
||||||
|
|
||||||
#if FF_API_SET_PTS_INFO
|
#if FF_API_SET_PTS_INFO
|
||||||
/**
|
/**
|
||||||
* @deprecated this function is not supposed to be called outside of lavf
|
* @deprecated this function is not supposed to be called outside of lavf
|
||||||
@ -1706,27 +1737,6 @@ void av_set_pts_info(AVStream *s, int pts_wrap_bits,
|
|||||||
#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes
|
#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes
|
||||||
#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number
|
#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number
|
||||||
|
|
||||||
int av_find_default_stream_index(AVFormatContext *s);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get the index for a specific timestamp.
|
|
||||||
* @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond
|
|
||||||
* to the timestamp which is <= the requested one, if backward
|
|
||||||
* is 0, then it will be >=
|
|
||||||
* if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
|
|
||||||
* @return < 0 if no such timestamp could be found
|
|
||||||
*/
|
|
||||||
int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Add an index entry into a sorted list. Update the entry if the list
|
|
||||||
* already contains it.
|
|
||||||
*
|
|
||||||
* @param timestamp timestamp in the time base of the given stream
|
|
||||||
*/
|
|
||||||
int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
|
|
||||||
int size, int distance, int flags);
|
|
||||||
|
|
||||||
#if FF_API_SEEK_PUBLIC
|
#if FF_API_SEEK_PUBLIC
|
||||||
attribute_deprecated
|
attribute_deprecated
|
||||||
int av_seek_frame_binary(AVFormatContext *s, int stream_index,
|
int av_seek_frame_binary(AVFormatContext *s, int stream_index,
|
||||||
@ -1749,31 +1759,6 @@ int64_t av_gen_search(AVFormatContext *s, int stream_index,
|
|||||||
attribute_deprecated int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap);
|
attribute_deprecated int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/**
|
|
||||||
* Split a URL string into components.
|
|
||||||
*
|
|
||||||
* The pointers to buffers for storing individual components may be null,
|
|
||||||
* in order to ignore that component. Buffers for components not found are
|
|
||||||
* set to empty strings. If the port is not found, it is set to a negative
|
|
||||||
* value.
|
|
||||||
*
|
|
||||||
* @param proto the buffer for the protocol
|
|
||||||
* @param proto_size the size of the proto buffer
|
|
||||||
* @param authorization the buffer for the authorization
|
|
||||||
* @param authorization_size the size of the authorization buffer
|
|
||||||
* @param hostname the buffer for the host name
|
|
||||||
* @param hostname_size the size of the hostname buffer
|
|
||||||
* @param port_ptr a pointer to store the port number in
|
|
||||||
* @param path the buffer for the path
|
|
||||||
* @param path_size the size of the path buffer
|
|
||||||
* @param url the URL to split
|
|
||||||
*/
|
|
||||||
void av_url_split(char *proto, int proto_size,
|
|
||||||
char *authorization, int authorization_size,
|
|
||||||
char *hostname, int hostname_size,
|
|
||||||
int *port_ptr,
|
|
||||||
char *path, int path_size,
|
|
||||||
const char *url);
|
|
||||||
/**
|
/**
|
||||||
* @addtogroup lavf_encoding
|
* @addtogroup lavf_encoding
|
||||||
* @{
|
* @{
|
||||||
@ -1869,9 +1854,29 @@ int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
|
|||||||
* @return 0 if OK, AVERROR_xxx on error
|
* @return 0 if OK, AVERROR_xxx on error
|
||||||
*/
|
*/
|
||||||
int av_write_trailer(AVFormatContext *s);
|
int av_write_trailer(AVFormatContext *s);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @}
|
* Return the output format in the list of registered output formats
|
||||||
|
* which best matches the provided parameters, or return NULL if
|
||||||
|
* there is no match.
|
||||||
|
*
|
||||||
|
* @param short_name if non-NULL checks if short_name matches with the
|
||||||
|
* names of the registered formats
|
||||||
|
* @param filename if non-NULL checks if filename terminates with the
|
||||||
|
* extensions of the registered formats
|
||||||
|
* @param mime_type if non-NULL checks if mime_type matches with the
|
||||||
|
* MIME type of the registered formats
|
||||||
*/
|
*/
|
||||||
|
AVOutputFormat *av_guess_format(const char *short_name,
|
||||||
|
const char *filename,
|
||||||
|
const char *mime_type);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Guess the codec ID based upon muxer and filename.
|
||||||
|
*/
|
||||||
|
enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
|
||||||
|
const char *filename, const char *mime_type,
|
||||||
|
enum AVMediaType type);
|
||||||
|
|
||||||
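av_guess_format() and av_guess_codec(), regrouped into the muxing section here, are typically used together when preparing an output file. A short hedged sketch:

#include <libavformat/avformat.h>

/* Sketch: pick a muxer from the output filename, then ask lavf for the
 * default video codec of that muxer. */
static enum CodecID default_video_codec(const char *filename)
{
    AVOutputFormat *ofmt = av_guess_format(NULL, filename, NULL);
    if (!ofmt)
        return CODEC_ID_NONE;
    return av_guess_codec(ofmt, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO);
}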
/**
|
/**
|
||||||
* Get timing information for the data currently output.
|
* Get timing information for the data currently output.
|
||||||
@ -1891,6 +1896,137 @@ int av_write_trailer(AVFormatContext *s);
|
|||||||
int av_get_output_timestamp(struct AVFormatContext *s, int stream,
|
int av_get_output_timestamp(struct AVFormatContext *s, int stream,
|
||||||
int64_t *dts, int64_t *wall);
|
int64_t *dts, int64_t *wall);
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @}
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @defgroup lavf_misc Utility functions
|
||||||
|
* @ingroup libavf
|
||||||
|
* @{
|
||||||
|
*
|
||||||
|
* Miscelaneous utility functions related to both muxing and demuxing
|
||||||
|
* (or neither).
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Send a nice hexadecimal dump of a buffer to the specified file stream.
|
||||||
|
*
|
||||||
|
* @param f The file stream pointer where the dump should be sent to.
|
||||||
|
* @param buf buffer
|
||||||
|
* @param size buffer size
|
||||||
|
*
|
||||||
|
* @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2
|
||||||
|
*/
|
||||||
|
void av_hex_dump(FILE *f, uint8_t *buf, int size);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Send a nice hexadecimal dump of a buffer to the log.
|
||||||
|
*
|
||||||
|
* @param avcl A pointer to an arbitrary struct of which the first field is a
|
||||||
|
* pointer to an AVClass struct.
|
||||||
|
* @param level The importance level of the message, lower values signifying
|
||||||
|
* higher importance.
|
||||||
|
* @param buf buffer
|
||||||
|
* @param size buffer size
|
||||||
|
*
|
||||||
|
* @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2
|
||||||
|
*/
|
||||||
|
void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Send a nice dump of a packet to the specified file stream.
|
||||||
|
*
|
||||||
|
* @param f The file stream pointer where the dump should be sent to.
|
||||||
|
* @param pkt packet to dump
|
||||||
|
* @param dump_payload True if the payload must be displayed, too.
|
||||||
|
* @param st AVStream that the packet belongs to
|
||||||
|
*/
|
||||||
|
void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st);
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Send a nice dump of a packet to the log.
|
||||||
|
*
|
||||||
|
* @param avcl A pointer to an arbitrary struct of which the first field is a
|
||||||
|
* pointer to an AVClass struct.
|
||||||
|
* @param level The importance level of the message, lower values signifying
|
||||||
|
* higher importance.
|
||||||
|
* @param pkt packet to dump
|
||||||
|
* @param dump_payload True if the payload must be displayed, too.
|
||||||
|
* @param st AVStream that the packet belongs to
|
||||||
|
*/
|
||||||
|
void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
|
||||||
|
AVStream *st);
|
||||||
|
|
||||||
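The packet-dump helpers moved into this utility group are mostly used while debugging demuxer output. A hedged sketch:

#include <libavformat/avformat.h>

/* Sketch: log each demuxed packet (headers only, no payload) at debug level. */
static void dump_packets(AVFormatContext *s)
{
    AVPacket pkt;
    while (av_read_frame(s, &pkt) >= 0) {
        av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, 0,
                         s->streams[pkt.stream_index]);
        av_free_packet(&pkt);
    }
}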
|
/**
|
||||||
|
* Get the CodecID for the given codec tag tag.
|
||||||
|
* If no codec id is found returns CODEC_ID_NONE.
|
||||||
|
*
|
||||||
|
* @param tags list of supported codec_id-codec_tag pairs, as stored
|
||||||
|
* in AVInputFormat.codec_tag and AVOutputFormat.codec_tag
|
||||||
|
*/
|
||||||
|
enum CodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the codec tag for the given codec id id.
|
||||||
|
* If no codec tag is found returns 0.
|
||||||
|
*
|
||||||
|
* @param tags list of supported codec_id-codec_tag pairs, as stored
|
||||||
|
* in AVInputFormat.codec_tag and AVOutputFormat.codec_tag
|
||||||
|
*/
|
||||||
|
unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum CodecID id);
|
||||||
|
|
||||||
|

int av_find_default_stream_index(AVFormatContext *s);

/**
 * Get the index for a specific timestamp.
 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond
 *              to the timestamp which is <= the requested one, if backward
 *              is 0, then it will be >=
 *              if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
 * @return < 0 if no such timestamp could be found
 */
int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);

/**
 * Add an index entry into a sorted list. Update the entry if the list
 * already contains it.
 *
 * @param timestamp timestamp in the time base of the given stream
 */
int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
                       int size, int distance, int flags);


/**
 * Split a URL string into components.
 *
 * The pointers to buffers for storing individual components may be null,
 * in order to ignore that component. Buffers for components not found are
 * set to empty strings. If the port is not found, it is set to a negative
 * value.
 *
 * @param proto the buffer for the protocol
 * @param proto_size the size of the proto buffer
 * @param authorization the buffer for the authorization
 * @param authorization_size the size of the authorization buffer
 * @param hostname the buffer for the host name
 * @param hostname_size the size of the hostname buffer
 * @param port_ptr a pointer to store the port number in
 * @param path the buffer for the path
 * @param path_size the size of the path buffer
 * @param url the URL to split
 */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr,
                  char *path, int path_size,
                  const char *url);
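
For reference (not part of the commit): a sketch of av_url_split() on a made-up RTSP URL, showing which component ends up in which buffer.

#include <libavformat/avformat.h>

static void split_rtsp_url(void)
{
    char proto[16], auth[128], host[128], path[256];
    int port;

    av_url_split(proto, sizeof(proto), auth, sizeof(auth),
                 host, sizeof(host), &port, path, sizeof(path),
                 "rtsp://user:pass@example.com:554/stream/0");
    /* proto="rtsp", auth="user:pass", host="example.com", port=554, path="/stream/0" */
}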

#if FF_API_DUMP_FORMAT
/**
 * @deprecated Deprecated in favor of av_dump_format().

@@ -1992,26 +2128,7 @@ int av_match_ext(const char *filename, const char *extensions);
int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance);

/**
- * Get the AVClass for AVFormatContext. It can be used in combination with
- * AV_OPT_SEARCH_FAKE_OBJ for examining options.
- *
- * @see av_opt_find().
+ * @}
 */
-const AVClass *avformat_get_class(void);
-
-/**
- * Do global initialization of network components. This is optional,
- * but recommended, since it avoids the overhead of implicitly
- * doing the setup for each session.
- *
- * Calling this function will become mandatory if using network
- * protocols at some major version bump.
- */
-int avformat_network_init(void);
-
-/**
- * Undo the initialization done by avformat_network_init.
- */
-int avformat_network_deinit(void);

#endif /* AVFORMAT_AVFORMAT_H */
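
For reference (not part of the commit): the doc block moved above mentions av_opt_find() with AV_OPT_SEARCH_FAKE_OBJ and the optional network setup; a minimal sketch of both, with the option name "fflags" chosen only as an example.

#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>

static void inspect_and_init(void)
{
    const AVClass  *cls = avformat_get_class();
    const AVOption *o   = av_opt_find(&cls, "fflags", NULL, 0, AV_OPT_SEARCH_FAKE_OBJ);

    if (o)
        printf("AVFormatContext option: %s\n", o->name);

    avformat_network_init();   /* optional, but avoids per-session setup overhead */
    /* ... open network inputs (rtsp://, http://, ...) here ... */
    avformat_network_deinit();
}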

@@ -1450,7 +1450,7 @@ static int avi_read_close(AVFormatContext *s)
        if (ast) {
            if (ast->sub_ctx) {
                av_freep(&ast->sub_ctx->pb);
-               av_close_input_file(ast->sub_ctx);
+               avformat_close_input(&ast->sub_ctx);
            }
            av_free(ast->sub_buffer);
            av_free_packet(&ast->sub_pkt);

@@ -380,7 +380,7 @@ static void mpegts_close_filter(MpegTSContext *ts, MpegTSFilter *filter)
        PESContext *pes = filter->u.pes_filter.opaque;
        av_freep(&pes->buffer);
        /* referenced private data will be freed later in
-        * av_close_input_stream */
+        * avformat_close_input */
        if (!((PESContext *)filter->u.pes_filter.opaque)->st) {
            av_freep(&filter->u.pes_filter.opaque);
        }

@@ -544,7 +544,7 @@ rdt_free_context (PayloadContext *rdt)
            av_freep(&rdt->rmst[i]);
        }
    if (rdt->rmctx)
-       av_close_input_file(rdt->rmctx);
+       avformat_close_input(&rdt->rmctx);
    av_freep(&rdt->mlti_data);
    av_freep(&rdt->rmst);
    av_free(rdt);

@@ -108,8 +108,7 @@ int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
                   "Failed to fix invalid RTSP-MS/ASF min_pktsize\n");
        init_packetizer(&pb, buf, len);
        if (rt->asf_ctx) {
-           av_close_input_file(rt->asf_ctx);
-           rt->asf_ctx = NULL;
+           avformat_close_input(&rt->asf_ctx);
        }
        if (!(rt->asf_ctx = avformat_alloc_context()))
            return AVERROR(ENOMEM);

@@ -52,8 +52,8 @@ struct PayloadContext {
};

/**
- * Parses configuration (basically the codec-specific extradata) from
- * a RTP config subpacket (starts with 0xff).
+ * Parse configuration (basically the codec-specific extradata) from
+ * an RTP config subpacket (starts with 0xff).
 *
 * Layout of the config subpacket (in bytes):
 * 1: 0xFF <- config ID

@@ -128,7 +128,7 @@ static int qdm2_parse_config(PayloadContext *qdm, AVStream *st,
}

/**
- * Parses a single subpacket. We store this subpacket in an intermediate
+ * Parse a single subpacket. We store this subpacket in an intermediate
 * buffer (position depends on the ID (byte[0]). When called, at least
 * 4 bytes are available for reading (see qdm2_parse_packet()).
 *

@@ -179,7 +179,7 @@ static int qdm2_parse_subpacket(PayloadContext *qdm, AVStream *st,
}

/**
- * Adds a superblock header around a set of subpackets.
+ * Add a superblock header around a set of subpackets.
 *
 * @return <0 on error, else 0.
 */

@@ -580,8 +580,7 @@ void ff_rtsp_close_streams(AVFormatContext *s)
        }
    av_free(rt->rtsp_streams);
    if (rt->asf_ctx) {
-       av_close_input_stream (rt->asf_ctx);
-       rt->asf_ctx = NULL;
+       avformat_close_input(&rt->asf_ctx);
    }
    av_free(rt->p);
    av_free(rt->recvbuf);

@@ -186,7 +186,7 @@ enum RTSPClientState {
};

/**
- * Identifies particular servers that require special handling, such as
+ * Identify particular servers that require special handling, such as
 * standards-incompliant "Transport:" lines in the SETUP request.
 */
enum RTSPServerType {

@@ -366,7 +366,7 @@ typedef struct RTSPState {
                                     source address and port. */

/**
- * Describes a single stream, as identified by a single m= line block in the
+ * Describe a single stream, as identified by a single m= line block in the
 * SDP content. In the case of RDT, one RTSPStream can represent multiple
 * AVStreams. In this case, each AVStream in this set has similar content
 * (but different codec/bitrate).

@@ -52,7 +52,7 @@ static int sap_read_close(AVFormatContext *s)
{
    struct SAPState *sap = s->priv_data;
    if (sap->sdp_ctx)
-       av_close_input_file(sap->sdp_ctx);
+       avformat_close_input(&sap->sdp_ctx);
    if (sap->ann_fd)
        ffurl_close(sap->ann_fd);
    av_freep(&sap->sdp);

@@ -126,7 +126,7 @@ int main(int argc, char **argv)
        printf("ret:%-10s st:%2d flags:%d ts:%s\n", ret_str(ret), stream_id, i&1, ts_buf);
    }

-   av_close_input_file(ic);
+   avformat_close_input(&ic);

    return 0;
}

@@ -2747,6 +2747,7 @@ int av_read_pause(AVFormatContext *s)
    return AVERROR(ENOSYS);
}

+#if FF_API_FORMAT_PARAMETERS
void av_close_input_stream(AVFormatContext *s)
{
    flush_packet_queue(s);

@@ -2754,6 +2755,7 @@ void av_close_input_stream(AVFormatContext *s)
        s->iformat->read_close(s);
    avformat_free_context(s);
}
+#endif

void avformat_free_context(AVFormatContext *s)
{

@@ -2797,11 +2799,23 @@ void avformat_free_context(AVFormatContext *s)
    av_free(s);
}

+#if FF_API_CLOSE_INPUT_FILE
void av_close_input_file(AVFormatContext *s)
{
+    avformat_close_input(&s);
+}
+#endif
+
+void avformat_close_input(AVFormatContext **ps)
+{
+    AVFormatContext *s = *ps;
    AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
                      NULL : s->pb;
-   av_close_input_stream(s);
+    flush_packet_queue(s);
+    if (s->iformat->read_close)
+        s->iformat->read_close(s);
+    avformat_free_context(s);
+    *ps = NULL;
    if (pb)
        avio_close(pb);
}
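
For reference (not part of the commit): the caller-side migration implied by the new function, assuming ic is an AVFormatContext* returned by avformat_open_input().

/* before (deprecated): */
av_close_input_file(ic);
ic = NULL;

/* after: also resets the caller's pointer, and skips avio_close() when
 * AVFMT_NOFILE or AVFMT_FLAG_CUSTOM_IO applies, exactly as in the code above */
avformat_close_input(&ic);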

@@ -30,7 +30,7 @@
#include "libavutil/avutil.h"

#define LIBAVFORMAT_VERSION_MAJOR 53
-#define LIBAVFORMAT_VERSION_MINOR 24
+#define LIBAVFORMAT_VERSION_MINOR 25
#define LIBAVFORMAT_VERSION_MICRO 0

#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \

@@ -125,5 +125,8 @@
#ifndef FF_API_SET_PTS_INFO
#define FF_API_SET_PTS_INFO (LIBAVFORMAT_VERSION_MAJOR < 54)
#endif
+#ifndef FF_API_CLOSE_INPUT_FILE
+#define FF_API_CLOSE_INPUT_FILE (LIBAVFORMAT_VERSION_MAJOR < 54)
+#endif

#endif /* AVFORMAT_VERSION_H */
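
For reference (not part of the commit): a hypothetical application-side shim that keeps building against both older and newer libavformat; the helper name is made up.

#include <libavformat/avformat.h>
#include <libavformat/version.h>

static void close_demuxer(AVFormatContext **ctx)
{
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 25, 0)
    avformat_close_input(ctx);      /* new API introduced with 53.25.0 */
#else
    av_close_input_file(*ctx);      /* old API; does not reset the pointer */
    *ctx = NULL;
#endif
}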

@@ -39,7 +39,7 @@ int ff_vorbiscomment_length(AVDictionary *m, const char *vendor_string,
                            unsigned *count);

/**
- * Writes a VorbisComment into a buffer. The buffer, p, must have enough
+ * Write a VorbisComment into a buffer. The buffer, p, must have enough
 * data to hold the whole VorbisComment. The minimum size required can be
 * obtained by passing the same AVDictionary and vendor_string to
 * ff_vorbiscomment_length()

@@ -51,6 +51,7 @@ static av_always_inline av_const unsigned av_bswap16(unsigned x)
}
#endif

+#if !AV_GCC_VERSION_AT_LEAST(4,5)
#define av_bswap32 av_bswap32
static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
{

@@ -66,6 +67,7 @@ static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
#endif /* HAVE_ARMV6 */
    return x;
}
+#endif /* !AV_GCC_VERSION_AT_LEAST(4,5) */

#endif /* __ARMCC_VERSION */

@@ -171,7 +171,7 @@ char *av_get_token(const char **buf, const char *term);
char *av_strtok(char *s, const char *delim, char **saveptr);

/**
- * Locale independent conversion of ASCII characters to upper case.
+ * Locale-independent conversion of ASCII characters to uppercase.
 */
static inline int av_toupper(int c)
{

@@ -181,7 +181,7 @@ static inline int av_toupper(int c)
}

/**
- * Locale independent conversion of ASCII characters to lower case.
+ * Locale-independent conversion of ASCII characters to lowercase.
 */
static inline int av_tolower(int c)
{

@@ -191,13 +191,13 @@ static inline int av_tolower(int c)
}

/**
- * Locale independent case-insensitive compare.
+ * Locale-independent case-insensitive compare.
 * @note This means only ASCII-range characters are case-insensitive
 */
int av_strcasecmp(const char *a, const char *b);

/**
- * Locale independent case-insensitive compare.
+ * Locale-independent case-insensitive compare.
 * @note This means only ASCII-range characters are case-insensitive
 */
int av_strncasecmp(const char *a, const char *b, size_t n);
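
For reference (not part of the commit): two tiny helpers illustrating that these comparisons are ASCII-only and therefore locale-safe for protocol and header keywords; the helper names are made up.

#include <libavutil/avstring.h>

static int is_rtsp_scheme(const char *proto)
{
    /* ASCII-only comparison: matches "rtsp", "RTSP", "Rtsp", ... in any locale */
    return !av_strcasecmp(proto, "rtsp");
}

static int is_transport_header(const char *line)
{
    return !av_strncasecmp(line, "Transport:", 10);
}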

@@ -37,6 +37,7 @@
 * @li @ref libavc "libavcodec" encoding/decoding library
 * @li @subpage libavfilter graph based frame editing library
 * @li @ref libavf "libavformat" I/O and muxing/demuxing library
+ * @li @ref lavd "libavdevice" special devices muxing/demuxing library
 * @li @ref lavu "libavutil" common utility library
 * @li @subpage libpostproc post processing library
 * @li @subpage libswscale color conversion and scaling library

@@ -65,29 +65,14 @@ static av_always_inline av_const uint16_t av_bswap16(uint16_t x)
#ifndef av_bswap32
static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
{
-    x= ((x<<8)&0xFF00FF00) | ((x>>8)&0x00FF00FF);
-    x= (x>>16) | (x<<16);
-    return x;
+    return AV_BSWAP32C(x);
}
#endif

#ifndef av_bswap64
static inline uint64_t av_const av_bswap64(uint64_t x)
{
-#if 0
-    x= ((x<< 8)&0xFF00FF00FF00FF00ULL) | ((x>> 8)&0x00FF00FF00FF00FFULL);
-    x= ((x<<16)&0xFFFF0000FFFF0000ULL) | ((x>>16)&0x0000FFFF0000FFFFULL);
-    return (x>>32) | (x<<32);
-#else
-    union {
-        uint64_t ll;
-        uint32_t l[2];
-    } w, r;
-    w.ll = x;
-    r.l[0] = av_bswap32 (w.l[1]);
-    r.l[1] = av_bswap32 (w.l[0]);
-    return r.ll;
-#endif
+    return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32);
}
#endif

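For reference (not part of the commit): the expected results of the byte-swap helpers, identical for the generic macro-based version above and the per-arch asm versions.

#include <stdint.h>
#include <libavutil/bswap.h>

static void bswap_demo(void)
{
    uint32_t a = av_bswap32(0x01020304);            /* a == 0x04030201 */
    uint64_t b = av_bswap64(0x0102030405060708ULL); /* b == 0x0807060504030201 */
    (void)a;
    (void)b;
}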

@@ -221,7 +221,7 @@ struct AVDictionary {
#endif

/**
- * Returns NULL if a threading library has not been enabled.
+ * Return NULL if a threading library has not been enabled.
 * Used to disable threading functions in AVCodec definitions
 * when not needed.
 */

@@ -75,7 +75,7 @@ int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,
                   void *log_ctx);

/**
- * Parses timestr and returns in *time a corresponding number of
+ * Parse timestr and return in *time a corresponding number of
 * microseconds.
 *
 * @param timeval puts here the number of microseconds corresponding

@@ -28,24 +28,20 @@
#include "config.h"
#include "libavutil/attributes.h"

+#if !AV_GCC_VERSION_AT_LEAST(4,1)
#define av_bswap16 av_bswap16
static av_always_inline av_const unsigned av_bswap16(unsigned x)
{
    __asm__("rorw $8, %w0" : "+r"(x));
    return x;
}
+#endif /* !AV_GCC_VERSION_AT_LEAST(4,1) */

+#if !AV_GCC_VERSION_AT_LEAST(4,5)
#define av_bswap32 av_bswap32
static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
{
-#if HAVE_BSWAP
    __asm__("bswap %0" : "+r" (x));
-#else
-    __asm__("rorw $8, %w0 \n\t"
-            "rorl $16, %0 \n\t"
-            "rorw $8, %w0"
-            : "+r"(x));
-#endif
    return x;
}

@@ -57,5 +53,6 @@ static inline uint64_t av_const av_bswap64(uint64_t x)
    return x;
}
#endif
+#endif /* !AV_GCC_VERSION_AT_LEAST(4,5) */

#endif /* AVUTIL_X86_BSWAP_H */

@@ -83,9 +83,10 @@ void pp_postprocess(const uint8_t * src[3], const int srcStride[3],


/**
- * returns a pp_mode or NULL if an error occurred
- * name is the string after "-pp" on the command line
- * quality is a number from 0 to PP_QUALITY_MAX
+ * Return a pp_mode or NULL if an error occurred.
+ *
+ * @param name the string after "-pp" on the command line
+ * @param quality a number from 0 to PP_QUALITY_MAX
 */
pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality);
void pp_free_mode(pp_mode *mode);
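
For reference (not part of the commit): a sketch of obtaining and releasing a mode with the documented parameters; "default" is assumed here to be one of the preset names accepted after "-pp".

#include <libpostproc/postprocess.h>

static void pp_mode_demo(void)
{
    pp_mode *mode = pp_get_mode_by_name_and_quality("default", PP_QUALITY_MAX);
    if (mode) {
        /* ... pass the mode to pp_postprocess() together with a pp_context ... */
        pp_free_mode(mode);
    }
}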

@@ -1910,7 +1910,7 @@ MEDIAN((%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8))

#if HAVE_MMX
/**
- * transposes and shift the given 8x8 Block into dst1 and dst2
+ * Transpose and shift the given 8x8 Block into dst1 and dst2.
 */
static inline void RENAME(transpose1)(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int srcStride)
{

@@ -1995,7 +1995,7 @@ static inline void RENAME(transpose1)(uint8_t *dst1, uint8_t *dst2, uint8_t *src
}

/**
- * transposes the given 8x8 block
+ * Transpose the given 8x8 block.
 */
static inline void RENAME(transpose2)(uint8_t *dst, int dstStride, uint8_t *src)
{

@@ -60,17 +60,17 @@
#endif

/**
- * Returns the LIBSWSCALE_VERSION_INT constant.
+ * Return the LIBSWSCALE_VERSION_INT constant.
 */
unsigned swscale_version(void);

/**
- * Returns the libswscale build-time configuration.
+ * Return the libswscale build-time configuration.
 */
const char *swscale_configuration(void);

/**
- * Returns the libswscale license.
+ * Return the libswscale license.
 */
const char *swscale_license(void);

@@ -127,7 +127,7 @@ const char *swscale_license(void);
#define SWS_CS_DEFAULT 5

/**
- * Returns a pointer to yuv<->rgb coefficients for the given colorspace
+ * Return a pointer to yuv<->rgb coefficients for the given colorspace
 * suitable for sws_setColorspaceDetails().
 *
 * @param colorspace One of the SWS_CS_* macros. If invalid,

@@ -154,26 +154,26 @@ typedef struct {
struct SwsContext;

/**
- * Returns a positive value if pix_fmt is a supported input format, 0
+ * Return a positive value if pix_fmt is a supported input format, 0
 * otherwise.
 */
int sws_isSupportedInput(enum PixelFormat pix_fmt);

/**
- * Returns a positive value if pix_fmt is a supported output format, 0
+ * Return a positive value if pix_fmt is a supported output format, 0
 * otherwise.
 */
int sws_isSupportedOutput(enum PixelFormat pix_fmt);

/**
- * Allocates an empty SwsContext. This must be filled and passed to
+ * Allocate an empty SwsContext. This must be filled and passed to
 * sws_init_context(). For filling see AVOptions, options.c and
 * sws_setColorspaceDetails().
 */
struct SwsContext *sws_alloc_context(void);

/**
- * Initializes the swscaler context sws_context.
+ * Initialize the swscaler context sws_context.
 *
 * @return zero or positive value on success, a negative value on
 * error

@@ -181,14 +181,14 @@ struct SwsContext *sws_alloc_context(void);
int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter);

/**
- * Frees the swscaler context swsContext.
+ * Free the swscaler context swsContext.
 * If swsContext is NULL, then does nothing.
 */
void sws_freeContext(struct SwsContext *swsContext);

#if FF_API_SWS_GETCONTEXT
/**
- * Allocates and returns a SwsContext. You need it to perform
+ * Allocate and return an SwsContext. You need it to perform
 * scaling/conversion operations using sws_scale().
 *
 * @param srcW the width of the source image

@@ -210,7 +210,7 @@ struct SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat
#endif

/**
- * Scales the image slice in srcSlice and puts the resulting scaled
+ * Scale the image slice in srcSlice and put the resulting scaled
 * slice in the image in dst. A slice is a sequence of consecutive
 * rows in an image.
 *
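
For reference (not part of the commit): a minimal sketch of the documented scaling API converting one 640x480 YUV420P frame to RGB24; buffer handling and error checks are reduced to the essentials.

#include <libswscale/swscale.h>
#include <libavutil/mem.h>

static void yuv_to_rgb(const uint8_t *y, const uint8_t *u, const uint8_t *v)
{
    const int w = 640, h = 480;
    struct SwsContext *sws = sws_getContext(w, h, PIX_FMT_YUV420P,
                                            w, h, PIX_FMT_RGB24,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    const uint8_t *src[4] = { y, u, v, NULL };
    int src_stride[4]     = { w, w / 2, w / 2, 0 };
    uint8_t *rgb          = av_malloc(w * h * 3);
    uint8_t *dst[4]       = { rgb, NULL, NULL, NULL };
    int dst_stride[4]     = { w * 3, 0, 0, 0 };

    if (sws && rgb)
        sws_scale(sws, src, src_stride, 0, h, dst, dst_stride);

    sws_freeContext(sws);
    av_free(rgb);
}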

@@ -255,35 +255,35 @@ int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,
                             int *brightness, int *contrast, int *saturation);

/**
- * Allocates and returns an uninitialized vector with length coefficients.
+ * Allocate and return an uninitialized vector with length coefficients.
 */
SwsVector *sws_allocVec(int length);

/**
- * Returns a normalized Gaussian curve used to filter stuff
- * quality=3 is high quality, lower is lower quality.
+ * Return a normalized Gaussian curve used to filter stuff
+ * quality = 3 is high quality, lower is lower quality.
 */
SwsVector *sws_getGaussianVec(double variance, double quality);

/**
- * Allocates and returns a vector with length coefficients, all
+ * Allocate and return a vector with length coefficients, all
 * with the same value c.
 */
SwsVector *sws_getConstVec(double c, int length);

/**
- * Allocates and returns a vector with just one coefficient, with
+ * Allocate and return a vector with just one coefficient, with
 * value 1.0.
 */
SwsVector *sws_getIdentityVec(void);

/**
- * Scales all the coefficients of a by the scalar value.
+ * Scale all the coefficients of a by the scalar value.
 */
void sws_scaleVec(SwsVector *a, double scalar);

/**
- * Scales all the coefficients of a so that their sum equals height.
+ * Scale all the coefficients of a so that their sum equals height.
 */
void sws_normalizeVec(SwsVector *a, double height);
void sws_convVec(SwsVector *a, SwsVector *b);

@@ -292,13 +292,13 @@ void sws_subVec(SwsVector *a, SwsVector *b);
void sws_shiftVec(SwsVector *a, int shift);

/**
- * Allocates and returns a clone of the vector a, that is a vector
+ * Allocate and return a clone of the vector a, that is a vector
 * with the same coefficients as a.
 */
SwsVector *sws_cloneVec(SwsVector *a);

/**
- * Prints with av_log() a textual representation of the vector a
+ * Print with av_log() a textual representation of the vector a
 * if log_level <= av_log_level.
 */
void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level);
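
For reference (not part of the commit): a sketch combining the vector helpers above; sws_freeVec() and AV_LOG_DEBUG come from the same headers but are not shown in this hunk.

#include <libswscale/swscale.h>

static void gaussian_vec_demo(void)
{
    SwsVector *g = sws_getGaussianVec(2.0, 3.0); /* variance 2.0, quality 3 */
    if (g) {
        sws_normalizeVec(g, 1.0);                /* coefficients now sum to 1.0 */
        sws_printVec2(g, NULL, AV_LOG_DEBUG);
        sws_freeVec(g);
    }
}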

@@ -312,8 +312,7 @@ SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
void sws_freeFilter(SwsFilter *filter);

/**
- * Checks if context can be reused, otherwise reallocates a new
- * one.
+ * Check if context can be reused, otherwise reallocate a new one.
 *
 * If context is NULL, just calls sws_getContext() to get a new
 * context. Otherwise, checks if the parameters are the ones already

@@ -331,7 +330,7 @@ struct SwsContext *sws_getCachedContext(struct SwsContext *context,
                                        SwsFilter *dstFilter, const double *param);

/**
- * Converts an 8bit paletted frame into a frame with a color depth of 32-bits.
+ * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits.
 *
 * The output frame will have the same packed format as the palette.
 *

@@ -343,7 +342,7 @@ struct SwsContext *sws_getCachedContext(struct SwsContext *context,
void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette);

/**
- * Converts an 8bit paletted frame into a frame with a color depth of 24 bits.
+ * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits.
 *
 * With the palette format "ABCD", the destination frame ends up with the format "ABC".
 *
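
For reference (not part of the commit): a one-call sketch of the palette expansion documented above; src holds width*height palette indices and palette holds 256 packed 32-bit entries.

#include <libswscale/swscale.h>

static void expand_pal8(const uint8_t *src, uint8_t *dst, int width, int height,
                        const uint8_t *palette)
{
    sws_convertPalette8ToPacked32(src, dst, width * height, palette);
}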

@@ -675,7 +675,7 @@ extern const uint16_t dither_scale[15][16];
extern const AVClass sws_context_class;

/**
- * Sets c->swScale to an unscaled converter if one exists for the specific
+ * Set c->swScale to an unscaled converter if one exists for the specific
 * source and destination formats, bit depths, flags, etc.
 */
void ff_get_unscaled_swscale(SwsContext *c);

@@ -683,7 +683,7 @@ void ff_get_unscaled_swscale(SwsContext *c);
void ff_swscale_get_unscaled_altivec(SwsContext *c);

/**
- * Returns function pointer to fastest main scaler path function depending
+ * Return function pointer to fastest main scaler path function depending
 * on architecture and available optimizations.
 */
SwsFunc ff_getSwsFunc(SwsContext *c);

@@ -1,6 +1,6 @@
/*
- * Generates a synthetic stereo sound
- * NOTE: No floats are used to guarantee a bit exact output.
+ * Generate a synthetic stereo sound.
+ * NOTE: No floats are used to guarantee bitexact output.
 *
 * Copyright (c) 2002 Fabrice Bellard
 *

@@ -1,5 +1,5 @@
/*
- * Generates a synthetic YUV video sequence suitable for codec testing.
+ * Generate a synthetic YUV video sequence suitable for codec testing.
 *
 * copyright (c) Sebastien Bechet <s.bechet@av7.net>
 *

@@ -1,6 +1,6 @@
/*
- * Generates a synthetic YUV video sequence suitable for codec testing.
- * NOTE: No floats are used to guarantee a bit exact output.
+ * Generate a synthetic YUV video sequence suitable for codec testing.
+ * NOTE: No floats are used to guarantee bitexact output.
 *
 * Copyright (c) 2002 Fabrice Bellard
 *

@@ -117,7 +117,7 @@ int main(int argc, char **argv)
        break;
    }

-   av_close_input_file(fctx);
+   avformat_close_input(&fctx);

    while (donotquit)
        sleep(60);