
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  os_support: Define SHUT_RD, SHUT_WR and SHUT_RDWR on OS/2
  http: Add support for reading http POST reply headers
  http: Add http_shutdown() for ending writing of posts
  tcp: Allow signalling end of reading/writing
  avio: Add a function for signalling end of reading/writing
  lavfi: fix comment, audio is supported now.
  lavfi: fix incorrect comment.
  lavfi: remove avfilter_null_* from public API on next bump.
  lavfi: remove avfilter_default_* from public API on next bump.
  lavfi: deprecate default config_props() callback and refactor avfilter_config_links()
  avfiltergraph: smarter sample format selection.
  avconv: rename transcode_audio/video to decode_audio/video.
  asyncts: reset delta to 0 when it's not used.
  x86: lavc: use %if HAVE_AVX guards around AVX functions in yasm code.
  dwt: return errors from ff_slice_buffer_init()

Conflicts:
	ffmpeg.c
	libavfilter/avfilter.c
	libavfilter/avfilter.h
	libavfilter/formats.c
	libavfilter/version.h
	libavfilter/vf_blackframe.c
	libavfilter/vf_drawtext.c
	libavfilter/vf_fade.c
	libavfilter/vf_format.c
	libavfilter/vf_showinfo.c
	libavfilter/video.c
	libavfilter/video.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2012-05-23 21:41:13 +02:00
commit d0ad91c258
40 changed files with 361 additions and 84 deletions


@ -2538,7 +2538,7 @@ static int guess_input_channel_layout(InputStream *ist)
return 1;
}
static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
{
AVFrame *decoded_frame;
AVCodecContext *avctx = ist->st->codec;
@ -2639,7 +2639,7 @@ static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
return ret;
}
static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output)
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
{
AVFrame *decoded_frame;
void *buffer_to_free = NULL;
@ -2824,10 +2824,10 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
switch (ist->st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
ret = transcode_audio (ist, &avpkt, &got_output);
ret = decode_audio (ist, &avpkt, &got_output);
break;
case AVMEDIA_TYPE_VIDEO:
ret = transcode_video (ist, &avpkt, &got_output);
ret = decode_video (ist, &avpkt, &got_output);
if (avpkt.duration) {
duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
} else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {


@ -24,9 +24,9 @@
#include "dwt.h"
#include "libavcodec/x86/dwt.h"
void ff_slice_buffer_init(slice_buffer *buf, int line_count,
int max_allocated_lines, int line_width,
IDWTELEM *base_buffer)
int ff_slice_buffer_init(slice_buffer *buf, int line_count,
int max_allocated_lines, int line_width,
IDWTELEM *base_buffer)
{
int i;
@ -55,6 +55,7 @@ void ff_slice_buffer_init(slice_buffer *buf, int line_count,
}
buf->data_stack_top = max_allocated_lines - 1;
return 0;
}
IDWTELEM *ff_slice_buffer_load_line(slice_buffer *buf, int line)


@ -228,9 +228,9 @@ void ff_spatial_idwt_slice2(DWTContext *d, int y);
: ff_slice_buffer_load_line((slice_buf), \
(line_num)))
void ff_slice_buffer_init(slice_buffer *buf, int line_count,
int max_allocated_lines, int line_width,
IDWTELEM *base_buffer);
int ff_slice_buffer_init(slice_buffer *buf, int line_count,
int max_allocated_lines, int line_width,
IDWTELEM *base_buffer);
void ff_slice_buffer_release(slice_buffer *buf, int line);
void ff_slice_buffer_flush(slice_buffer *buf);
void ff_slice_buffer_destroy(slice_buffer *buf);


@ -396,7 +396,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
// realloc slice buffer for the case that spatial_decomposition_count changed
ff_slice_buffer_destroy(&s->sb);
ff_slice_buffer_init(&s->sb, s->plane[0].height, (MB_SIZE >> s->block_max_depth) + s->spatial_decomposition_count * 8 + 1, s->plane[0].width, s->spatial_idwt_buffer);
if ((res = ff_slice_buffer_init(&s->sb, s->plane[0].height,
(MB_SIZE >> s->block_max_depth) +
s->spatial_decomposition_count * 8 + 1,
s->plane[0].width,
s->spatial_idwt_buffer)) < 0)
return res;
for(plane_index=0; plane_index<3; plane_index++){
Plane *p= &s->plane[plane_index];


@ -1156,8 +1156,10 @@ ALIGN 16
INIT_XMM sse
VECTOR_FMUL
%if HAVE_AVX
INIT_YMM avx
VECTOR_FMUL
%endif
;-----------------------------------------------------------------------------
; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
@ -1198,8 +1200,10 @@ ALIGN 16
INIT_XMM sse
VECTOR_FMUL_REVERSE
%if HAVE_AVX
INIT_YMM avx
VECTOR_FMUL_REVERSE
%endif
;-----------------------------------------------------------------------------
; vector_fmul_add(float *dst, const float *src0, const float *src1,
@ -1231,8 +1235,10 @@ ALIGN 16
INIT_XMM sse
VECTOR_FMUL_ADD
%if HAVE_AVX
INIT_YMM avx
VECTOR_FMUL_ADD
%endif
;-----------------------------------------------------------------------------
; void ff_butterflies_float_interleave(float *dst, const float *src0,


@ -182,10 +182,13 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
if (labs(delta) > s->min_delta) {
av_log(ctx, AV_LOG_VERBOSE, "Discontinuity - %"PRId64" samples.\n", delta);
out_size += delta;
} else if (s->resample) {
int comp = av_clip(delta, -s->max_comp, s->max_comp);
av_log(ctx, AV_LOG_VERBOSE, "Compensating %d samples per second.\n", comp);
avresample_set_compensation(s->avr, delta, inlink->sample_rate);
} else {
if (s->resample) {
int comp = av_clip(delta, -s->max_comp, s->max_comp);
av_log(ctx, AV_LOG_VERBOSE, "Compensating %d samples per second.\n", comp);
avresample_set_compensation(s->avr, delta, inlink->sample_rate);
}
delta = 0;
}
if (out_size > 0) {


@ -202,7 +202,6 @@ int avfilter_config_links(AVFilterContext *filter)
link->sample_aspect_ratio = inlink ?
inlink->sample_aspect_ratio : (AVRational){1,1};
#if 1
if (inlink) {
if (!link->w)
link->w = inlink->w;
@ -233,7 +232,6 @@ int avfilter_config_links(AVFilterContext *filter)
link->time_base = (AVRational) {1, link->sample_rate};
}
#endif
if ((config_link = link->dstpad->config_props))
if ((ret = config_link(link)) < 0)
return ret;


@ -383,7 +383,7 @@ struct AVFilterPad {
const char *name;
/**
* AVFilterPad type. Can be AVMEDIA_TYPE_VIDEO or AVMEDIA_TYPE_AUDIO.
* AVFilterPad type.
*/
enum AVMediaType type;
@ -465,7 +465,7 @@ struct AVFilterPad {
*
* Defaults to just calling the source poll_frame() method.
*
* Output video pads only.
* Output pads only.
*/
int (*poll_frame)(AVFilterLink *link);
@ -476,7 +476,7 @@ struct AVFilterPad {
* See avfilter_request_frame() for the error codes with a specific
* meaning.
*
* Output video pads only.
* Output pads only.
*/
int (*request_frame)(AVFilterLink *link);
@ -500,19 +500,28 @@ struct AVFilterPad {
int (*config_props)(AVFilterLink *link);
};
#if FF_API_FILTERS_PUBLIC
/** default handler for start_frame() for video inputs */
attribute_deprecated
void avfilter_default_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
/** default handler for draw_slice() for video inputs */
attribute_deprecated
void avfilter_default_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
/** default handler for end_frame() for video inputs */
attribute_deprecated
void avfilter_default_end_frame(AVFilterLink *link);
/** default handler for get_video_buffer() for video inputs */
attribute_deprecated
AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link,
int perms, int w, int h);
/** Default handler for query_formats() */
attribute_deprecated
int avfilter_default_query_formats(AVFilterContext *ctx);
#endif
/**
* Helpers for query_formats() which set all links to the same list of
@ -527,21 +536,24 @@ void avfilter_set_common_channel_layouts(AVFilterContext *ctx, AVFilterFormats *
void avfilter_set_common_packing_formats(AVFilterContext *ctx, AVFilterFormats *formats);
#endif
/** Default handler for query_formats() */
int avfilter_default_query_formats(AVFilterContext *ctx);
#if FF_API_FILTERS_PUBLIC
/** start_frame() handler for filters which simply pass video along */
attribute_deprecated
void avfilter_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
/** draw_slice() handler for filters which simply pass video along */
attribute_deprecated
void avfilter_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
/** end_frame() handler for filters which simply pass video along */
attribute_deprecated
void avfilter_null_end_frame(AVFilterLink *link);
/** get_video_buffer() handler for filters which simply pass video along */
attribute_deprecated
AVFilterBufferRef *avfilter_null_get_video_buffer(AVFilterLink *link,
int perms, int w, int h);
#endif
/**
* Filter definition. This defines the pads a filter contains, and all the


@ -227,7 +227,7 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
if (graph->filters[i]->filter->query_formats)
graph->filters[i]->filter->query_formats(graph->filters[i]);
else
avfilter_default_query_formats(graph->filters[i]);
ff_default_query_formats(graph->filters[i]);
}
/* go through and merge as many format lists as possible */
@ -571,6 +571,74 @@ static void swap_channel_layouts(AVFilterGraph *graph)
swap_channel_layouts_on_filter(graph->filters[i]);
}
static void swap_sample_fmts_on_filter(AVFilterContext *filter)
{
AVFilterLink *link = NULL;
int format, bps;
int i, j;
for (i = 0; i < filter->input_count; i++) {
link = filter->inputs[i];
if (link->type == AVMEDIA_TYPE_AUDIO &&
link->out_formats->format_count == 1)
break;
}
if (i == filter->input_count)
return;
format = link->out_formats->formats[0];
bps = av_get_bytes_per_sample(format);
for (i = 0; i < filter->output_count; i++) {
AVFilterLink *outlink = filter->outputs[i];
int best_idx, best_score = INT_MIN;
if (outlink->type != AVMEDIA_TYPE_AUDIO ||
outlink->in_formats->format_count < 2)
continue;
for (j = 0; j < outlink->in_formats->format_count; j++) {
int out_format = outlink->in_formats->formats[j];
int out_bps = av_get_bytes_per_sample(out_format);
int score;
if (av_get_packed_sample_fmt(out_format) == format ||
av_get_planar_sample_fmt(out_format) == format) {
best_idx = j;
break;
}
/* for s32 and float prefer double to prevent loss of information */
if (bps == 4 && out_bps == 8) {
best_idx = j;
break;
}
/* prefer closest higher or equal bps */
score = -abs(out_bps - bps);
if (out_bps >= bps)
score += INT_MAX/2;
if (score > best_score) {
best_score = score;
best_idx = j;
}
}
FFSWAP(int, outlink->in_formats->formats[0],
outlink->in_formats->formats[best_idx]);
}
}
static void swap_sample_fmts(AVFilterGraph *graph)
{
int i;
for (i = 0; i < graph->filter_count; i++)
swap_sample_fmts_on_filter(graph->filters[i]);
}
static int pick_formats(AVFilterGraph *graph)
{
int i, j, ret;
@ -633,8 +701,9 @@ int ff_avfilter_graph_config_formats(AVFilterGraph *graph, AVClass *log_ctx)
* of format conversion inside filters */
reduce_formats(graph);
/* for audio filters, ensure the best sample rate and channel layout
/* for audio filters, ensure the best format, sample rate and channel layout
* is selected */
swap_sample_fmts(graph);
swap_samplerates(graph);
swap_channel_layouts(graph);
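
The fallback scoring in swap_sample_fmts_on_filter() above can be illustrated with a small standalone sketch (not part of the patch; the byte widths are hypothetical examples, and the earlier special cases for matching packed/planar formats and the s32/float-to-double preference are ignored): candidates at least as wide as the fixed input format get a large bonus, and among those the closest width wins, so a 2-byte input prefers a 4-byte output over a 1-byte one.

#include <limits.h>
#include <stdlib.h>
#include <stdio.h>

/* Same scoring rule as in swap_sample_fmts_on_filter(): prefer candidates
 * with at least as many bytes per sample as the input, then the closest width. */
static int fmt_score(int out_bps, int in_bps)
{
    int score = -abs(out_bps - in_bps);
    if (out_bps >= in_bps)
        score += INT_MAX / 2;
    return score;
}

int main(void)
{
    int in_bps = 2;                      /* e.g. a 2-byte (s16) input */
    int candidates[] = { 1, 2, 4, 8 };   /* 1/2/4/8 bytes per sample */
    int i;

    for (i = 0; i < 4; i++)
        printf("out_bps=%d score=%d\n", candidates[i], fmt_score(candidates[i], in_bps));
    return 0;
}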


@ -449,7 +449,7 @@ void avfilter_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
avfilter_formats_ref, formats);
}
int avfilter_default_query_formats(AVFilterContext *ctx)
int ff_default_query_formats(AVFilterContext *ctx)
{
enum AVMediaType type = ctx->inputs && ctx->inputs [0] ? ctx->inputs [0]->type :
ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type :
@ -534,6 +534,12 @@ int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx)
return 0;
}
#if FF_API_FILTERS_PUBLIC
int avfilter_default_query_formats(AVFilterContext *ctx)
{
return ff_default_query_formats(ctx);
}
#endif
#ifdef TEST
#undef printf
@ -552,4 +558,3 @@ int main(void)
}
#endif


@ -78,4 +78,6 @@ void ff_channel_layouts_unref(AVFilterChannelLayouts **ref);
void ff_channel_layouts_changeref(AVFilterChannelLayouts **oldref,
AVFilterChannelLayouts **newref);
int ff_default_query_formats(AVFilterContext *ctx);
#endif // AVFILTER_FORMATS_H


@ -25,6 +25,7 @@
#include "avfilter.h"
#include "audio.h"
#include "video.h"
static int split_init(AVFilterContext *ctx, const char *args, void *opaque)
{
@ -100,7 +101,7 @@ AVFilter avfilter_vf_split = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer= avfilter_null_get_video_buffer,
.get_video_buffer= ff_null_get_video_buffer,
.start_frame = start_frame,
.draw_slice = draw_slice,
.end_frame = end_frame, },


@ -56,5 +56,11 @@
#ifndef FF_API_PACKING
#define FF_API_PACKING (LIBAVFILTER_VERSION_MAJOR < 3)
#endif
#ifndef FF_API_DEFAULT_CONFIG_OUTPUT_LINK
#define FF_API_DEFAULT_CONFIG_OUTPUT_LINK (LIBAVFILTER_VERSION_MAJOR < 3)
#endif
#ifndef FF_API_FILTERS_PUBLIC
#define FF_API_FILTERS_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 3)
#endif
#endif // AVFILTER_VERSION_H


@ -26,6 +26,7 @@
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "video.h"
typedef struct {
AVRational ratio;
@ -86,9 +87,9 @@ AVFilter avfilter_vf_setdar = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = setdar_config_props,
.get_video_buffer = avfilter_null_get_video_buffer,
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = start_frame,
.end_frame = avfilter_null_end_frame },
.end_frame = ff_null_end_frame },
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",
@ -118,9 +119,9 @@ AVFilter avfilter_vf_setsar = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = setsar_config_props,
.get_video_buffer = avfilter_null_get_video_buffer,
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = start_frame,
.end_frame = avfilter_null_end_frame },
.end_frame = ff_null_end_frame },
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",


@ -29,6 +29,7 @@
#include "avfilter.h"
#include "internal.h"
#include "video.h"
typedef struct {
unsigned int bamount; ///< black amount
@ -127,7 +128,7 @@ AVFilter avfilter_vf_blackframe = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.draw_slice = draw_slice,
.get_video_buffer = avfilter_null_get_video_buffer,
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = ff_null_start_frame_keep_ref,
.end_frame = end_frame, },
{ .name = NULL}},


@ -22,6 +22,7 @@
*/
#include "avfilter.h"
#include "video.h"
AVFilter avfilter_vf_copy = {
.name = "copy",
@ -29,9 +30,9 @@ AVFilter avfilter_vf_copy = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = avfilter_null_get_video_buffer,
.start_frame = avfilter_null_start_frame,
.end_frame = avfilter_null_end_frame,
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = ff_null_start_frame,
.end_frame = ff_null_end_frame,
.rej_perms = ~0 },
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",


@ -26,6 +26,7 @@
/* #define DEBUG */
#include "avfilter.h"
#include "video.h"
#include "libavutil/eval.h"
#include "libavutil/avstring.h"
#include "libavutil/libm.h"
@ -352,7 +353,7 @@ AVFilter avfilter_vf_crop = {
.start_frame = start_frame,
.draw_slice = draw_slice,
.end_frame = end_frame,
.get_video_buffer = avfilter_null_get_video_buffer,
.get_video_buffer = ff_null_get_video_buffer,
.config_props = config_input, },
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",


@ -25,6 +25,7 @@
#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "video.h"
typedef struct {
int x1, y1, x2, y2;
@ -203,8 +204,8 @@ AVFilter avfilter_vf_cropdetect = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.get_video_buffer = avfilter_null_get_video_buffer,
.start_frame = avfilter_null_start_frame,
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = ff_null_start_frame,
.end_frame = end_frame, },
{ .name = NULL}},


@ -29,6 +29,7 @@
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "video.h"
/**
* Apply a simple delogo algorithm to the image in dst and put the
@ -275,7 +276,7 @@ AVFilter avfilter_vf_delogo = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = avfilter_null_get_video_buffer,
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = start_frame,
.draw_slice = null_draw_slice,
.end_frame = end_frame,


@ -28,6 +28,7 @@
#include "libavutil/pixdesc.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "video.h"
enum { Y, U, V, A };
@ -130,10 +131,10 @@ AVFilter avfilter_vf_drawbox = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.get_video_buffer = avfilter_null_get_video_buffer,
.start_frame = avfilter_null_start_frame,
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = ff_null_start_frame,
.draw_slice = draw_slice,
.end_frame = avfilter_null_end_frame,
.end_frame = ff_null_end_frame,
.min_perms = AV_PERM_WRITE | AV_PERM_READ,
.rej_perms = AV_PERM_PRESERVE },
{ .name = NULL}},


@ -41,6 +41,7 @@
#include "libavutil/lfg.h"
#include "avfilter.h"
#include "drawutils.h"
#include "video.h"
#undef time
@ -830,8 +831,8 @@ AVFilter avfilter_vf_drawtext = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = avfilter_null_get_video_buffer,
.start_frame = avfilter_null_start_frame,
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = ff_null_start_frame,
.draw_slice = null_draw_slice,
.end_frame = end_frame,
.config_props = config_input,


@ -32,6 +32,7 @@
#include "avfilter.h"
#include "drawutils.h"
#include "internal.h"
#include "video.h"
#define R 0
#define G 1
@ -288,8 +289,8 @@ AVFilter avfilter_vf_fade = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props,
.get_video_buffer = avfilter_null_get_video_buffer,
.start_frame = avfilter_null_start_frame,
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = ff_null_start_frame,
.draw_slice = draw_slice,
.end_frame = end_frame,
.min_perms = AV_PERM_READ | AV_PERM_WRITE,


@ -24,6 +24,7 @@
*/
#include "avfilter.h"
#include "video.h"
typedef struct BufPic {
AVFilterBufferRef *picref;
@ -106,7 +107,7 @@ AVFilter avfilter_vf_fifo = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer= avfilter_null_get_video_buffer,
.get_video_buffer= ff_null_get_video_buffer,
.start_frame = start_frame,
.draw_slice = draw_slice,
.end_frame = end_frame,


@ -26,6 +26,7 @@
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
typedef struct {
/**
@ -102,10 +103,10 @@ AVFilter avfilter_vf_format = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer= avfilter_null_get_video_buffer,
.start_frame = avfilter_null_start_frame,
.draw_slice = avfilter_null_draw_slice,
.end_frame = avfilter_null_end_frame, },
.get_video_buffer= ff_null_get_video_buffer,
.start_frame = ff_null_start_frame,
.draw_slice = ff_null_draw_slice,
.end_frame = ff_null_end_frame, },
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO },
@ -132,10 +133,10 @@ AVFilter avfilter_vf_noformat = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer= avfilter_null_get_video_buffer,
.start_frame = avfilter_null_start_frame,
.draw_slice = avfilter_null_draw_slice,
.end_frame = avfilter_null_end_frame, },
.get_video_buffer= ff_null_get_video_buffer,
.start_frame = ff_null_start_frame,
.draw_slice = ff_null_draw_slice,
.end_frame = ff_null_end_frame, },
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO },


@ -22,6 +22,7 @@
*/
#include "avfilter.h"
#include "video.h"
AVFilter avfilter_vf_null = {
.name = "null",
@ -31,9 +32,9 @@ AVFilter avfilter_vf_null = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = avfilter_null_get_video_buffer,
.start_frame = avfilter_null_start_frame,
.end_frame = avfilter_null_end_frame },
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = ff_null_start_frame,
.end_frame = ff_null_end_frame },
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",


@ -26,6 +26,7 @@
#include "libavutil/eval.h"
#include "libavutil/fifo.h"
#include "avfilter.h"
#include "video.h"
static const char *const var_names[] = {
"TB", ///< timebase
@ -326,7 +327,7 @@ AVFilter avfilter_vf_select = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = avfilter_null_get_video_buffer,
.get_video_buffer = ff_null_get_video_buffer,
.config_props = config_input,
.start_frame = start_frame,
.draw_slice = draw_slice,


@ -29,6 +29,7 @@
#include "libavutil/eval.h"
#include "libavutil/mathematics.h"
#include "avfilter.h"
#include "video.h"
static const char *const var_names[] = {
"INTERLACED", ///< tell if the current frame is interlaced
@ -139,7 +140,7 @@ AVFilter avfilter_vf_setpts = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = avfilter_null_get_video_buffer,
.get_video_buffer = ff_null_get_video_buffer,
.config_props = config_input,
.start_frame = start_frame, },
{ .name = NULL }},


@ -29,6 +29,7 @@
#include "libavutil/rational.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
static const char *const var_names[] = {
"AVTB", /* default timebase 1/AV_TIME_BASE */
@ -121,9 +122,9 @@ AVFilter avfilter_vf_settb = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = avfilter_null_get_video_buffer,
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = start_frame,
.end_frame = avfilter_null_end_frame },
.end_frame = ff_null_end_frame },
{ .name = NULL }},
.outputs = (const AVFilterPad[]) {{ .name = "default",


@ -28,6 +28,7 @@
#include "libavutil/timestamp.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
typedef struct {
unsigned int frame;
@ -93,7 +94,7 @@ AVFilter avfilter_vf_showinfo = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = avfilter_null_get_video_buffer,
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = ff_null_start_frame_keep_ref,
.end_frame = end_frame,
.min_perms = AV_PERM_READ, },


@ -24,6 +24,7 @@
*/
#include "avfilter.h"
#include "video.h"
#include "libavutil/pixdesc.h"
typedef struct {
@ -105,11 +106,11 @@ AVFilter avfilter_vf_slicify = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = avfilter_null_get_video_buffer,
.get_video_buffer = ff_null_get_video_buffer,
.start_frame = start_frame,
.draw_slice = draw_slice,
.config_props = config_props,
.end_frame = avfilter_null_end_frame, },
.end_frame = ff_null_end_frame, },
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO, },


@ -25,6 +25,7 @@
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "video.h"
typedef struct {
int vsub; ///< vertical chroma subsampling
@ -47,7 +48,7 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms,
int i;
if (!(perms & AV_PERM_NEG_LINESIZES))
return avfilter_default_get_video_buffer(link, perms, w, h);
return ff_default_get_video_buffer(link, perms, w, h);
picref = avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h);
for (i = 0; i < 4; i ++) {


@ -22,6 +22,7 @@
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "video.h"
#include "yadif.h"
#undef NDEBUG
@ -179,7 +180,7 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, int w,
int height= FFALIGN(h+2, 32);
int i;
picref = avfilter_default_get_video_buffer(link, perms, width, height);
picref = ff_default_get_video_buffer(link, perms, width, height);
picref->video->w = w;
picref->video->h = h;


@ -20,6 +20,7 @@
#include "avfilter.h"
#include "internal.h"
#include "video.h"
static char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms)
{
@ -61,12 +62,12 @@ static void ff_dlog_ref(void *ctx, AVFilterBufferRef *ref, int end)
av_dlog(ctx, "]%s", end ? "\n" : "");
}
AVFilterBufferRef *avfilter_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
{
return avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h);
}
AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
{
int linesize[4];
uint8_t *data[4];
@ -174,7 +175,7 @@ AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int
ret = link->dstpad->get_video_buffer(link, perms, w, h);
if (!ret)
ret = avfilter_default_get_video_buffer(link, perms, w, h);
ret = ff_default_get_video_buffer(link, perms, w, h);
if (ret)
ret->type = AVMEDIA_TYPE_VIDEO;
@ -184,12 +185,12 @@ AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int
return ret;
}
void avfilter_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
void ff_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
avfilter_start_frame(link->dst->outputs[0], picref);
}
void avfilter_default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
static void default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
AVFilterLink *outlink = NULL;
@ -215,7 +216,7 @@ void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
FF_DPRINTF_START(NULL, start_frame); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " "); ff_dlog_ref(NULL, picref, 1);
if (!(start_frame = dst->start_frame))
start_frame = avfilter_default_start_frame;
start_frame = default_start_frame;
if (picref->linesize[0] < 0)
perms |= AV_PERM_NEG_LINESIZES;
@ -246,12 +247,12 @@ void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
ff_update_link_current_pts(link, link->cur_buf->pts);
}
void avfilter_null_end_frame(AVFilterLink *link)
void ff_null_end_frame(AVFilterLink *link)
{
avfilter_end_frame(link->dst->outputs[0]);
}
void avfilter_default_end_frame(AVFilterLink *inlink)
static void default_end_frame(AVFilterLink *inlink)
{
AVFilterLink *outlink = NULL;
@ -275,7 +276,7 @@ void avfilter_end_frame(AVFilterLink *link)
void (*end_frame)(AVFilterLink *);
if (!(end_frame = link->dstpad->end_frame))
end_frame = avfilter_default_end_frame;
end_frame = default_end_frame;
end_frame(link);
@ -287,12 +288,12 @@ void avfilter_end_frame(AVFilterLink *link)
}
}
void avfilter_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
void ff_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{
avfilter_draw_slice(link->dst->outputs[0], y, h, slice_dir);
}
void avfilter_default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
static void default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
{
AVFilterLink *outlink = NULL;
@ -340,7 +341,41 @@ void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
}
if (!(draw_slice = link->dstpad->draw_slice))
draw_slice = avfilter_default_draw_slice;
draw_slice = default_draw_slice;
draw_slice(link, y, h, slice_dir);
}
#if FF_API_FILTERS_PUBLIC
AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
{
return ff_default_get_video_buffer(link, perms, w, h);
}
void avfilter_default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
default_start_frame(inlink, picref);
}
void avfilter_default_end_frame(AVFilterLink *inlink)
{
default_end_frame(inlink);
}
void avfilter_default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
{
default_draw_slice(inlink, y, h, slice_dir);
}
AVFilterBufferRef *avfilter_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
{
return ff_null_get_video_buffer(link, perms, w, h);
}
void avfilter_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
ff_null_start_frame(link, picref);
}
void avfilter_null_end_frame(AVFilterLink *link)
{
ff_null_end_frame(link);
}
void avfilter_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{
ff_null_draw_slice(link, y, h, slice_dir);
}
#endif

libavfilter/video.h (new file)

@ -0,0 +1,34 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_VIDEO_H
#define AVFILTER_VIDEO_H
AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link,
int perms, int w, int h);
AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h);
void ff_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
void ff_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
void ff_null_end_frame(AVFilterLink *link);
#endif /* AVFILTER_VIDEO_H */


@ -376,6 +376,13 @@ int ffurl_get_file_handle(URLContext *h)
return h->prot->url_get_file_handle(h);
}
int ffurl_shutdown(URLContext *h, int flags)
{
if (!h->prot->url_shutdown)
return AVERROR(EINVAL);
return h->prot->url_shutdown(h, flags);
}
int ff_check_interrupt(AVIOInterruptCB *cb)
{
int ret;


@ -52,6 +52,8 @@ typedef struct {
char *headers;
int willclose; /**< Set if the server correctly handles Connection: close and will close the connection after feeding us the content. */
int chunked_post;
int end_chunked_post; /**< A flag which indicates if the end of chunked encoding has been sent. */
int end_header; /**< A flag which indicates we have finished to read POST reply. */
} HTTPContext;
#define OFFSET(x) offsetof(HTTPContext, x)
@ -251,8 +253,10 @@ static int process_line(URLContext *h, char *line, int line_count,
char *tag, *p, *end;
/* end of header */
if (line[0] == '\0')
if (line[0] == '\0') {
s->end_header = 1;
return 0;
}
p = line;
if (line_count == 0) {
@ -419,6 +423,7 @@ static int http_connect(URLContext *h, const char *path, const char *local_path,
s->off = 0;
s->filesize = -1;
s->willclose = 0;
s->end_chunked_post = 0;
if (post) {
/* Pretend that it did work. We didn't read any header yet, since
* we've still to send the POST data, but the code calling this
@ -464,6 +469,17 @@ static int http_buf_read(URLContext *h, uint8_t *buf, int size)
static int http_read(URLContext *h, uint8_t *buf, int size)
{
HTTPContext *s = h->priv_data;
int err, new_location;
if (s->end_chunked_post) {
if (!s->end_header) {
err = http_read_header(h, &new_location);
if (err < 0)
return err;
}
return http_buf_read(h, buf, size);
}
if (s->chunksize >= 0) {
if (!s->chunksize) {
@ -516,16 +532,30 @@ static int http_write(URLContext *h, const uint8_t *buf, int size)
return size;
}
static int http_close(URLContext *h)
static int http_shutdown(URLContext *h, int flags)
{
int ret = 0;
char footer[] = "0\r\n\r\n";
HTTPContext *s = h->priv_data;
/* signal end of chunked encoding if used */
if ((h->flags & AVIO_FLAG_WRITE) && s->chunked_post) {
if ((flags & AVIO_FLAG_WRITE) && s->chunked_post) {
ret = ffurl_write(s->hd, footer, sizeof(footer) - 1);
ret = ret > 0 ? 0 : ret;
s->end_chunked_post = 1;
}
return ret;
}
static int http_close(URLContext *h)
{
int ret = 0;
HTTPContext *s = h->priv_data;
if (!s->end_chunked_post) {
/* Close the write direction by sending the end of chunked encoding. */
ret = http_shutdown(h, h->flags);
}
if (s->hd)
@ -585,6 +615,7 @@ URLProtocol ff_http_protocol = {
.url_seek = http_seek,
.url_close = http_close,
.url_get_file_handle = http_get_file_handle,
.url_shutdown = http_shutdown,
.priv_data_size = sizeof(HTTPContext),
.priv_data_class = &http_context_class,
.flags = URL_PROTOCOL_FLAG_NETWORK,
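
Combined with the ffurl_shutdown() entry point added above, this lets a caller finish a chunked POST and then read the server's reply over the same connection. A hedged usage sketch (not part of the patch; it assumes code living inside libavformat, an already-opened, writable http URLContext, and the internal ffurl_write()/ffurl_read() helpers):

#include "avio.h"   /* AVIO_FLAG_WRITE */
#include "url.h"    /* URLContext, ffurl_write(), ffurl_read(), ffurl_shutdown() */

/* Send a POST body, end the write direction, then read the reply.
 * http_shutdown() emits the final "0\r\n\r\n" chunk, and the next
 * http_read() call parses the reply headers before returning data. */
static int post_and_read_reply(URLContext *h,
                               const uint8_t *body, int body_size,
                               uint8_t *reply, int reply_size)
{
    int ret;

    ret = ffurl_write(h, body, body_size);
    if (ret < 0)
        return ret;

    ret = ffurl_shutdown(h, AVIO_FLAG_WRITE);
    if (ret < 0)
        return ret;

    return ffurl_read(h, reply, reply_size);
}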


@ -48,6 +48,18 @@ static inline int is_dos_path(const char *path)
return 0;
}
#if defined(__OS2__)
#define SHUT_RD 0
#define SHUT_WR 1
#define SHUT_RDWR 2
#endif
#if defined(_WIN32)
#define SHUT_RD SD_RECEIVE
#define SHUT_WR SD_SEND
#define SHUT_RDWR SD_BOTH
#endif
#if defined(_WIN32) && !defined(__MINGW32CE__)
int ff_win32_open(const char *filename, int oflag, int pmode);
#define open ff_win32_open


@ -182,6 +182,22 @@ static int tcp_write(URLContext *h, const uint8_t *buf, int size)
return ret < 0 ? ff_neterrno() : ret;
}
static int tcp_shutdown(URLContext *h, int flags)
{
TCPContext *s = h->priv_data;
int how;
if (flags & AVIO_FLAG_WRITE && flags & AVIO_FLAG_READ) {
how = SHUT_RDWR;
} else if (flags & AVIO_FLAG_WRITE) {
how = SHUT_WR;
} else {
how = SHUT_RD;
}
return shutdown(s->fd, how);
}
static int tcp_close(URLContext *h)
{
TCPContext *s = h->priv_data;
@ -202,6 +218,7 @@ URLProtocol ff_tcp_protocol = {
.url_write = tcp_write,
.url_close = tcp_close,
.url_get_file_handle = tcp_get_file_handle,
.url_shutdown = tcp_shutdown,
.priv_data_size = sizeof(TCPContext),
.flags = URL_PROTOCOL_FLAG_NETWORK,
};


@ -81,6 +81,7 @@ typedef struct URLProtocol {
int64_t (*url_read_seek)(URLContext *h, int stream_index,
int64_t timestamp, int flags);
int (*url_get_file_handle)(URLContext *h);
int (*url_shutdown)(URLContext *h, int flags);
int priv_data_size;
const AVClass *priv_data_class;
int flags;
@ -200,6 +201,18 @@ int64_t ffurl_size(URLContext *h);
*/
int ffurl_get_file_handle(URLContext *h);
/**
* Signal the URLContext that we are done reading or writing the stream.
*
* @param h pointer to the resource
* @param flags flags which control how the resource indicated by url
* is to be shutdown
*
* @return a negative value if an error condition occurred, 0
* otherwise
*/
int ffurl_shutdown(URLContext *h, int flags);
/**
* Register the URLProtocol protocol.
*


@ -22,6 +22,7 @@
#include "libavutil/pixdesc.h"
#include "libavutil/samplefmt.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/formats.h"
static void print_formats(AVFilterContext *filter_ctx)
{
@ -114,7 +115,7 @@ int main(int argc, char **argv)
if (filter->query_formats)
filter->query_formats(filter_ctx);
else
avfilter_default_query_formats(filter_ctx);
ff_default_query_formats(filter_ctx);
print_formats(filter_ctx);