1
0
mirror of https://github.com/FFmpeg/FFmpeg.git synced 2024-12-23 12:43:46 +02:00

avfilter: do not use AVFrame accessor

Reviewed-by: wm4 <nfxjfg@googlemail.com>
Signed-off-by: Muhammad Faiz <mfcc64@gmail.com>
This commit is contained in:
Muhammad Faiz 2017-04-22 15:57:18 +07:00
parent 8103c59522
commit 6af050d7d0
52 changed files with 80 additions and 80 deletions

View File

@ -291,7 +291,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
av_frame_get_channels(out_buf), out_buf->format);
out_buf->channels, out_buf->format);
} else {
int64_t start;
@ -301,7 +301,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
start = s->start_sample + s->nb_samples - cur_sample;
s->fade_samples(out_buf->extended_data, buf->extended_data,
nb_samples, av_frame_get_channels(buf),
nb_samples, buf->channels,
s->type ? -1 : 1, start,
s->nb_samples, s->curve);
}
@ -498,7 +498,7 @@ static int acrossfade_filter_frame(AVFilterLink *inlink, AVFrame *in)
s->crossfade_samples(out->extended_data, cf[0]->extended_data,
cf[1]->extended_data,
s->nb_samples, av_frame_get_channels(in),
s->nb_samples, in->channels,
s->curve, s->curve2);
out->pts = s->pts;
s->pts += av_rescale_q(s->nb_samples,

View File

@ -280,7 +280,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
outbuf->nb_samples = nb_samples;
outbuf->channel_layout = outlink->channel_layout;
av_frame_set_channels(outbuf, outlink->channels);
outbuf->channels = outlink->channels;
while (nb_samples) {
ns = nb_samples;

View File

@ -119,7 +119,7 @@ static int request_frame(AVFilterLink *outlink)
av_samples_set_silence(outsamplesref->extended_data, 0,
n_out,
av_frame_get_channels(outsamplesref),
outsamplesref->channels,
outsamplesref->format);
outsamplesref->pts = s->next_pts;

View File

@ -254,7 +254,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
}
s->phaser(s, inbuf->extended_data, outbuf->extended_data,
outbuf->nb_samples, av_frame_get_channels(outbuf));
outbuf->nb_samples, outbuf->channels);
if (inbuf != outbuf)
av_frame_free(&inbuf);

View File

@ -200,7 +200,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
av_frame_copy_props(outsamplesref, insamplesref);
outsamplesref->format = outlink->format;
av_frame_set_channels(outsamplesref, outlink->channels);
outsamplesref->channels = outlink->channels;
outsamplesref->channel_layout = outlink->channel_layout;
outsamplesref->sample_rate = outlink->sample_rate;

View File

@ -199,7 +199,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
s->plane_checksums[0];
}
av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), av_frame_get_channels(buf),
av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), buf->channels,
buf->channel_layout);
av_log(ctx, AV_LOG_INFO,
@ -208,8 +208,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
"checksum:%08"PRIX32" ",
inlink->frame_count_out,
av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
av_frame_get_pkt_pos(buf),
av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
buf->pkt_pos,
av_get_sample_fmt_name(buf->format), buf->channels, chlayout_str,
buf->sample_rate, buf->nb_samples,
checksum);

View File

@ -305,7 +305,7 @@ static void set_metadata(AudioStatsContext *s, AVDictionary **metadata)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AudioStatsContext *s = inlink->dst->priv;
AVDictionary **metadata = avpriv_frame_get_metadatap(buf);
AVDictionary **metadata = &buf->metadata;
const int channels = s->nb_channels;
int i, c;

View File

@ -411,7 +411,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
av_frame_copy_props(out_buf, buf);
}
for (ch = 0; ch < av_frame_get_channels(buf); ch++)
for (ch = 0; ch < buf->channels; ch++)
s->filter(s, buf->extended_data[ch],
out_buf->extended_data[ch], nb_samples,
&s->cache[ch].i1, &s->cache[ch].i2,

View File

@ -354,7 +354,7 @@ static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0]));
buf->channel_layout = outlink->channel_layout;
av_frame_set_channels(buf, outlink->channels);
buf->channels = outlink->channels;
return ff_filter_frame(outlink, buf);
}

View File

@ -120,7 +120,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
buf_out->channel_layout =
av_channel_layout_extract_channel(buf->channel_layout, i);
av_frame_set_channels(buf_out, 1);
buf_out->channels = 1;
ret = ff_filter_frame(ctx->outputs[i], buf_out);
if (ret < 0)

View File

@ -358,7 +358,7 @@ static double find_peak_magnitude(AVFrame *frame, int channel)
int c, i;
if (channel == -1) {
for (c = 0; c < av_frame_get_channels(frame); c++) {
for (c = 0; c < frame->channels; c++) {
double *data_ptr = (double *)frame->extended_data[c];
for (i = 0; i < frame->nb_samples; i++)
@ -380,7 +380,7 @@ static double compute_frame_rms(AVFrame *frame, int channel)
int c, i;
if (channel == -1) {
for (c = 0; c < av_frame_get_channels(frame); c++) {
for (c = 0; c < frame->channels; c++) {
const double *data_ptr = (double *)frame->extended_data[c];
for (i = 0; i < frame->nb_samples; i++) {
@ -388,7 +388,7 @@ static double compute_frame_rms(AVFrame *frame, int channel)
}
}
rms_value /= frame->nb_samples * av_frame_get_channels(frame);
rms_value /= frame->nb_samples * frame->channels;
} else {
const double *data_ptr = (double *)frame->extended_data[channel];
for (i = 0; i < frame->nb_samples; i++) {

View File

@ -491,7 +491,7 @@ static int try_push_frame(AVFilterContext *ctx)
frame->nb_samples = nb_samples;
frame->channel_layout = outlink->channel_layout;
av_frame_set_channels(frame, outlink->channels);
frame->channels = outlink->channels;
frame->sample_rate = outlink->sample_rate;
frame->format = outlink->format;
frame->pts = s->input_frames[0]->pts;

View File

@ -389,7 +389,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
(void *)insamples->extended_data, n);
av_frame_copy_props(outsamples, insamples);
outsamples->channel_layout = outlink->channel_layout;
av_frame_set_channels(outsamples, outlink->channels);
outsamples->channels = outlink->channels;
ret = ff_filter_frame(outlink, outsamples);
av_frame_free(&insamples);

View File

@ -395,7 +395,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
vol->var_values[VAR_T ] = TS2T(buf->pts, inlink->time_base);
vol->var_values[VAR_N ] = inlink->frame_count_out;
pos = av_frame_get_pkt_pos(buf);
pos = buf->pkt_pos;
vol->var_values[VAR_POS] = pos == -1 ? NAN : pos;
if (vol->eval_mode == EVAL_MODE_FRAME)
set_volume(ctx);

View File

@ -62,7 +62,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *samples)
AVFilterContext *ctx = inlink->dst;
VolDetectContext *vd = ctx->priv;
int nb_samples = samples->nb_samples;
int nb_channels = av_frame_get_channels(samples);
int nb_channels = samples->channels;
int nb_planes = nb_channels;
int plane, i;
int16_t *pcm;

View File

@ -255,8 +255,8 @@ static int request_frame(AVFilterLink *outlink)
memcpy(samplesref->data[0], flite->wave_samples,
nb_samples * flite->wave->num_channels * 2);
samplesref->pts = flite->pts;
av_frame_set_pkt_pos(samplesref, -1);
av_frame_set_sample_rate(samplesref, flite->wave->sample_rate);
samplesref->pkt_pos = -1;
samplesref->sample_rate = flite->wave->sample_rate;
flite->pts += nb_samples;
flite->wave_samples += nb_samples * flite->wave->num_channels;
flite->wave_nb_samples -= nb_samples;

View File

@ -204,7 +204,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
memcpy(out->data[0] + i * out->linesize[0], out->data[0], outlink->w * 4);
}
metadata = avpriv_frame_get_metadatap(in);
metadata = &in->metadata;
if (metadata) {
uint8_t value[128];

View File

@ -1169,8 +1169,8 @@ static int plot_cqt(AVFilterContext *ctx, AVFrame **frameout)
if (!out)
return AVERROR(ENOMEM);
out->sample_aspect_ratio = av_make_q(1, 1);
av_frame_set_color_range(out, AVCOL_RANGE_MPEG);
av_frame_set_colorspace(out, s->csp);
out->color_range = AVCOL_RANGE_MPEG;
out->colorspace = s->csp;
UPDATE_TIME(s->alloc_time);
if (s->bar_h) {

View File

@ -426,7 +426,7 @@ static int config_output(AVFilterLink *outlink)
memset(outpicref->data[1] + i * outpicref->linesize[1], 128, outlink->w);
memset(outpicref->data[2] + i * outpicref->linesize[2], 128, outlink->w);
}
av_frame_set_color_range(outpicref, AVCOL_RANGE_JPEG);
outpicref->color_range = AVCOL_RANGE_JPEG;
}
if ((s->orientation == VERTICAL && s->xpos >= s->w) ||

View File

@ -53,7 +53,7 @@ void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
"ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
ref, ref->buf, ref->data[0],
ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
ref->pts, av_frame_get_pkt_pos(ref));
ref->pts, ref->pkt_pos);
if (ref->width) {
ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
@ -1143,7 +1143,7 @@ int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
goto error;
}
if (av_frame_get_channels(frame) != link->channels) {
if (frame->channels != link->channels) {
av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
goto error;
}
@ -1585,7 +1585,7 @@ int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
case AVMEDIA_TYPE_AUDIO:
av_samples_copy(out->extended_data, frame->extended_data,
0, 0, frame->nb_samples,
av_frame_get_channels(frame),
frame->channels,
frame->format);
break;
default:
@ -1616,7 +1616,7 @@ int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *fram
{
AVFilterContext *dstctx = link->dst;
int64_t pts = frame->pts;
int64_t pos = av_frame_get_pkt_pos(frame);
int64_t pos = frame->pkt_pos;
if (!dstctx->enable_str)
return 1;

View File

@ -155,7 +155,7 @@ int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFra
int ret = 0;
if (frame && frame->channel_layout &&
av_get_channel_layout_nb_channels(frame->channel_layout) != av_frame_get_channels(frame)) {
av_get_channel_layout_nb_channels(frame->channel_layout) != frame->channels) {
av_log(ctx, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
return AVERROR(EINVAL);
}
@ -222,7 +222,7 @@ static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
if (!frame->channel_layout)
frame->channel_layout = s->channel_layout;
CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout,
av_frame_get_channels(frame), frame->format);
frame->channels, frame->format);
break;
default:
return AVERROR(EINVAL);

View File

@ -200,7 +200,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_frame_copy_props(out, in);
}
metadata = av_frame_get_metadata(in);
metadata = in->metadata;
for (i = 0; i < 4; i++) {
double values[VAR_VARS_NB];

View File

@ -275,7 +275,7 @@ static int push_frame(AVFilterContext *ctx)
if (!out)
return AVERROR(ENOMEM);
out->pts += s->duration - s->start_pts;
pts = out->pts + av_frame_get_pkt_duration(out);
pts = out->pts + out->pkt_duration;
ret = ff_filter_frame(outlink, out);
s->current_frame++;
@ -307,7 +307,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
return AVERROR(ENOMEM);
}
s->nb_frames++;
s->duration = frame->pts + av_frame_get_pkt_duration(frame);
s->duration = frame->pts + frame->pkt_duration;
ret = ff_filter_frame(outlink, frame);
} else {
av_frame_free(&frame);

View File

@ -280,7 +280,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
MetadataContext *s = ctx->priv;
AVDictionary **metadata = avpriv_frame_get_metadatap(frame);
AVDictionary **metadata = &frame->metadata;
AVDictionaryEntry *e;
if (!*metadata)

View File

@ -284,7 +284,7 @@ static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
static double get_concatdec_select(AVFrame *frame, int64_t pts)
{
AVDictionary *metadata = av_frame_get_metadata(frame);
AVDictionary *metadata = frame->metadata;
AVDictionaryEntry *start_time_entry = av_dict_get(metadata, "lavf.concatdec.start_time", NULL, 0);
AVDictionaryEntry *duration_entry = av_dict_get(metadata, "lavf.concatdec.duration", NULL, 0);
if (start_time_entry) {
@ -321,7 +321,7 @@ static void select_frame(AVFilterContext *ctx, AVFrame *frame)
select->var_values[VAR_N ] = inlink->frame_count_out;
select->var_values[VAR_PTS] = TS2D(frame->pts);
select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
select->var_values[VAR_POS] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
select->var_values[VAR_KEY] = frame->key_frame;
select->var_values[VAR_CONCATDEC_SELECT] = get_concatdec_select(frame, av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q));
@ -340,7 +340,7 @@ static void select_frame(AVFilterContext *ctx, AVFrame *frame)
select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
// TODO: document metadata
snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.scene_score", buf, 0);
av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0);
}
break;
}

View File

@ -129,7 +129,7 @@ static void buffer_offset(AVFilterLink *link, AVFrame *frame,
static int calc_ptr_alignment(AVFrame *frame)
{
int planes = av_sample_fmt_is_planar(frame->format) ?
av_frame_get_channels(frame) : 1;
frame->channels : 1;
int min_align = 128;
int p;

View File

@ -240,7 +240,7 @@ AVFrame *ff_frame_pool_get(FFFramePool *pool)
break;
case AVMEDIA_TYPE_AUDIO:
frame->nb_samples = pool->nb_samples;
av_frame_set_channels(frame, pool->channels);
frame->channels = pool->channels;
frame->format = pool->format;
frame->linesize[0] = pool->linesize[0];

View File

@ -165,7 +165,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
setpts->var_values[VAR_PTS ] = TS2D(frame->pts);
setpts->var_values[VAR_T ] = TS2T(frame->pts, inlink->time_base);
setpts->var_values[VAR_POS ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
setpts->var_values[VAR_POS ] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
setpts->var_values[VAR_RTCTIME ] = av_gettime();
if (inlink->type == AVMEDIA_TYPE_VIDEO) {

View File

@ -559,7 +559,7 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
return 0;
}
frame->pts = av_frame_get_best_effort_timestamp(frame);
frame->pts = frame->best_effort_timestamp;
if (frame->pts != AV_NOPTS_VALUE) {
if (movie->ts_offset)
frame->pts += av_rescale_q_rnd(movie->ts_offset, AV_TIME_BASE_Q, outlink->time_base, AV_ROUND_UP);

View File

@ -84,7 +84,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
if (has_bbox) {
AVDictionary **metadata = avpriv_frame_get_metadatap(frame);
AVDictionary **metadata = &frame->metadata;
SET_META("lavfi.bbox.x1", box.x1)
SET_META("lavfi.bbox.x2", box.x2)

View File

@ -164,7 +164,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
/* black starts here */
blackdetect->black_started = 1;
blackdetect->black_start = picref->pts;
av_dict_set(avpriv_frame_get_metadatap(picref), "lavfi.black_start",
av_dict_set(&picref->metadata, "lavfi.black_start",
av_ts2timestr(blackdetect->black_start, &inlink->time_base), 0);
}
} else if (blackdetect->black_started) {
@ -172,7 +172,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
blackdetect->black_started = 0;
blackdetect->black_end = picref->pts;
check_black_end(ctx);
av_dict_set(avpriv_frame_get_metadatap(picref), "lavfi.black_end",
av_dict_set(&picref->metadata, "lavfi.black_end",
av_ts2timestr(blackdetect->black_end, &inlink->time_base), 0);
}

View File

@ -85,7 +85,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
pblack = s->nblack * 100 / (inlink->w * inlink->h);
if (pblack >= s->bamount) {
metadata = avpriv_frame_get_metadatap(frame);
metadata = &frame->metadata;
av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f "
"type:%c last_keyframe:%d\n",

View File

@ -435,7 +435,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
av_frame_copy_props(out, in);
if (color->source == COLOR_MODE_NONE) {
enum AVColorSpace cs = av_frame_get_colorspace(in);
enum AVColorSpace cs = in->colorspace;
enum ColorMode source;
switch(cs) {
@ -456,11 +456,11 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
color->mode = color->source * 5 + color->dest;
switch(color->dest) {
case COLOR_MODE_BT709 : av_frame_set_colorspace(out, AVCOL_SPC_BT709) ; break;
case COLOR_MODE_FCC : av_frame_set_colorspace(out, AVCOL_SPC_FCC) ; break;
case COLOR_MODE_SMPTE240M: av_frame_set_colorspace(out, AVCOL_SPC_SMPTE240M) ; break;
case COLOR_MODE_BT601 : av_frame_set_colorspace(out, AVCOL_SPC_BT470BG) ; break;
case COLOR_MODE_BT2020 : av_frame_set_colorspace(out, AVCOL_SPC_BT2020_NCL); break;
case COLOR_MODE_BT709 : out->colorspace = AVCOL_SPC_BT709 ; break;
case COLOR_MODE_FCC : out->colorspace = AVCOL_SPC_FCC ; break;
case COLOR_MODE_SMPTE240M: out->colorspace = AVCOL_SPC_SMPTE240M ; break;
case COLOR_MODE_BT601 : out->colorspace = AVCOL_SPC_BT470BG ; break;
case COLOR_MODE_BT2020 : out->colorspace = AVCOL_SPC_BT2020_NCL; break;
}
td.src = in;

View File

@ -258,8 +258,8 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
s->var_values[VAR_N] = link->frame_count_out;
s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(link->time_base);
s->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ?
NAN : av_frame_get_pkt_pos(frame);
s->var_values[VAR_POS] = frame->pkt_pos == -1 ?
NAN : frame->pkt_pos;
s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);

View File

@ -169,7 +169,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
// ignore first 2 frames - they may be empty
if (++s->frame_nb > 0) {
metadata = avpriv_frame_get_metadatap(frame);
metadata = &frame->metadata;
// Reset the crop area every reset_count frames, if reset_count is > 0
if (s->reset_count > 0 && s->frame_nb > s->reset_count) {

View File

@ -386,7 +386,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
}
av_frame_copy_props(out, in);
metadata = avpriv_frame_get_metadatap(out);
metadata = &out->metadata;
if (metadata) {
uint8_t value[128];

View File

@ -446,7 +446,7 @@ static int deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
params.surface = input_surface;
params.surface_region = &input_region;
params.surface_color_standard = vaapi_proc_colour_standard(
av_frame_get_colorspace(input_frame));
input_frame->colorspace);
params.output_region = NULL;
params.output_background_color = 0xff000000;

View File

@ -1452,7 +1452,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
NAN : frame->pts * av_q2d(inlink->time_base);
s->var_values[VAR_PICT_TYPE] = frame->pict_type;
s->metadata = av_frame_get_metadata(frame);
s->metadata = frame->metadata;
draw_text(ctx, frame, frame->width, frame->height);

View File

@ -254,7 +254,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterLink *outlink = inlink->dst->outputs[0];
EQContext *eq = ctx->priv;
AVFrame *out;
int64_t pos = av_frame_get_pkt_pos(in);
int64_t pos = in->pkt_pos;
const AVPixFmtDescriptor *desc;
int i;

View File

@ -120,7 +120,7 @@ static void filter(AVFilterContext *ctx)
Type type, best_type;
RepeatedField repeat;
int match = 0;
AVDictionary **metadata = avpriv_frame_get_metadatap(idet->cur);
AVDictionary **metadata = &idet->cur->metadata;
for (i = 0; i < idet->csp->nb_components; i++) {
int w = idet->cur->width;

View File

@ -97,7 +97,7 @@ static int query_formats(AVFilterContext *ctx)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVDictionary **metadata = avpriv_frame_get_metadatap(in);
AVDictionary **metadata = &in->metadata;
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
OCRContext *s = ctx->priv;

View File

@ -715,7 +715,7 @@ static AVFrame *do_blend(AVFilterContext *ctx, AVFrame *mainpic,
AVFilterLink *inlink = ctx->inputs[0];
if (s->eval_mode == EVAL_MODE_FRAME) {
int64_t pos = av_frame_get_pkt_pos(mainpic);
int64_t pos = mainpic->pkt_pos;
s->var_values[VAR_N] = inlink->frame_count_out;
s->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?

View File

@ -148,7 +148,7 @@ static AVFrame *do_psnr(AVFilterContext *ctx, AVFrame *main,
PSNRContext *s = ctx->priv;
double comp_mse[4], mse = 0;
int j, c;
AVDictionary **metadata = avpriv_frame_get_metadatap(main);
AVDictionary **metadata = &main->metadata;
compute_images_mse(s, (const uint8_t **)main->data, main->linesize,
(const uint8_t **)ref->data, ref->linesize,

View File

@ -214,11 +214,11 @@ static void extract_line(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *in
snprintf(key, sizeof(key), "lavfi.readeia608.%d.cc", s->nb_found);
snprintf(value, sizeof(value), "0x%02X%02X", byte[0], byte[1]);
av_dict_set(avpriv_frame_get_metadatap(in), key, value, 0);
av_dict_set(&in->metadata, key, value, 0);
snprintf(key, sizeof(key), "lavfi.readeia608.%d.line", s->nb_found);
snprintf(value, sizeof(value), "%d", line);
av_dict_set(avpriv_frame_get_metadatap(in), key, value, 0);
av_dict_set(&in->metadata, key, value, 0);
}
s->nb_found++;

View File

@ -221,9 +221,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
int found;
found = read_vitc_line(s, frame->data[0], frame->linesize[0], inlink->w, inlink->h);
av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.readvitc.found", (found ? "1" : "0"), 0);
av_dict_set(&frame->metadata, "lavfi.readvitc.found", (found ? "1" : "0"), 0);
if (found)
av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.readvitc.tc_str", make_vitc_tc_string(s->tcbuf, s->line_data), 0);
av_dict_set(&frame->metadata, "lavfi.readvitc.tc_str", make_vitc_tc_string(s->tcbuf, s->line_data), 0);
return ff_filter_frame(outlink, frame);
}

View File

@ -409,7 +409,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
char buf[32];
int in_range;
if (av_frame_get_colorspace(in) == AVCOL_SPC_YCGCO)
if (in->colorspace == AVCOL_SPC_YCGCO)
av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
if( in->width != link->w
@ -456,7 +456,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
if(scale->output_is_pal)
avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
in_range = av_frame_get_color_range(in);
in_range = in->color_range;
if ( scale->in_color_matrix
|| scale->out_color_matrix
@ -471,7 +471,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
&brightness, &contrast, &saturation);
if (scale->in_color_matrix)
inv_table = parse_yuv_type(scale->in_color_matrix, av_frame_get_colorspace(in));
inv_table = parse_yuv_type(scale->in_color_matrix, in->colorspace);
if (scale->out_color_matrix)
table = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);
else if (scale->in_color_matrix)
@ -496,7 +496,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
table, out_full,
brightness, contrast, saturation);
av_frame_set_color_range(out, out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG);
out->color_range = out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
}
av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,

View File

@ -108,7 +108,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
"fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
"checksum:%08"PRIX32" plane_checksum:[%08"PRIX32,
inlink->frame_count_out,
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), frame->pkt_pos,
desc->name,
frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
frame->width, frame->height,

View File

@ -283,7 +283,7 @@ static double ssim_db(double ssim, double weight)
static AVFrame *do_ssim(AVFilterContext *ctx, AVFrame *main,
const AVFrame *ref)
{
AVDictionary **metadata = avpriv_frame_get_metadatap(main);
AVDictionary **metadata = &main->metadata;
SSIMContext *s = ctx->priv;
float c[4], ssimv = 0.0;
int i;

View File

@ -99,7 +99,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
var_values[VAR_N] = inlink->frame_count_out;
var_values[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base);
var_values[VAR_POS] = av_frame_get_pkt_pos(in) == -1 ? NAN : av_frame_get_pkt_pos(in);
var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
ret = av_expr_parse_and_eval(&dw, s->w,
var_names, &var_values[0],

View File

@ -1212,7 +1212,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
if (s->colorspace) {
s->cs = (s->depth - 8) * 2 + s->colorspace - 1;
} else {
switch (av_frame_get_colorspace(in)) {
switch (in->colorspace) {
case AVCOL_SPC_SMPTE170M:
case AVCOL_SPC_BT470BG:
s->cs = (s->depth - 8) * 2 + 0;

View File

@ -2754,7 +2754,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
return AVERROR(ENOMEM);
}
out->pts = in->pts;
av_frame_set_color_range(out, AVCOL_RANGE_JPEG);
out->color_range = AVCOL_RANGE_JPEG;
for (k = 0; k < s->dcomp; k++) {
if (s->bits <= 8) {

View File

@ -1376,7 +1376,7 @@ static void smptebars_fill_picture(AVFilterContext *ctx, AVFrame *picref)
int r_w, r_h, w_h, p_w, p_h, i, tmp, x = 0;
const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(picref->format);
av_frame_set_colorspace(picref, AVCOL_SPC_BT470BG);
picref->colorspace = AVCOL_SPC_BT470BG;
r_w = FFALIGN((test->w + 6) / 7, 1 << pixdesc->log2_chroma_w);
r_h = FFALIGN(test->h * 2 / 3, 1 << pixdesc->log2_chroma_h);
@ -1443,7 +1443,7 @@ static void smptehdbars_fill_picture(AVFilterContext *ctx, AVFrame *picref)
int d_w, r_w, r_h, l_w, i, tmp, x = 0, y = 0;
const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(picref->format);
av_frame_set_colorspace(picref, AVCOL_SPC_BT709);
picref->colorspace = AVCOL_SPC_BT709;
d_w = FFALIGN(test->w / 8, 1 << pixdesc->log2_chroma_w);
r_h = FFALIGN(test->h * 7 / 12, 1 << pixdesc->log2_chroma_h);