
fftools/ffmpeg_filter: pass sub2video canvas size through InputFilterOptions

Rather than reading the canvas dimensions directly from InputStream.

This is a step towards avoiding the assumption that filtergraph inputs
are always fed by demuxers.
Anton Khirnov 2024-02-13 12:18:27 +01:00
parent bd3c1c194b
commit 6315f78e0c
3 changed files with 33 additions and 36 deletions
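
For orientation, a minimal standalone sketch of the handoff this commit introduces: the demuxer picks the sub2video canvas size (the subtitle stream's own codecpar size, else the largest video stream in the file, else 720x576) and records it in InputFilterOptions, so the filter side no longer reads it from InputStream. The structs and functions below are simplified stand-ins invented for illustration, not the actual fftools code; only the sub2video_width/sub2video_height fields, FFMAX and the fallback order mirror the diff.

/* Standalone sketch of the canvas-size handoff; simplified stand-in types. */
#include <stdio.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

typedef struct StreamParams {
    int is_video;
    int width, height;
} StreamParams;

typedef struct InputFilterOptions {
    /* canvas onto which sub2video renders subtitle rectangles */
    int sub2video_width;
    int sub2video_height;
} InputFilterOptions;

/* Demuxer side: decide the canvas size once and record it in the options
 * that are handed to the filtergraph input. */
static void set_sub2video_canvas(InputFilterOptions *opts,
                                 const StreamParams *sub_par,
                                 const StreamParams *streams, int nb_streams)
{
    /* 1) size declared by the subtitle stream itself, if any */
    opts->sub2video_width  = sub_par->width;
    opts->sub2video_height = sub_par->height;

    /* 2) otherwise the largest video stream in the same file */
    if (!(opts->sub2video_width && opts->sub2video_height)) {
        for (int i = 0; i < nb_streams; i++) {
            if (streams[i].is_video) {
                opts->sub2video_width  = FFMAX(opts->sub2video_width,  streams[i].width);
                opts->sub2video_height = FFMAX(opts->sub2video_height, streams[i].height);
            }
        }
    }

    /* 3) last resort: a 720x576 canvas */
    if (!(opts->sub2video_width && opts->sub2video_height)) {
        opts->sub2video_width  = FFMAX(opts->sub2video_width,  720);
        opts->sub2video_height = FFMAX(opts->sub2video_height, 576);
    }
}

int main(void)
{
    StreamParams streams[] = {
        { .is_video = 1, .width = 1920, .height = 1080 },  /* video stream */
        { .is_video = 0, .width = 0,    .height = 0    },  /* subtitle stream, no size set */
    };
    InputFilterOptions opts = { 0 };

    /* The filter side then reads opts.sub2video_width/height instead of
     * poking into the input stream. */
    set_sub2video_canvas(&opts, &streams[1], streams, 2);
    printf("sub2video canvas: %dx%d\n", opts.sub2video_width, opts.sub2video_height);
    return 0;
}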

fftools/ffmpeg.h

@@ -255,6 +255,9 @@ typedef struct InputFilterOptions {
     int64_t trim_end_us;
     uint8_t *name;
+    int sub2video_width;
+    int sub2video_height;
 } InputFilterOptions;
 typedef struct InputFilter {
@@ -366,10 +369,6 @@ typedef struct InputStream {
     int fix_sub_duration;
-    struct sub2video {
-        int w, h;
-    } sub2video;
     /* decoded data from this stream goes into all those filters
      * currently video and audio only */
     InputFilter **filters;

fftools/ffmpeg_demux.c

@@ -992,6 +992,26 @@ int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple,
         return ret;
+    if (ist->par->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+        /* Compute the size of the canvas for the subtitles stream.
+           If the subtitles codecpar has set a size, use it. Otherwise use the
+           maximum dimensions of the video streams in the same file. */
+        opts->sub2video_width  = ist->par->width;
+        opts->sub2video_height = ist->par->height;
+        if (!(opts->sub2video_width && opts->sub2video_height)) {
+            for (int j = 0; j < d->f.nb_streams; j++) {
+                AVCodecParameters *par1 = d->f.streams[j]->par;
+                if (par1->codec_type == AVMEDIA_TYPE_VIDEO) {
+                    opts->sub2video_width  = FFMAX(opts->sub2video_width,  par1->width);
+                    opts->sub2video_height = FFMAX(opts->sub2video_height, par1->height);
+                }
+            }
+        }
+        if (!(opts->sub2video_width && opts->sub2video_height)) {
+            opts->sub2video_width  = FFMAX(opts->sub2video_width,  720);
+            opts->sub2video_height = FFMAX(opts->sub2video_height, 576);
+        }
+    }
     if (!d->pkt_heartbeat) {
         d->pkt_heartbeat = av_packet_alloc();
         if (!d->pkt_heartbeat)
@@ -1357,27 +1377,6 @@ static int ist_add(const OptionsContext *o, Demuxer *d, AVStream *st)
                 return ret;
             }
         }
-        /* Compute the size of the canvas for the subtitles stream.
-           If the subtitles codecpar has set a size, use it. Otherwise use the
-           maximum dimensions of the video streams in the same file. */
-        ist->sub2video.w = par->width;
-        ist->sub2video.h = par->height;
-        if (!(ist->sub2video.w && ist->sub2video.h)) {
-            for (int j = 0; j < ic->nb_streams; j++) {
-                AVCodecParameters *par1 = ic->streams[j]->codecpar;
-                if (par1->codec_type == AVMEDIA_TYPE_VIDEO) {
-                    ist->sub2video.w = FFMAX(ist->sub2video.w, par1->width);
-                    ist->sub2video.h = FFMAX(ist->sub2video.h, par1->height);
-                }
-            }
-        }
-        if (!(ist->sub2video.w && ist->sub2video.h)) {
-            ist->sub2video.w = FFMAX(ist->sub2video.w, 720);
-            ist->sub2video.h = FFMAX(ist->sub2video.h, 576);
-        }
         break;
     }
     case AVMEDIA_TYPE_ATTACHMENT:

fftools/ffmpeg_filter.c

@@ -689,6 +689,16 @@ static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
         ifp->sub2video.frame = av_frame_alloc();
         if (!ifp->sub2video.frame)
             return AVERROR(ENOMEM);
+        ifp->width  = ifp->opts.sub2video_width;
+        ifp->height = ifp->opts.sub2video_height;
+        /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
+           palettes for all rectangles are identical or compatible */
+        ifp->format = AV_PIX_FMT_RGB32;
+        av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
+               ifp->width, ifp->height);
     }
     return 0;
@@ -1829,17 +1839,6 @@ int ifilter_parameters_from_dec(InputFilter *ifilter, const AVCodecContext *dec)
         ret = av_channel_layout_copy(&ifp->fallback.ch_layout, &dec->ch_layout);
         if (ret < 0)
             return ret;
-    } else {
-        // for subtitles (i.e. sub2video) we set the actual parameters,
-        // rather than just fallback
-        ifp->width  = ifp->ist->sub2video.w;
-        ifp->height = ifp->ist->sub2video.h;
-        /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
-           palettes for all rectangles are identical or compatible */
-        ifp->format = AV_PIX_FMT_RGB32;
-        av_log(NULL, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n", ifp->width, ifp->height);
     }
     return 0;