/*
 * AviSynth/AvxSynth support
 * Copyright (c) 2012 AvxSynth Team
 *
 * This file is part of FFmpeg
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/internal.h"

#include "libavcodec/internal.h"

#include "avformat.h"
#include "internal.h"
#include "config.h"
|
2013-02-27 02:02:20 +03:00
|
|
|
|
2013-10-28 23:52:44 +03:00
|
|
|
/* Enable function pointer definitions for runtime loading. */
|
2013-02-27 02:02:20 +03:00
|
|
|
#define AVSC_NO_DECLSPEC
|
|
|
|
|
2013-10-28 23:52:44 +03:00
|
|
|
/* Platform-specific directives for AviSynth vs AvxSynth. */
|
2013-02-27 02:02:20 +03:00
|
|
|
#ifdef _WIN32
|
2016-10-31 05:30:43 +02:00
|
|
|
#include "compat/w32dlfcn.h"
|
2013-02-27 02:02:20 +03:00
|
|
|
#undef EXTERN_C
|
|
|
|
#include "compat/avisynth/avisynth_c.h"
|
|
|
|
#define AVISYNTH_LIB "avisynth"
|
2013-10-28 23:52:43 +03:00
|
|
|
#define USING_AVISYNTH
|
2013-02-27 02:02:20 +03:00
|
|
|
#else
|
|
|
|
#include <dlfcn.h>
|
|
|
|
#include "compat/avisynth/avxsynth_c.h"
|
2015-03-17 14:32:06 +02:00
|
|
|
#define AVISYNTH_NAME "libavxsynth"
|
|
|
|
#define AVISYNTH_LIB AVISYNTH_NAME SLIBSUF
|
2013-02-27 02:02:20 +03:00
|
|
|
#endif

typedef struct AviSynthLibrary {
    void *library;
#define AVSC_DECLARE_FUNC(name) name ## _func name
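/* Each AVSC_DECLARE_FUNC() line below expands to a typed function pointer
 * member, e.g. "avs_bit_blt_func avs_bit_blt;". The *_func pointer typedefs
 * are assumed to come from the bundled avisynth_c.h / avxsynth_c.h headers
 * included above. */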
    AVSC_DECLARE_FUNC(avs_bit_blt);
    AVSC_DECLARE_FUNC(avs_clip_get_error);
    AVSC_DECLARE_FUNC(avs_create_script_environment);
    AVSC_DECLARE_FUNC(avs_delete_script_environment);
    AVSC_DECLARE_FUNC(avs_get_audio);
    AVSC_DECLARE_FUNC(avs_get_error);
    AVSC_DECLARE_FUNC(avs_get_frame);
    AVSC_DECLARE_FUNC(avs_get_version);
    AVSC_DECLARE_FUNC(avs_get_video_info);
    AVSC_DECLARE_FUNC(avs_invoke);
    AVSC_DECLARE_FUNC(avs_release_clip);
    AVSC_DECLARE_FUNC(avs_release_value);
    AVSC_DECLARE_FUNC(avs_release_video_frame);
    AVSC_DECLARE_FUNC(avs_take_clip);
#ifdef USING_AVISYNTH
    AVSC_DECLARE_FUNC(avs_bits_per_pixel);
    AVSC_DECLARE_FUNC(avs_get_height_p);
    AVSC_DECLARE_FUNC(avs_get_pitch_p);
    AVSC_DECLARE_FUNC(avs_get_read_ptr_p);
    AVSC_DECLARE_FUNC(avs_get_row_size_p);
    AVSC_DECLARE_FUNC(avs_is_planar_rgb);
    AVSC_DECLARE_FUNC(avs_is_planar_rgba);
#endif
#undef AVSC_DECLARE_FUNC
} AviSynthLibrary;

typedef struct AviSynthContext {
    AVS_ScriptEnvironment *env;
    AVS_Clip *clip;
    const AVS_VideoInfo *vi;

    /* avisynth_read_packet_video() iterates over this. */
    int n_planes;
    const int *planes;

    int curr_stream;
    int curr_frame;
    int64_t curr_sample;

    int error;

    /* Linked list pointers. */
    struct AviSynthContext *next;
} AviSynthContext;

static const int avs_planes_packed[1] = { 0 };
static const int avs_planes_grey[1]   = { AVS_PLANAR_Y };
static const int avs_planes_yuv[3]    = { AVS_PLANAR_Y, AVS_PLANAR_U,
                                          AVS_PLANAR_V };
#ifdef USING_AVISYNTH
static const int avs_planes_rgb[3]    = { AVS_PLANAR_G, AVS_PLANAR_B,
                                          AVS_PLANAR_R };
static const int avs_planes_yuva[4]   = { AVS_PLANAR_Y, AVS_PLANAR_U,
                                          AVS_PLANAR_V, AVS_PLANAR_A };
static const int avs_planes_rgba[4]   = { AVS_PLANAR_G, AVS_PLANAR_B,
                                          AVS_PLANAR_R, AVS_PLANAR_A };
#endif

/* A conflict between C++ global objects, atexit, and dynamic loading requires
 * us to register our own atexit handler to prevent double freeing. */
static AviSynthLibrary avs_library;
static int avs_atexit_called = 0;

/* Linked list of AviSynthContexts. An atexit handler destroys this list. */
static AviSynthContext *avs_ctx_list = NULL;

static av_cold void avisynth_atexit_handler(void);

static av_cold int avisynth_load_library(void)
{
    avs_library.library = dlopen(AVISYNTH_LIB, RTLD_NOW | RTLD_LOCAL);
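    /* RTLD_NOW resolves all symbols immediately and RTLD_LOCAL keeps them out
     * of the global namespace; on Windows the w32dlfcn compat wrapper is
     * assumed to map this onto LoadLibrary, where the flags have no effect. */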
    if (!avs_library.library)
        return AVERROR_UNKNOWN;

#define LOAD_AVS_FUNC(name, continue_on_fail)                          \
    avs_library.name = dlsym(avs_library.library, #name);              \
    if (!continue_on_fail && !avs_library.name)                        \
        goto fail;
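
    /* Entry points loaded with continue_on_fail set are optional: they were
     * added in AviSynth 2.6 or AviSynth+ and may be missing from older
     * libraries, so a failed lookup does not abort loading. */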
    LOAD_AVS_FUNC(avs_bit_blt, 0);
    LOAD_AVS_FUNC(avs_clip_get_error, 0);
    LOAD_AVS_FUNC(avs_create_script_environment, 0);
    LOAD_AVS_FUNC(avs_delete_script_environment, 0);
    LOAD_AVS_FUNC(avs_get_audio, 0);
    LOAD_AVS_FUNC(avs_get_error, 1); // New to AviSynth 2.6
    LOAD_AVS_FUNC(avs_get_frame, 0);
    LOAD_AVS_FUNC(avs_get_version, 0);
    LOAD_AVS_FUNC(avs_get_video_info, 0);
    LOAD_AVS_FUNC(avs_invoke, 0);
    LOAD_AVS_FUNC(avs_release_clip, 0);
    LOAD_AVS_FUNC(avs_release_value, 0);
    LOAD_AVS_FUNC(avs_release_video_frame, 0);
    LOAD_AVS_FUNC(avs_take_clip, 0);
#ifdef USING_AVISYNTH
    LOAD_AVS_FUNC(avs_bits_per_pixel, 1);
    LOAD_AVS_FUNC(avs_get_height_p, 1);
    LOAD_AVS_FUNC(avs_get_pitch_p, 1);
    LOAD_AVS_FUNC(avs_get_read_ptr_p, 1);
    LOAD_AVS_FUNC(avs_get_row_size_p, 1);
    LOAD_AVS_FUNC(avs_is_planar_rgb, 1);
    LOAD_AVS_FUNC(avs_is_planar_rgba, 1);
#endif
#undef LOAD_AVS_FUNC

    atexit(avisynth_atexit_handler);
    return 0;

fail:
    dlclose(avs_library.library);
    return AVERROR_UNKNOWN;
}

/* Note that avisynth_context_create and avisynth_context_destroy
 * do not allocate or free the actual context! That is taken care of
 * by libavformat. */
static av_cold int avisynth_context_create(AVFormatContext *s)
{
    AviSynthContext *avs = s->priv_data;
    int ret;

    if (!avs_library.library)
        if (ret = avisynth_load_library())
            return ret;

    avs->env = avs_library.avs_create_script_environment(3);
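    /* The argument to avs_create_script_environment() is assumed to be the
     * requested AVS interface version. */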
    if (avs_library.avs_get_error) {
        const char *error = avs_library.avs_get_error(avs->env);
        if (error) {
            av_log(s, AV_LOG_ERROR, "%s\n", error);
            return AVERROR_UNKNOWN;
        }
    }

    if (!avs_ctx_list) {
        avs_ctx_list = avs;
    } else {
        avs->next    = avs_ctx_list;
        avs_ctx_list = avs;
    }

    return 0;
}

static av_cold void avisynth_context_destroy(AviSynthContext *avs)
{
    if (avs_atexit_called)
        return;

    if (avs == avs_ctx_list) {
        avs_ctx_list = avs->next;
    } else {
        AviSynthContext *prev = avs_ctx_list;
        while (prev->next != avs)
            prev = prev->next;
        prev->next = avs->next;
    }

    if (avs->clip) {
        avs_library.avs_release_clip(avs->clip);
        avs->clip = NULL;
    }
    if (avs->env) {
        avs_library.avs_delete_script_environment(avs->env);
        avs->env = NULL;
    }
}

static av_cold void avisynth_atexit_handler(void)
{
    AviSynthContext *avs = avs_ctx_list;

    while (avs) {
        AviSynthContext *next = avs->next;
        avisynth_context_destroy(avs);
        avs = next;
    }
    dlclose(avs_library.library);

    avs_atexit_called = 1;
}

/* Create AVStream from audio and video data. */
static int avisynth_create_stream_video(AVFormatContext *s, AVStream *st)
{
    AviSynthContext *avs = s->priv_data;
    int planar = 0; // 0: packed, 1: YUV, 2: Y8, 3: Planar RGB, 4: YUVA, 5: Planar RGBA

    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codecpar->width      = avs->vi->width;
    st->codecpar->height     = avs->vi->height;

    st->avg_frame_rate = (AVRational) { avs->vi->fps_numerator,
                                        avs->vi->fps_denominator };
    st->start_time     = 0;
    st->duration       = avs->vi->num_frames;
    st->nb_frames      = avs->vi->num_frames;
    avpriv_set_pts_info(st, 32, avs->vi->fps_denominator, avs->vi->fps_numerator);
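    /* The stream time base is one frame (fps_denominator / fps_numerator
     * seconds), so frame indices can be used directly as timestamps. */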

    switch (avs->vi->pixel_type) {
#ifdef USING_AVISYNTH
    /* 10~16-bit YUV pix_fmts (AviSynth+) */
    case AVS_CS_YUV444P10:
        st->codecpar->format = AV_PIX_FMT_YUV444P10;
        planar = 1;
        break;
    case AVS_CS_YUV422P10:
        st->codecpar->format = AV_PIX_FMT_YUV422P10;
        planar = 1;
        break;
    case AVS_CS_YUV420P10:
        st->codecpar->format = AV_PIX_FMT_YUV420P10;
        planar = 1;
        break;
    case AVS_CS_YUV444P12:
        st->codecpar->format = AV_PIX_FMT_YUV444P12;
        planar = 1;
        break;
    case AVS_CS_YUV422P12:
        st->codecpar->format = AV_PIX_FMT_YUV422P12;
        planar = 1;
        break;
    case AVS_CS_YUV420P12:
        st->codecpar->format = AV_PIX_FMT_YUV420P12;
        planar = 1;
        break;
    case AVS_CS_YUV444P14:
        st->codecpar->format = AV_PIX_FMT_YUV444P14;
        planar = 1;
        break;
    case AVS_CS_YUV422P14:
        st->codecpar->format = AV_PIX_FMT_YUV422P14;
        planar = 1;
        break;
    case AVS_CS_YUV420P14:
        st->codecpar->format = AV_PIX_FMT_YUV420P14;
        planar = 1;
        break;
    case AVS_CS_YUV444P16:
        st->codecpar->format = AV_PIX_FMT_YUV444P16;
        planar = 1;
        break;
    case AVS_CS_YUV422P16:
        st->codecpar->format = AV_PIX_FMT_YUV422P16;
        planar = 1;
        break;
    case AVS_CS_YUV420P16:
        st->codecpar->format = AV_PIX_FMT_YUV420P16;
        planar = 1;
        break;
    /* 8~16-bit YUV pix_fmts with Alpha (AviSynth+) */
    case AVS_CS_YUVA444:
        st->codecpar->format = AV_PIX_FMT_YUVA444P;
        planar = 4;
        break;
    case AVS_CS_YUVA422:
        st->codecpar->format = AV_PIX_FMT_YUVA422P;
        planar = 4;
        break;
    case AVS_CS_YUVA420:
        st->codecpar->format = AV_PIX_FMT_YUVA420P;
        planar = 4;
        break;
    case AVS_CS_YUVA444P10:
        st->codecpar->format = AV_PIX_FMT_YUVA444P10;
        planar = 4;
        break;
    case AVS_CS_YUVA422P10:
        st->codecpar->format = AV_PIX_FMT_YUVA422P10;
        planar = 4;
        break;
    case AVS_CS_YUVA420P10:
        st->codecpar->format = AV_PIX_FMT_YUVA420P10;
        planar = 4;
        break;
    case AVS_CS_YUVA422P12:
        st->codecpar->format = AV_PIX_FMT_YUVA422P12;
        planar = 4;
        break;
    case AVS_CS_YUVA444P16:
        st->codecpar->format = AV_PIX_FMT_YUVA444P16;
        planar = 4;
        break;
    case AVS_CS_YUVA422P16:
        st->codecpar->format = AV_PIX_FMT_YUVA422P16;
        planar = 4;
        break;
    case AVS_CS_YUVA420P16:
        st->codecpar->format = AV_PIX_FMT_YUVA420P16;
        planar = 4;
        break;
    /* Planar RGB pix_fmts (AviSynth+) */
    case AVS_CS_RGBP:
        st->codecpar->format = AV_PIX_FMT_GBRP;
        planar = 3;
        break;
    case AVS_CS_RGBP10:
        st->codecpar->format = AV_PIX_FMT_GBRP10;
        planar = 3;
        break;
    case AVS_CS_RGBP12:
        st->codecpar->format = AV_PIX_FMT_GBRP12;
        planar = 3;
        break;
    case AVS_CS_RGBP14:
        st->codecpar->format = AV_PIX_FMT_GBRP14;
        planar = 3;
        break;
    case AVS_CS_RGBP16:
        st->codecpar->format = AV_PIX_FMT_GBRP16;
        planar = 3;
        break;
    /* Single precision floating point Planar RGB (AviSynth+) */
    case AVS_CS_RGBPS:
        st->codecpar->format = AV_PIX_FMT_GBRPF32;
        planar = 3;
        break;
    /* Planar RGB pix_fmts with Alpha (AviSynth+) */
    case AVS_CS_RGBAP:
        st->codecpar->format = AV_PIX_FMT_GBRAP;
        planar = 5;
        break;
    case AVS_CS_RGBAP10:
        st->codecpar->format = AV_PIX_FMT_GBRAP10;
        planar = 5;
        break;
    case AVS_CS_RGBAP12:
        st->codecpar->format = AV_PIX_FMT_GBRAP12;
        planar = 5;
        break;
    case AVS_CS_RGBAP16:
        st->codecpar->format = AV_PIX_FMT_GBRAP16;
        planar = 5;
        break;
    /* Single precision floating point Planar RGB with Alpha (AviSynth+) */
    case AVS_CS_RGBAPS:
        st->codecpar->format = AV_PIX_FMT_GBRAPF32;
        planar = 5;
        break;
    /* 10~16-bit gray pix_fmts (AviSynth+) */
    case AVS_CS_Y10:
        st->codecpar->format = AV_PIX_FMT_GRAY10;
        planar = 2;
        break;
    case AVS_CS_Y12:
        st->codecpar->format = AV_PIX_FMT_GRAY12;
        planar = 2;
        break;
    case AVS_CS_Y14:
        st->codecpar->format = AV_PIX_FMT_GRAY14;
        planar = 2;
        break;
    case AVS_CS_Y16:
        st->codecpar->format = AV_PIX_FMT_GRAY16;
        planar = 2;
        break;
    /* Single precision floating point gray (AviSynth+) */
    case AVS_CS_Y32:
        st->codecpar->format = AV_PIX_FMT_GRAYF32;
        planar = 2;
        break;
    /* pix_fmts added in AviSynth 2.6 */
    case AVS_CS_YV24:
        st->codecpar->format = AV_PIX_FMT_YUV444P;
        planar = 1;
        break;
    case AVS_CS_YV16:
        st->codecpar->format = AV_PIX_FMT_YUV422P;
        planar = 1;
        break;
    case AVS_CS_YV411:
        st->codecpar->format = AV_PIX_FMT_YUV411P;
        planar = 1;
        break;
    case AVS_CS_Y8:
        st->codecpar->format = AV_PIX_FMT_GRAY8;
        planar = 2;
        break;
    /* 16-bit packed RGB pix_fmts (AviSynth+) */
    case AVS_CS_BGR48:
        st->codecpar->format = AV_PIX_FMT_BGR48;
        break;
    case AVS_CS_BGR64:
        st->codecpar->format = AV_PIX_FMT_BGRA64;
        break;
#endif
    /* AviSynth 2.5 and AvxSynth pix_fmts */
    case AVS_CS_BGR24:
        st->codecpar->format = AV_PIX_FMT_BGR24;
        break;
    case AVS_CS_BGR32:
        st->codecpar->format = AV_PIX_FMT_RGB32;
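        /* AV_PIX_FMT_RGB32 is FFmpeg's native-endian packed ARGB format
         * (stored as B,G,R,A bytes on little-endian hosts), which is assumed
         * to match AviSynth's BGR32 memory layout. */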
        break;
    case AVS_CS_YUY2:
        st->codecpar->format = AV_PIX_FMT_YUYV422;
        break;
    case AVS_CS_YV12:
        st->codecpar->format = AV_PIX_FMT_YUV420P;
        planar = 1;
        break;
    case AVS_CS_I420: // Is this even used anywhere?
        st->codecpar->format = AV_PIX_FMT_YUV420P;
        planar = 1;
        break;
    default:
        av_log(s, AV_LOG_ERROR,
               "unknown AviSynth colorspace %d\n", avs->vi->pixel_type);
        avs->error = 1;
        return AVERROR_UNKNOWN;
    }

    switch (planar) {
#ifdef USING_AVISYNTH
    case 5: // Planar RGB + Alpha
        avs->n_planes = 4;
        avs->planes   = avs_planes_rgba;
        break;
    case 4: // YUV + Alpha
        avs->n_planes = 4;
        avs->planes   = avs_planes_yuva;
        break;
    case 3: // Planar RGB
        avs->n_planes = 3;
        avs->planes   = avs_planes_rgb;
        break;
#endif
    case 2: // Y8
        avs->n_planes = 1;
        avs->planes   = avs_planes_grey;
        break;
    case 1: // YUV
        avs->n_planes = 3;
        avs->planes   = avs_planes_yuv;
        break;
    default:
        avs->n_planes = 1;
        avs->planes   = avs_planes_packed;
    }
    return 0;
}

static int avisynth_create_stream_audio(AVFormatContext *s, AVStream *st)
{
    AviSynthContext *avs = s->priv_data;

    st->codecpar->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codecpar->sample_rate = avs->vi->audio_samples_per_second;
    st->codecpar->channels    = avs->vi->nchannels;
    st->duration              = avs->vi->num_audio_samples;
    avpriv_set_pts_info(st, 64, 1, avs->vi->audio_samples_per_second);
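    /* Audio timestamps are expressed in samples (time base 1 / sample_rate). */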
|
2013-02-27 02:02:20 +03:00
|
|
|
|
|
|
|
switch (avs->vi->sample_type) {
|
|
|
|
case AVS_SAMPLE_INT8:
|
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
|
|
|
st->codecpar->codec_id = AV_CODEC_ID_PCM_U8;
|
2013-02-27 02:02:20 +03:00
|
|
|
break;
|
|
|
|
case AVS_SAMPLE_INT16:
|
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
|
|
|
st->codecpar->codec_id = AV_CODEC_ID_PCM_S16LE;
|
2013-02-27 02:02:20 +03:00
|
|
|
break;
|
|
|
|
case AVS_SAMPLE_INT24:
|
lavf: replace AVStream.codec with AVStream.codecpar
2014-06-18 21:42:52 +03:00
|
|
|
st->codecpar->codec_id = AV_CODEC_ID_PCM_S24LE;
|
2013-02-27 02:02:20 +03:00
|
|
|
break;
|
|
|
|
case AVS_SAMPLE_INT32:
|
lavf: replace AVStream.codec with AVStream.codecpar
2014-06-18 21:42:52 +03:00
|
|
|
st->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
|
2013-02-27 02:02:20 +03:00
|
|
|
break;
|
|
|
|
case AVS_SAMPLE_FLOAT:
|
lavf: replace AVStream.codec with AVStream.codecpar
2014-06-18 21:42:52 +03:00
|
|
|
st->codecpar->codec_id = AV_CODEC_ID_PCM_F32LE;
|
2013-02-27 02:02:20 +03:00
|
|
|
break;
|
|
|
|
default:
|
2013-10-28 23:52:45 +03:00
|
|
|
av_log(s, AV_LOG_ERROR,
|
|
|
|
"unknown AviSynth sample type %d\n", avs->vi->sample_type);
|
2013-02-27 02:02:20 +03:00
|
|
|
avs->error = 1;
|
|
|
|
return AVERROR_UNKNOWN;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2006-08-29 11:15:38 +03:00
|
|
|
|
2013-10-28 23:52:45 +03:00
|
|
|
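/* Create demuxer streams for the video and/or audio present in the clip. */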
static int avisynth_create_stream(AVFormatContext *s)
|
|
|
|
{
|
2013-02-27 02:02:20 +03:00
|
|
|
AviSynthContext *avs = s->priv_data;
|
|
|
|
AVStream *st;
|
|
|
|
int ret;
|
|
|
|
int id = 0;
|
|
|
|
|
|
|
|
if (avs_has_video(avs->vi)) {
|
|
|
|
st = avformat_new_stream(s, NULL);
|
|
|
|
if (!st)
|
|
|
|
return AVERROR_UNKNOWN;
|
|
|
|
st->id = id++;
|
|
|
|
if (ret = avisynth_create_stream_video(s, st))
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
if (avs_has_audio(avs->vi)) {
|
|
|
|
st = avformat_new_stream(s, NULL);
|
|
|
|
if (!st)
|
|
|
|
return AVERROR_UNKNOWN;
|
|
|
|
st->id = id++;
|
|
|
|
if (ret = avisynth_create_stream_audio(s, st))
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
return 0;
|
2006-08-29 11:15:38 +03:00
|
|
|
}
|
|
|
|
|
2013-10-28 23:52:45 +03:00
|
|
|
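/* Import the script, obtain the clip and its video info, then create the streams. */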
static int avisynth_open_file(AVFormatContext *s)
|
|
|
|
{
|
2013-10-28 23:52:48 +03:00
|
|
|
AviSynthContext *avs = s->priv_data;
|
2013-02-27 02:02:20 +03:00
|
|
|
AVS_Value arg, val;
|
|
|
|
int ret;
|
2013-10-28 23:52:43 +03:00
|
|
|
#ifdef USING_AVISYNTH
|
2013-06-21 16:57:03 +03:00
|
|
|
char filename_ansi[MAX_PATH * 4];
|
|
|
|
wchar_t filename_wc[MAX_PATH * 4];
|
|
|
|
#endif
|
2013-02-27 02:02:20 +03:00
|
|
|
|
|
|
|
if (ret = avisynth_context_create(s))
|
|
|
|
return ret;
|
2006-08-29 11:15:38 +03:00
|
|
|
|
2013-10-28 23:52:43 +03:00
|
|
|
#ifdef USING_AVISYNTH
|
2013-10-28 23:52:44 +03:00
|
|
|
/* Convert UTF-8 to the ANSI code page via UTF-16, as Win32 has no direct
 * UTF-8 to ANSI conversion and AviSynth 2.6 expects ANSI file names. */
|
2013-06-21 16:57:03 +03:00
|
|
|
MultiByteToWideChar(CP_UTF8, 0, s->filename, -1, filename_wc, MAX_PATH * 4);
|
2013-10-28 23:52:45 +03:00
|
|
|
WideCharToMultiByte(CP_THREAD_ACP, 0, filename_wc, -1, filename_ansi,
|
|
|
|
MAX_PATH * 4, NULL, NULL);
|
2013-06-21 16:57:03 +03:00
|
|
|
arg = avs_new_value_string(filename_ansi);
|
|
|
|
#else
|
2013-02-27 02:02:20 +03:00
|
|
|
arg = avs_new_value_string(s->filename);
|
2013-06-21 16:57:03 +03:00
|
|
|
#endif
|
2013-12-01 14:03:35 +03:00
|
|
|
val = avs_library.avs_invoke(avs->env, "Import", arg, 0);
|
2013-02-27 02:02:20 +03:00
|
|
|
if (avs_is_error(val)) {
|
|
|
|
av_log(s, AV_LOG_ERROR, "%s\n", avs_as_error(val));
|
|
|
|
ret = AVERROR_UNKNOWN;
|
|
|
|
goto fail;
|
2006-08-29 11:15:38 +03:00
|
|
|
}
|
2013-02-27 02:02:20 +03:00
|
|
|
if (!avs_is_clip(val)) {
|
2013-10-29 21:42:00 +03:00
|
|
|
av_log(s, AV_LOG_ERROR, "AviSynth script did not return a clip\n");
|
2013-02-27 02:02:20 +03:00
|
|
|
ret = AVERROR_UNKNOWN;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2013-12-01 14:03:35 +03:00
|
|
|
avs->clip = avs_library.avs_take_clip(val, avs->env);
|
|
|
|
avs->vi = avs_library.avs_get_video_info(avs->clip);
|
2013-02-27 02:02:20 +03:00
|
|
|
|
2015-03-24 21:23:30 +02:00
|
|
|
#ifdef USING_AVISYNTH
|
2015-04-02 21:38:54 +02:00
|
|
|
/* On Windows, FFmpeg supports AviSynth interface version 6 or higher.
|
|
|
|
* This includes AviSynth 2.6 RC1 or higher, and AviSynth+ r1718 or higher,
|
|
|
|
* and excludes 2.5 and the 2.6 alphas. Since AvxSynth identifies itself
|
|
|
|
* as interface version 3 like 2.5.8, this needs to be special-cased. */
|
2015-03-24 21:23:30 +02:00
|
|
|
|
2015-04-02 21:38:54 +02:00
|
|
|
if (avs_library.avs_get_version(avs->clip) < 6) {
|
2015-03-24 21:23:30 +02:00
|
|
|
av_log(s, AV_LOG_ERROR,
|
2015-04-02 21:38:54 +02:00
|
|
|
"AviSynth version is too old. Please upgrade to either AviSynth 2.6 >= RC1 or AviSynth+ >= r1718.\n");
|
2015-03-24 21:23:30 +02:00
|
|
|
ret = AVERROR_UNKNOWN;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2013-10-28 23:52:44 +03:00
|
|
|
/* Release the AVS_Value as it will go out of scope. */
|
2013-12-01 14:03:35 +03:00
|
|
|
avs_library.avs_release_value(val);
|
2013-02-27 02:02:20 +03:00
|
|
|
|
|
|
|
if (ret = avisynth_create_stream(s))
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
return 0;
|
2006-08-29 11:15:38 +03:00
|
|
|
|
2013-02-27 02:02:20 +03:00
|
|
|
fail:
|
|
|
|
avisynth_context_destroy(avs);
|
|
|
|
return ret;
|
2006-08-29 11:15:38 +03:00
|
|
|
}
|
|
|
|
|
2013-10-28 23:52:45 +03:00
|
|
|
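/* Advance to the next stream in round-robin order and report whether its
 * packets should be discarded. */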
static void avisynth_next_stream(AVFormatContext *s, AVStream **st,
|
|
|
|
AVPacket *pkt, int *discard)
|
|
|
|
{
|
2013-02-27 02:02:20 +03:00
|
|
|
AviSynthContext *avs = s->priv_data;
|
2006-08-29 11:15:38 +03:00
|
|
|
|
2014-01-09 20:33:39 +03:00
|
|
|
avs->curr_stream++;
|
2013-02-27 02:02:20 +03:00
|
|
|
avs->curr_stream %= s->nb_streams;
|
|
|
|
|
2014-01-09 20:33:39 +03:00
|
|
|
*st = s->streams[avs->curr_stream];
|
2013-02-27 02:02:20 +03:00
|
|
|
if ((*st)->discard == AVDISCARD_ALL)
|
|
|
|
*discard = 1;
|
|
|
|
else
|
|
|
|
*discard = 0;
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-10-28 23:52:44 +03:00
|
|
|
/* Copy AviSynth clip data into an AVPacket. */
|
2013-10-28 23:52:45 +03:00
|
|
|
static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt,
|
|
|
|
int discard)
|
|
|
|
{
|
2013-02-27 02:02:20 +03:00
|
|
|
AviSynthContext *avs = s->priv_data;
|
|
|
|
AVS_VideoFrame *frame;
|
2013-04-10 01:38:27 +03:00
|
|
|
unsigned char *dst_p;
|
|
|
|
const unsigned char *src_p;
|
2014-01-09 20:33:39 +03:00
|
|
|
int n, i, plane, rowsize, planeheight, pitch, bits;
|
2013-04-10 01:38:27 +03:00
|
|
|
const char *error;
|
avisynth: Support pix_fmts added to AviSynth+
2016-10-27 04:54:18 +02:00
|
|
|
int avsplus av_unused;
|
2013-02-27 02:02:20 +03:00
|
|
|
|
|
|
|
if (avs->curr_frame >= avs->vi->num_frames)
|
|
|
|
return AVERROR_EOF;
|
|
|
|
|
2013-10-28 23:52:44 +03:00
|
|
|
/* This must happen even if the stream is discarded to prevent desync. */
|
2013-04-10 01:36:58 +03:00
|
|
|
n = avs->curr_frame++;
|
2013-02-27 02:02:20 +03:00
|
|
|
if (discard)
|
|
|
|
return 0;
|
|
|
|
|
2013-10-28 23:52:46 +03:00
|
|
|
#ifdef USING_AVISYNTH
|
avisynth: support pix_fmts added to AviSynth+
2016-08-17 00:45:21 +02:00
|
|
|
/* Detect whether we're using AviSynth 2.6 or AviSynth+ by
|
|
|
|
 * checking whether avs_is_planar_rgb() exists. */
|
|
|
|
if (GetProcAddress(avs_library.library, "avs_is_planar_rgb") == NULL)
|
|
|
|
avsplus = 0;
|
|
|
|
else
|
|
|
|
avsplus = 1;
|
|
|
|
|
2016-08-11 05:14:35 +02:00
|
|
|
/* avs_bits_per_pixel changed to AVSC_API with AviSynth 2.6, which
|
|
|
|
* requires going through avs_library, while AvxSynth has it under
|
|
|
|
* the older AVSC_INLINE type, so special-case this. */
|
|
|
|
|
|
|
|
bits = avs_library.avs_bits_per_pixel(avs->vi);
|
2015-03-13 04:52:29 +02:00
|
|
|
#else
|
|
|
|
bits = avs_bits_per_pixel(avs->vi);
|
2013-10-28 23:52:46 +03:00
|
|
|
#endif
|
2013-02-27 02:02:20 +03:00
|
|
|
|
2013-10-28 23:52:44 +03:00
|
|
|
/* Without the cast to int64_t, calculation overflows at about 9k x 9k
|
|
|
|
 * resolution (e.g. 9000 * 9000 * 32 = 2.59e9, which exceeds INT_MAX). */
|
2013-10-28 23:52:45 +03:00
|
|
|
pkt->size = (((int64_t)avs->vi->width *
|
|
|
|
(int64_t)avs->vi->height) * bits) / 8;
|
2013-02-27 02:02:20 +03:00
|
|
|
if (!pkt->size)
|
|
|
|
return AVERROR_UNKNOWN;
|
2014-01-08 08:37:56 +03:00
|
|
|
|
2014-01-09 20:33:39 +03:00
|
|
|
if (av_new_packet(pkt, pkt->size) < 0)
|
2013-10-28 23:52:51 +03:00
|
|
|
return AVERROR(ENOMEM);
|
2013-02-27 02:02:20 +03:00
|
|
|
|
2014-01-09 20:33:39 +03:00
|
|
|
pkt->pts = n;
|
|
|
|
pkt->dts = n;
|
|
|
|
pkt->duration = 1;
|
|
|
|
pkt->stream_index = avs->curr_stream;
|
2014-01-08 08:37:57 +03:00
|
|
|
|
2013-12-01 14:03:35 +03:00
|
|
|
frame = avs_library.avs_get_frame(avs->clip, n);
|
|
|
|
error = avs_library.avs_clip_get_error(avs->clip);
|
2013-02-27 02:02:20 +03:00
|
|
|
if (error) {
|
|
|
|
av_log(s, AV_LOG_ERROR, "%s\n", error);
|
|
|
|
avs->error = 1;
|
2014-01-08 08:37:56 +03:00
|
|
|
av_packet_unref(pkt);
|
2013-02-27 02:02:20 +03:00
|
|
|
return AVERROR_UNKNOWN;
|
|
|
|
}
|
|
|
|
|
|
|
|
dst_p = pkt->data;
|
|
|
|
for (i = 0; i < avs->n_planes; i++) {
|
|
|
|
plane = avs->planes[i];
|
2013-10-28 23:52:43 +03:00
|
|
|
#ifdef USING_AVISYNTH
|
2015-03-13 04:52:29 +02:00
|
|
|
src_p = avs_library.avs_get_read_ptr_p(frame, plane);
|
|
|
|
pitch = avs_library.avs_get_pitch_p(frame, plane);
|
|
|
|
|
2015-03-24 21:23:30 +02:00
|
|
|
rowsize = avs_library.avs_get_row_size_p(frame, plane);
|
|
|
|
planeheight = avs_library.avs_get_height_p(frame, plane);
|
2013-08-17 00:29:55 +03:00
|
|
|
#else
|
2013-11-27 05:58:28 +03:00
|
|
|
src_p = avs_get_read_ptr_p(frame, plane);
|
|
|
|
pitch = avs_get_pitch_p(frame, plane);
|
|
|
|
|
2013-10-28 23:52:45 +03:00
|
|
|
rowsize = avs_get_row_size_p(frame, plane);
|
2013-02-27 02:02:20 +03:00
|
|
|
planeheight = avs_get_height_p(frame, plane);
|
2013-08-17 00:29:55 +03:00
|
|
|
#endif
|
2013-02-27 02:02:20 +03:00
|
|
|
|
2013-10-28 23:52:44 +03:00
|
|
|
/* Flip RGB video. */
|
2013-02-27 02:02:20 +03:00
|
|
|
if (avs_is_rgb24(avs->vi) || avs_is_rgb(avs->vi)) {
|
|
|
|
src_p = src_p + (planeheight - 1) * pitch;
|
|
|
|
pitch = -pitch;
|
|
|
|
}
|
|
|
|
|
avisynth: support pix_fmts added to AviSynth+
2016-08-17 00:45:21 +02:00
|
|
|
#ifdef USING_AVISYNTH
|
avisynth: Support pix_fmts added to AviSynth+
2016-10-27 04:54:18 +02:00
|
|
|
/* Flip Planar RGB video */
|
avisynth: support pix_fmts added to AviSynth+
2016-08-17 00:45:21 +02:00
|
|
|
if (avsplus && (avs_library.avs_is_planar_rgb(avs->vi) ||
|
2016-08-31 02:26:08 +02:00
|
|
|
avs_library.avs_is_planar_rgba(avs->vi))) {
|
|
|
|
src_p = src_p + (planeheight - 1) * pitch;
|
avisynth: support pix_fmts added to AviSynth+
2016-08-17 00:45:21 +02:00
|
|
|
pitch = -pitch;
|
2016-08-31 02:26:08 +02:00
|
|
|
}
|
avisynth: support pix_fmts added to AviSynth+
2016-08-17 00:45:21 +02:00
|
|
|
#endif
|
|
|
|
|
2013-12-01 14:03:35 +03:00
|
|
|
avs_library.avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch,
|
2013-10-28 23:52:45 +03:00
|
|
|
rowsize, planeheight);
|
2013-02-27 02:02:20 +03:00
|
|
|
dst_p += rowsize * planeheight;
|
|
|
|
}
|
|
|
|
|
2013-12-01 14:03:35 +03:00
|
|
|
avs_library.avs_release_video_frame(frame);
|
2013-02-27 02:02:20 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-10-28 23:52:45 +03:00
|
|
|
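/* Copy a block of audio samples from the clip into an AVPacket. */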
static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt,
|
|
|
|
int discard)
|
|
|
|
{
|
2013-02-27 02:02:20 +03:00
|
|
|
AviSynthContext *avs = s->priv_data;
|
|
|
|
AVRational fps, samplerate;
|
2014-01-09 20:33:39 +03:00
|
|
|
int samples;
|
2013-04-10 01:36:58 +03:00
|
|
|
int64_t n;
|
2013-04-10 01:38:27 +03:00
|
|
|
const char *error;
|
2013-02-27 02:02:20 +03:00
|
|
|
|
|
|
|
if (avs->curr_sample >= avs->vi->num_audio_samples)
|
|
|
|
return AVERROR_EOF;
|
|
|
|
|
2013-10-28 23:52:45 +03:00
|
|
|
fps.num = avs->vi->fps_numerator;
|
|
|
|
fps.den = avs->vi->fps_denominator;
|
2013-02-27 02:02:20 +03:00
|
|
|
samplerate.num = avs->vi->audio_samples_per_second;
|
|
|
|
samplerate.den = 1;
|
|
|
|
|
|
|
|
if (avs_has_video(avs->vi)) {
|
|
|
|
if (avs->curr_frame < avs->vi->num_frames)
|
2013-10-28 23:52:45 +03:00
|
|
|
samples = av_rescale_q(avs->curr_frame, samplerate, fps) -
|
|
|
|
avs->curr_sample;
|
2013-02-27 02:02:20 +03:00
|
|
|
else
|
|
|
|
samples = av_rescale_q(1, samplerate, fps);
|
|
|
|
} else {
|
|
|
|
samples = 1000;
|
|
|
|
}
|
|
|
|
|
2013-10-28 23:52:44 +03:00
|
|
|
/* After seeking, audio may catch up with video. */
|
2013-02-27 02:02:20 +03:00
|
|
|
if (samples <= 0) {
|
|
|
|
pkt->size = 0;
|
|
|
|
pkt->data = NULL;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (avs->curr_sample + samples > avs->vi->num_audio_samples)
|
|
|
|
samples = avs->vi->num_audio_samples - avs->curr_sample;
|
|
|
|
|
2013-10-28 23:52:44 +03:00
|
|
|
/* This must happen even if the stream is discarded to prevent desync. */
|
2013-10-28 23:52:45 +03:00
|
|
|
n = avs->curr_sample;
|
2013-02-27 02:02:20 +03:00
|
|
|
avs->curr_sample += samples;
|
|
|
|
if (discard)
|
|
|
|
return 0;
|
|
|
|
|
2013-10-28 23:52:45 +03:00
|
|
|
pkt->size = avs_bytes_per_channel_sample(avs->vi) *
|
|
|
|
samples * avs->vi->nchannels;
|
2013-02-27 02:02:20 +03:00
|
|
|
if (!pkt->size)
|
|
|
|
return AVERROR_UNKNOWN;
|
2014-01-08 08:37:56 +03:00
|
|
|
|
2014-01-09 20:33:39 +03:00
|
|
|
if (av_new_packet(pkt, pkt->size) < 0)
|
2013-10-28 23:52:51 +03:00
|
|
|
return AVERROR(ENOMEM);
|
2013-02-27 02:02:20 +03:00
|
|
|
|
2014-01-09 20:33:39 +03:00
|
|
|
pkt->pts = n;
|
|
|
|
pkt->dts = n;
|
|
|
|
pkt->duration = samples;
|
|
|
|
pkt->stream_index = avs->curr_stream;
|
2014-01-08 08:37:57 +03:00
|
|
|
|
2013-12-01 14:03:35 +03:00
|
|
|
avs_library.avs_get_audio(avs->clip, pkt->data, n, samples);
|
|
|
|
error = avs_library.avs_clip_get_error(avs->clip);
|
2013-02-27 02:02:20 +03:00
|
|
|
if (error) {
|
|
|
|
av_log(s, AV_LOG_ERROR, "%s\n", error);
|
|
|
|
avs->error = 1;
|
2014-01-08 08:37:56 +03:00
|
|
|
av_packet_unref(pkt);
|
2013-02-27 02:02:20 +03:00
|
|
|
return AVERROR_UNKNOWN;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-10-28 23:52:45 +03:00
|
|
|
static av_cold int avisynth_read_header(AVFormatContext *s)
|
|
|
|
{
|
2013-02-27 02:02:20 +03:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
// Calling library must implement a lock for thread-safe opens.
|
2017-12-21 23:54:06 +02:00
|
|
|
if (ret = ff_lock_avformat())
|
2013-02-27 02:02:20 +03:00
|
|
|
return ret;
|
|
|
|
|
|
|
|
if (ret = avisynth_open_file(s)) {
|
2017-12-21 23:54:06 +02:00
|
|
|
ff_unlock_avformat();
|
2013-02-27 02:02:20 +03:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-12-21 23:54:06 +02:00
|
|
|
ff_unlock_avformat();
|
2013-02-27 02:02:20 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-10-28 23:52:45 +03:00
|
|
|
static int avisynth_read_packet(AVFormatContext *s, AVPacket *pkt)
|
|
|
|
{
|
2013-02-27 02:02:20 +03:00
|
|
|
AviSynthContext *avs = s->priv_data;
|
|
|
|
AVStream *st;
|
|
|
|
int discard = 0;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (avs->error)
|
|
|
|
return AVERROR_UNKNOWN;
|
|
|
|
|
2013-10-28 23:52:44 +03:00
|
|
|
/* If either stream reaches EOF, try to read the other one before
|
|
|
|
* giving up. */
|
2013-02-27 02:02:20 +03:00
|
|
|
avisynth_next_stream(s, &st, pkt, &discard);
|
lavf: replace AVStream.codec with AVStream.codecpar
2014-06-18 21:42:52 +03:00
|
|
|
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
|
2013-02-27 02:02:20 +03:00
|
|
|
ret = avisynth_read_packet_video(s, pkt, discard);
|
|
|
|
if (ret == AVERROR_EOF && avs_has_audio(avs->vi)) {
|
|
|
|
avisynth_next_stream(s, &st, pkt, &discard);
|
|
|
|
return avisynth_read_packet_audio(s, pkt, discard);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ret = avisynth_read_packet_audio(s, pkt, discard);
|
|
|
|
if (ret == AVERROR_EOF && avs_has_video(avs->vi)) {
|
|
|
|
avisynth_next_stream(s, &st, pkt, &discard);
|
|
|
|
return avisynth_read_packet_video(s, pkt, discard);
|
|
|
|
}
|
|
|
|
}
|
2013-10-28 23:52:53 +03:00
|
|
|
|
|
|
|
return ret;
|
2013-02-27 02:02:20 +03:00
|
|
|
}
|
|
|
|
|
2013-10-28 23:52:45 +03:00
|
|
|
static av_cold int avisynth_read_close(AVFormatContext *s)
|
|
|
|
{
|
2017-12-21 23:54:06 +02:00
|
|
|
if (ff_lock_avformat())
|
2013-02-27 02:02:20 +03:00
|
|
|
return AVERROR_UNKNOWN;
|
|
|
|
|
|
|
|
avisynth_context_destroy(s->priv_data);
|
2017-12-21 23:54:06 +02:00
|
|
|
ff_unlock_avformat();
|
2013-02-27 02:02:20 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-10-28 23:52:45 +03:00
|
|
|
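/* Seeking only adjusts the current frame/sample position; AviSynth itself
 * provides random access by frame or sample number. */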
static int avisynth_read_seek(AVFormatContext *s, int stream_index,
|
|
|
|
int64_t timestamp, int flags)
|
|
|
|
{
|
2013-02-27 02:02:20 +03:00
|
|
|
AviSynthContext *avs = s->priv_data;
|
|
|
|
AVStream *st;
|
|
|
|
AVRational fps, samplerate;
|
|
|
|
|
|
|
|
if (avs->error)
|
|
|
|
return AVERROR_UNKNOWN;
|
|
|
|
|
2013-10-28 23:52:45 +03:00
|
|
|
fps = (AVRational) { avs->vi->fps_numerator,
|
|
|
|
avs->vi->fps_denominator };
|
|
|
|
samplerate = (AVRational) { avs->vi->audio_samples_per_second, 1 };
|
2013-02-27 02:02:20 +03:00
|
|
|
|
|
|
|
st = s->streams[stream_index];
|
lavf: replace AVStream.codec with AVStream.codecpar
2014-06-18 21:42:52 +03:00
|
|
|
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
|
2013-10-28 23:52:44 +03:00
|
|
|
/* AviSynth frame counts are signed int. */
|
2013-10-28 23:52:45 +03:00
|
|
|
if ((timestamp >= avs->vi->num_frames) ||
|
|
|
|
(timestamp > INT_MAX) ||
|
|
|
|
(timestamp < 0))
|
2013-02-27 02:02:20 +03:00
|
|
|
return AVERROR_EOF;
|
|
|
|
avs->curr_frame = timestamp;
|
|
|
|
if (avs_has_audio(avs->vi))
|
|
|
|
avs->curr_sample = av_rescale_q(timestamp, samplerate, fps);
|
|
|
|
} else {
|
|
|
|
if ((timestamp >= avs->vi->num_audio_samples) || (timestamp < 0))
|
|
|
|
return AVERROR_EOF;
|
2013-10-28 23:52:44 +03:00
|
|
|
/* Force frame granularity for seeking. */
|
2013-02-27 02:02:20 +03:00
|
|
|
if (avs_has_video(avs->vi)) {
|
2013-10-28 23:52:45 +03:00
|
|
|
avs->curr_frame = av_rescale_q(timestamp, fps, samplerate);
|
2013-02-27 02:02:20 +03:00
|
|
|
avs->curr_sample = av_rescale_q(avs->curr_frame, samplerate, fps);
|
|
|
|
} else {
|
|
|
|
avs->curr_sample = timestamp;
|
|
|
|
}
|
2006-08-29 11:15:38 +03:00
|
|
|
}
|
|
|
|
|
2013-02-27 02:02:20 +03:00
|
|
|
return 0;
|
2006-08-29 11:15:38 +03:00
|
|
|
}
|
|
|
|
|
2011-01-26 00:03:28 +02:00
|
|
|
AVInputFormat ff_avisynth_demuxer = {
|
2013-01-29 20:00:34 +03:00
|
|
|
.name = "avisynth",
|
2013-02-27 02:02:20 +03:00
|
|
|
.long_name = NULL_IF_CONFIG_SMALL("AviSynth script"),
|
|
|
|
.priv_data_size = sizeof(AviSynthContext),
|
2011-07-16 23:18:12 +03:00
|
|
|
.read_header = avisynth_read_header,
|
|
|
|
.read_packet = avisynth_read_packet,
|
|
|
|
.read_close = avisynth_read_close,
|
|
|
|
.read_seek = avisynth_read_seek,
|
|
|
|
.extensions = "avs",
|
2006-08-29 11:15:38 +03:00
|
|
|
};
|
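For reference, a minimal sketch of driving this demuxer through the public libavformat API, assuming an FFmpeg build configured with AviSynth support; the script name is a placeholder and error handling is reduced to the essentials.

#include <libavformat/avformat.h>

int demux_avs_script(void)
{
    AVFormatContext *fmt = NULL;
    AVPacket pkt;
    int ret;

    /* Needed on releases contemporary with this file; newer FFmpeg
     * versions register all formats automatically. */
    av_register_all();

    /* "input.avs" is a placeholder; any AviSynth script will do. */
    if ((ret = avformat_open_input(&fmt, "input.avs", NULL, NULL)) < 0)
        return ret;

    while ((ret = av_read_frame(fmt, &pkt)) >= 0) {
        /* pkt.stream_index identifies the video or audio stream
         * created by avisynth_create_stream(). */
        av_packet_unref(&pkt);
    }

    avformat_close_input(&fmt);
    return ret == AVERROR_EOF ? 0 : ret;
}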