FFmpeg/libavcodec/tests/avcodec.c

/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
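
/* Self-test that iterates over all registered codecs and checks that the
 * public AVCodec fields, the internal FFCodec fields and the corresponding
 * AVCodecDescriptor are mutually consistent. */
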
#include "libavutil/opt.h"
#include "libavcodec/codec.h"
#include "libavcodec/codec_desc.h"
#include "libavcodec/codec_internal.h"

static const char *get_type_string(enum AVMediaType type)
{
    const char *ret = av_get_media_type_string(type);
    return ret ? ret : "unknown";
}

#define AV_LOG(...) av_log(NULL, AV_LOG_FATAL, __VA_ARGS__)
#define ERR_INTERNAL(msg, ...)                  \
    do {                                        \
        AV_LOG(msg, codec->name __VA_ARGS__);   \
        ret = 1;                                \
    } while (0)
#define ERR(msg)          ERR_INTERNAL(msg, )
#define ERR_EXT(msg, ...) ERR_INTERNAL(msg, , __VA_ARGS__)
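
/* Check that the private context is large enough to hold the AVClass pointer
 * (when a priv_class is present) and that every AVOption offset points
 * inside it. */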
static int priv_data_size_wrong(const FFCodec *codec)
{
    if (codec->priv_data_size < 0 ||
        codec->p.priv_class && codec->priv_data_size < sizeof(AVClass*))
        return 1;
    if (!codec->p.priv_class || !codec->p.priv_class->option)
        return 0;
    for (const AVOption *opt = codec->p.priv_class->option; opt->name; opt++) {
        if (opt->offset >= codec->priv_data_size ||
            opt->type == AV_OPT_TYPE_CONST && opt->offset != 0 ||
            opt->type != AV_OPT_TYPE_CONST && (opt->offset < sizeof(AVClass*) || opt->offset < 0)) {
            AV_LOG("Option %s offset %d nonsensical\n",
                   opt->name, opt->offset);
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    void *iter = NULL;
    const AVCodec *codec = NULL;
    int ret = 0;

    while (codec = av_codec_iterate(&iter)) {
        const FFCodec *const codec2 = ffcodec(codec);
        const AVCodecDescriptor *desc;
        int is_decoder = 0, is_encoder = 0;

        if (!codec->name) {
            AV_LOG("Codec for format %s has no name\n",
                   avcodec_get_name(codec->id));
            ret = 1;
            continue;
        }
        if (codec->type != AVMEDIA_TYPE_VIDEO &&
            codec->type != AVMEDIA_TYPE_AUDIO &&
            codec->type != AVMEDIA_TYPE_SUBTITLE)
            ERR_EXT("Codec %s has unsupported type %s\n",
                    get_type_string(codec->type));
        if (codec->type != AVMEDIA_TYPE_AUDIO) {
            if (codec->ch_layouts || codec->sample_fmts ||
                codec->supported_samplerates)
                ERR("Non-audio codec %s has audio-only fields set\n");
            if (codec->capabilities & (AV_CODEC_CAP_SMALL_LAST_FRAME |
                                       AV_CODEC_CAP_CHANNEL_CONF |
                                       AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
                ERR("Non-audio codec %s has audio-only capabilities set\n");
        }
        if (codec->type != AVMEDIA_TYPE_VIDEO) {
            if (codec->pix_fmts || codec->supported_framerates)
                ERR("Non-video codec %s has video-only fields set\n");
            if (codec2->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)
                ERR("Non-video codec %s exports cropping\n");
        }
        if (codec2->caps_internal & FF_CODEC_CAP_SLICE_THREAD_HAS_MF &&
            !(codec->capabilities & AV_CODEC_CAP_SLICE_THREADS))
ERR("Codec %s wants mainfunction despite not being "
"slice-threading capable");
        if (codec2->caps_internal & FF_CODEC_CAP_AUTO_THREADS &&
            !(codec->capabilities & (AV_CODEC_CAP_FRAME_THREADS |
                                     AV_CODEC_CAP_SLICE_THREADS |
                                     AV_CODEC_CAP_OTHER_THREADS)))
            ERR("Codec %s has private-only threading support\n");
        switch (codec2->cb_type) {
        case FF_CODEC_CB_TYPE_DECODE:
        case FF_CODEC_CB_TYPE_DECODE_SUB:
        case FF_CODEC_CB_TYPE_RECEIVE_FRAME:
            is_decoder = 1;
            break;
        case FF_CODEC_CB_TYPE_ENCODE:
        case FF_CODEC_CB_TYPE_ENCODE_SUB:
        case FF_CODEC_CB_TYPE_RECEIVE_PACKET:
            is_encoder = 1;
            break;
        default:
            ERR("Codec %s has unknown cb_type\n");
            continue;
        }
        if (is_decoder != av_codec_is_decoder(codec) ||
            is_encoder != av_codec_is_encoder(codec)) {
            ERR("Codec %s cb_type and av_codec_is_(de|en)coder inconsistent.\n");
            continue;
        }
#define CHECK(TYPE, type) (codec2->cb_type == FF_CODEC_CB_TYPE_ ## TYPE && !codec2->cb.type)
        if (CHECK(DECODE, decode) || CHECK(DECODE_SUB, decode_sub) ||
            CHECK(RECEIVE_PACKET, receive_packet) ||
            CHECK(ENCODE, encode) || CHECK(ENCODE_SUB, encode_sub) ||
            CHECK(RECEIVE_FRAME, receive_frame)) {
            ERR_EXT("Codec %s does not implement its %s callback.\n",
                    is_decoder ? "decoding" : "encoding");
        }
#undef CHECK
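        /* Checks that only apply to encoders. */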
        if (is_encoder) {
            if ((codec->type == AVMEDIA_TYPE_SUBTITLE) != (codec2->cb_type == FF_CODEC_CB_TYPE_ENCODE_SUB))
                ERR("Encoder %s is both subtitle encoder and not subtitle encoder.\n");
            if (codec2->update_thread_context || codec2->update_thread_context_for_user || codec2->bsfs)
                ERR("Encoder %s has decoder-only thread functions or bsf.\n");
            if (codec->type == AVMEDIA_TYPE_AUDIO) {
                if (!codec->sample_fmts) {
                    av_log(NULL, AV_LOG_FATAL, "Encoder %s is missing the sample_fmts field\n", codec->name);
                    ret = 1;
                }
            }
            if (codec2->caps_internal & (FF_CODEC_CAP_USES_PROGRESSFRAMES |
                                         FF_CODEC_CAP_SETS_PKT_DTS |
                                         FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM |
                                         FF_CODEC_CAP_EXPORTS_CROPPING |
                                         FF_CODEC_CAP_SETS_FRAME_PROPS) ||
                codec->capabilities & (AV_CODEC_CAP_AVOID_PROBING |
                                       AV_CODEC_CAP_CHANNEL_CONF |
                                       AV_CODEC_CAP_DRAW_HORIZ_BAND))
                ERR("Encoder %s has decoder-only capabilities set\n");
            if (codec->capabilities & AV_CODEC_CAP_FRAME_THREADS &&
                codec->capabilities & AV_CODEC_CAP_ENCODER_FLUSH)
                ERR("Frame-threaded encoder %s claims to support flushing\n");
            if (codec->capabilities & AV_CODEC_CAP_FRAME_THREADS &&
                codec->capabilities & AV_CODEC_CAP_DELAY)
                ERR("Frame-threaded encoder %s claims to have delay\n");
            if (codec2->caps_internal & FF_CODEC_CAP_EOF_FLUSH &&
                !(codec->capabilities & AV_CODEC_CAP_DELAY))
                ERR("EOF_FLUSH encoder %s is not marked as having delay\n");
        } else {
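            /* Checks that only apply to decoders. */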
            if ((codec->type == AVMEDIA_TYPE_SUBTITLE) != (codec2->cb_type == FF_CODEC_CB_TYPE_DECODE_SUB))
                ERR("Subtitle decoder %s does not implement decode_sub callback\n");
            if (codec->type == AVMEDIA_TYPE_SUBTITLE && codec2->bsfs)
                ERR("Automatic bitstream filtering unsupported for subtitles; "
                    "yet decoder %s has it set\n");
            if (codec->capabilities & (AV_CODEC_CAP_SMALL_LAST_FRAME |
                                       AV_CODEC_CAP_VARIABLE_FRAME_SIZE |
                                       AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE |
                                       AV_CODEC_CAP_ENCODER_FLUSH))
                ERR("Decoder %s has encoder-only capabilities\n");
            if (codec2->cb_type != FF_CODEC_CB_TYPE_DECODE &&
                codec2->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS)
                ERR("Decoder %s is marked as setting pkt_dts when it doesn't have "
                    "any effect\n");
        }
        if (priv_data_size_wrong(codec2))
            ERR_EXT("Private context of codec %s is impossibly-sized (size %d).\n",
                    codec2->priv_data_size);
        if (!(desc = avcodec_descriptor_get(codec->id))) {
            ERR("Codec %s lacks a corresponding descriptor\n");
        } else if (desc->type != codec->type)
            ERR_EXT("The type of AVCodec %s and its AVCodecDescriptor differ: "
                    "%s vs %s\n",
                    get_type_string(codec->type), get_type_string(desc->type));
    }
    return ret;
}