
avcodec/decode: simplify applying or exporting skip samples

Copy packet side data to the output frame in ff_decode_frame_props_from_pkt()
instead of in discard_samples(), with the latter now only applying the skip if
required.
This will be useful for the following commit.

Signed-off-by: James Almer <jamrial@gmail.com>
Author: James Almer
Date:   2023-07-12 18:29:09 -03:00
parent 78c52a8ca4
commit 7db4c3eaa6
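
Both before and after this patch, the skip information uses the same 10-byte layout as AV_PKT_DATA_SKIP_SAMPLES: a little-endian u32 count of samples to skip from the start, a little-endian u32 count of samples to discard from the end, and one reason byte for each. Below is a minimal caller-side sketch, assuming a decoder opened with AV_CODEC_FLAG2_SKIP_MANUAL (so the skip is exported on the frame rather than applied); the helper name and logging are illustrative only, not part of this commit:

    #include <stdint.h>
    #include <libavutil/frame.h>
    #include <libavutil/intreadwrite.h>
    #include <libavutil/log.h>

    /* Illustrative caller-side helper (not part of this commit): inspect the
     * 10-byte AV_FRAME_DATA_SKIP_SAMPLES payload that discard_samples()
     * exports on the frame when AV_CODEC_FLAG2_SKIP_MANUAL is set, in which
     * case the caller is responsible for trimming the samples itself. */
    static void report_skip_samples(const AVFrame *frame)
    {
        const AVFrameSideData *sd =
            av_frame_get_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES);
        if (!sd || sd->size < 10)
            return;

        uint32_t skip           = AV_RL32(sd->data);     /* samples to skip from the start */
        uint32_t discard        = AV_RL32(sd->data + 4); /* samples to discard from the end */
        uint8_t  skip_reason    = sd->data[8];
        uint8_t  discard_reason = sd->data[9];

        av_log(NULL, AV_LOG_INFO,
               "caller must skip %u (reason %u) / discard %u (reason %u) samples\n",
               (unsigned)skip, (unsigned)skip_reason,
               (unsigned)discard, (unsigned)discard_reason);
    }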


@@ -293,26 +293,22 @@ static int discard_samples(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
 {
     AVCodecInternal *avci = avctx->internal;
     int ret = 0;
-    uint8_t *side;
-    size_t side_size;
+    AVFrameSideData *side;
     uint32_t discard_padding = 0;
     uint8_t skip_reason = 0;
     uint8_t discard_reason = 0;
 
-    side = av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
-    if (side && side_size >= 10) {
-        avci->skip_samples = AV_RL32(side);
+    side = av_frame_get_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES);
+    if (side && side->size >= 10) {
+        avci->skip_samples = AV_RL32(side->data);
         avci->skip_samples = FFMAX(0, avci->skip_samples);
-        discard_padding = AV_RL32(side + 4);
+        discard_padding = AV_RL32(side->data + 4);
         av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
                avci->skip_samples, (int)discard_padding);
-        skip_reason = AV_RL8(side + 8);
-        discard_reason = AV_RL8(side + 9);
+        skip_reason = AV_RL8(side->data + 8);
+        discard_reason = AV_RL8(side->data + 9);
     }
 
-    if (!frame->buf[0])
-        return AVERROR(EAGAIN);
-
     if (frame->format == AV_SAMPLE_FMT_NONE)
         frame->format = avctx->sample_fmt;
     if (!frame->ch_layout.nb_channels) {
@@ -333,15 +329,27 @@ FF_ENABLE_DEPRECATION_WARNINGS
     if (!frame->sample_rate)
         frame->sample_rate = avctx->sample_rate;
 
-    if ((frame->flags & AV_FRAME_FLAG_DISCARD) &&
-        !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+    if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+        if (!side && (avci->skip_samples || discard_padding))
+            side = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
+        if (side && (avci->skip_samples || discard_padding)) {
+            AV_WL32(side->data, avci->skip_samples);
+            AV_WL32(side->data + 4, discard_padding);
+            AV_WL8(side->data + 8, skip_reason);
+            AV_WL8(side->data + 9, discard_reason);
+            avci->skip_samples = 0;
+        }
+        return ret;
+    }
+    av_frame_remove_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES);
+
+    if ((frame->flags & AV_FRAME_FLAG_DISCARD)) {
         avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
         *discarded_samples += frame->nb_samples;
         return AVERROR(EAGAIN);
     }
 
-    if (avci->skip_samples > 0 &&
-        !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+    if (avci->skip_samples > 0) {
         if (frame->nb_samples <= avci->skip_samples){
             *discarded_samples += frame->nb_samples;
             avci->skip_samples -= frame->nb_samples;
@@ -372,8 +380,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
         }
     }
 
-    if (discard_padding > 0 && discard_padding <= frame->nb_samples &&
-        !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+    if (discard_padding > 0 && discard_padding <= frame->nb_samples) {
         if (discard_padding == frame->nb_samples) {
             *discarded_samples += frame->nb_samples;
             return AVERROR(EAGAIN);
@@ -392,17 +399,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
         }
     }
 
-    if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
-        AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
-        if (fside) {
-            AV_WL32(fside->data, avci->skip_samples);
-            AV_WL32(fside->data + 4, discard_padding);
-            AV_WL8(fside->data + 8, skip_reason);
-            AV_WL8(fside->data + 9, discard_reason);
-            avci->skip_samples = 0;
-        }
-    }
-
     return ret;
 }
@@ -465,15 +461,13 @@ FF_ENABLE_DEPRECATION_WARNINGS
     }
     emms_c();
 
-    if (!got_frame)
-        av_frame_unref(frame);
-
     if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
         ret = (!got_frame || frame->flags & AV_FRAME_FLAG_DISCARD)
                           ? AVERROR(EAGAIN)
                           : 0;
     } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
-        ret = discard_samples(avctx, frame, discarded_samples);
+        ret = !got_frame ? AVERROR(EAGAIN)
+                         : discard_samples(avctx, frame, discarded_samples);
     }
 
     if (ret == AVERROR(EAGAIN))
@@ -1386,6 +1380,7 @@ int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx,
         { AV_PKT_DATA_ICC_PROFILE,        AV_FRAME_DATA_ICC_PROFILE },
         { AV_PKT_DATA_S12M_TIMECODE,      AV_FRAME_DATA_S12M_TIMECODE },
         { AV_PKT_DATA_DYNAMIC_HDR10_PLUS, AV_FRAME_DATA_DYNAMIC_HDR_PLUS },
+        { AV_PKT_DATA_SKIP_SAMPLES,       AV_FRAME_DATA_SKIP_SAMPLES },
     };
 
     frame->pts = pkt->pts;
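
The entry added above is consumed by the generic side-data copy loop further down in ff_decode_frame_props_from_pkt() (not part of this diff), which duplicates each listed packet side data type onto the output frame. Roughly, and as a simplified sketch rather than the verbatim upstream code, that loop does the following:

    /* Simplified sketch of the loop that consumes the sd[] table later in
     * ff_decode_frame_props_from_pkt() (not shown in this diff): every packet
     * side data type listed in sd[] is copied onto the output frame, which is
     * why discard_samples() can now read AV_FRAME_DATA_SKIP_SAMPLES from the
     * frame instead of from avci->last_pkt_props. */
    for (int i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
        size_t size;
        uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
        if (packet_sd) {
            AVFrameSideData *frame_sd =
                av_frame_new_side_data(frame, sd[i].frame, size);
            if (!frame_sd)
                return AVERROR(ENOMEM);
            memcpy(frame_sd->data, packet_sd, size);
        }
    }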