Mirror of https://github.com/FFmpeg/FFmpeg.git, synced 2024-12-23 12:43:46 +02:00
Merge commit '182821cff43f5f977004d105b86c47ceb20d00d6'
* commit '182821cff43f5f977004d105b86c47ceb20d00d6':
  dca: decode directly to the user-provided AVFrame
  cook: decode directly to the user-provided AVFrame
  comfortnoise: decode directly to the user-provided AVFrame
  bmvaudio: decode directly to the user-provided AVFrame
  pcm: decode directly to the user-provided AVFrame

Conflicts:
	libavcodec/pcm.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 0a5138695a
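All five decoders get the same treatment in the hunks below (they touch the bmvaudio, comfortnoise, cook, dca and pcm decoders, in that order): the AVFrame that used to live in the codec's private context, be initialized with avcodec_get_frame_defaults() and be copied out through *(AVFrame *)data is dropped; the decode callback instead treats its data argument as the caller's AVFrame, sets nb_samples on it and lets ff_get_buffer() allocate it. The sketch below only illustrates that before/after pattern and is not code from any of the touched files; the ToyContext/toy_* names are hypothetical, and it assumes the libavcodec-internal headers of this era, where ff_get_buffer() still took two arguments, as it does in the hunks below.

#include "avcodec.h"
#include "internal.h"               /* ff_get_buffer() */

/* Old pattern: the decoder owns an AVFrame and copies it out via *data. */
typedef struct OldToyContext {
    AVFrame frame;                  /* frame stored in the private context */
} OldToyContext;

static av_cold int old_toy_decode_init(AVCodecContext *avctx)
{
    OldToyContext *c = avctx->priv_data;

    avcodec_get_frame_defaults(&c->frame);
    avctx->coded_frame = &c->frame; /* exactly the lines the diff removes */
    return 0;
}

static int old_toy_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
{
    OldToyContext *c = avctx->priv_data;
    int ret;

    /* toy sizing: 16-bit interleaved samples */
    c->frame.nb_samples = avpkt->size / (2 * avctx->channels);
    if ((ret = ff_get_buffer(avctx, &c->frame)) < 0)
        return ret;
    /* ... decode avpkt->data into c->frame.data[] ... */

    *got_frame_ptr   = 1;
    *(AVFrame *)data = c->frame;    /* shallow-copy the frame to the caller */
    return avpkt->size;
}

/* New pattern: decode straight into the AVFrame the caller passed in. */
static int new_toy_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;          /* the user-provided frame, no private copy */
    int ret;

    /* toy sizing: 16-bit interleaved samples */
    frame->nb_samples = avpkt->size / (2 * avctx->channels);
    if ((ret = ff_get_buffer(avctx, frame)) < 0)
        return ret;
    /* ... decode avpkt->data into frame->data[] ... */

    *got_frame_ptr = 1;             /* nothing to copy out; the buffer already belongs to the caller */
    return avpkt->size;
}

With the new pattern the private context no longer needs to hold a frame at all, which is why the bmvaudio hunks can also drop .priv_data_size and the BMVAudioDecContext struct entirely.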
@@ -302,32 +302,23 @@ static av_cold int decode_end(AVCodecContext *avctx)
     return 0;
 }
 
-typedef struct BMVAudioDecContext {
-    AVFrame frame;
-} BMVAudioDecContext;
-
 static const int bmv_aud_mults[16] = {
     16512, 8256, 4128, 2064, 1032, 516, 258, 192, 129, 88, 64, 56, 48, 40, 36, 32
 };
 
 static av_cold int bmv_aud_decode_init(AVCodecContext *avctx)
 {
-    BMVAudioDecContext *c = avctx->priv_data;
-
     avctx->channels = 2;
     avctx->channel_layout = AV_CH_LAYOUT_STEREO;
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
-    avcodec_get_frame_defaults(&c->frame);
-    avctx->coded_frame = &c->frame;
-
     return 0;
 }
 
 static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data,
                                 int *got_frame_ptr, AVPacket *avpkt)
 {
-    BMVAudioDecContext *c = avctx->priv_data;
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     int blocks = 0, total_blocks, i;
@@ -343,12 +334,12 @@ static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    c->frame.nb_samples = total_blocks * 32;
-    if ((ret = ff_get_buffer(avctx, &c->frame)) < 0) {
+    frame->nb_samples = total_blocks * 32;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    output_samples = (int16_t *)c->frame.data[0];
+    output_samples = (int16_t *)frame->data[0];
 
     for (blocks = 0; blocks < total_blocks; blocks++) {
         uint8_t code = *buf++;
@@ -362,7 +353,6 @@ static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = c->frame;
 
     return buf_size;
 }
@@ -383,7 +373,6 @@ AVCodec ff_bmv_audio_decoder = {
     .name = "bmv_audio",
     .type = AVMEDIA_TYPE_AUDIO,
     .id = AV_CODEC_ID_BMV_AUDIO,
-    .priv_data_size = sizeof(BMVAudioDecContext),
     .init = bmv_aud_decode_init,
     .decode = bmv_aud_decode_frame,
     .capabilities = CODEC_CAP_DR1,
@@ -28,7 +28,6 @@
 #include "libavutil/lfg.h"
 
 typedef struct CNGContext {
-    AVFrame avframe;
     float *refl_coef, *target_refl_coef;
     float *lpc_coef;
     int order;
@@ -58,8 +57,6 @@ static av_cold int cng_decode_init(AVCodecContext *avctx)
     avctx->channels = 1;
     avctx->sample_rate = 8000;
 
-    avcodec_get_frame_defaults(&p->avframe);
-    avctx->coded_frame = &p->avframe;
     p->order = 12;
     avctx->frame_size = 640;
     p->refl_coef = av_mallocz(p->order * sizeof(*p->refl_coef));
@@ -105,7 +102,7 @@ static void cng_decode_flush(AVCodecContext *avctx)
 static int cng_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
 {
-
+    AVFrame *frame = data;
     CNGContext *p = avctx->priv_data;
     int buf_size = avpkt->size;
     int ret, i;
@@ -144,19 +141,18 @@ static int cng_decode_frame(AVCodecContext *avctx, void *data,
     ff_celp_lp_synthesis_filterf(p->filter_out + p->order, p->lpc_coef,
                                  p->excitation, avctx->frame_size, p->order);
 
-    p->avframe.nb_samples = avctx->frame_size;
-    if ((ret = ff_get_buffer(avctx, &p->avframe)) < 0) {
+    frame->nb_samples = avctx->frame_size;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    buf_out = (int16_t *)p->avframe.data[0];
+    buf_out = (int16_t *)frame->data[0];
     for (i = 0; i < avctx->frame_size; i++)
         buf_out[i] = p->filter_out[i + p->order];
     memcpy(p->filter_out, p->filter_out + avctx->frame_size,
            p->order * sizeof(*p->filter_out));
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = p->avframe;
 
     return buf_size;
 }
@@ -123,7 +123,6 @@ typedef struct cook {
 
     AVCodecContext* avctx;
     DSPContext dsp;
-    AVFrame frame;
     GetBitContext gb;
     /* stream data */
     int num_vectors;
@@ -956,6 +955,7 @@ static int decode_subpacket(COOKContext *q, COOKSubpacket *p,
 static int cook_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     COOKContext *q = avctx->priv_data;
@@ -969,12 +969,12 @@ static int cook_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     if (q->discarded_packets >= 2) {
-        q->frame.nb_samples = q->samples_per_channel;
-        if ((ret = ff_get_buffer(avctx, &q->frame)) < 0) {
+        frame->nb_samples = q->samples_per_channel;
+        if ((ret = ff_get_buffer(avctx, frame)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return ret;
         }
-        samples = (float **)q->frame.extended_data;
+        samples = (float **)frame->extended_data;
     }
 
     /* estimate subpacket sizes */
@@ -1016,7 +1016,6 @@ static int cook_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *) data = q->frame;
 
     return avctx->block_align;
 }
@@ -1269,9 +1268,6 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
     else
         avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
 
-    avcodec_get_frame_defaults(&q->frame);
-    avctx->coded_frame = &q->frame;
-
 #ifdef DEBUG
     dump_cook_context(q);
 #endif
@@ -349,7 +349,6 @@ static av_always_inline int get_bitalloc(GetBitContext *gb, BitAlloc *ba,
 
 typedef struct {
     AVCodecContext *avctx;
-    AVFrame frame;
     /* Frame header */
     int frame_type; ///< type of the current frame
     int samples_deficit; ///< deficit sample count
@@ -2067,6 +2066,7 @@ static void dca_exss_parse_header(DCAContext *s)
 static int dca_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     int channel_mask;
@@ -2354,17 +2354,17 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    s->frame.nb_samples = 256 * (s->sample_blocks / 8);
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = 256 * (s->sample_blocks / 8);
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples_flt = (float **) s->frame.extended_data;
+    samples_flt = (float **)frame->extended_data;
 
     /* allocate buffer for extra channels if downmixing */
     if (avctx->channels < full_channels) {
         ret = av_samples_get_buffer_size(NULL, full_channels - channels,
-                                         s->frame.nb_samples,
+                                         frame->nb_samples,
                                          avctx->sample_fmt, 0);
         if (ret < 0)
             return ret;
@@ -2377,7 +2377,7 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
         ret = av_samples_fill_arrays((uint8_t **)s->extra_channels, NULL,
                                      s->extra_channels_buffer,
                                      full_channels - channels,
-                                     s->frame.nb_samples, avctx->sample_fmt, 0);
+                                     frame->nb_samples, avctx->sample_fmt, 0);
         if (ret < 0)
             return ret;
     }
@@ -2457,7 +2457,6 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
         s->lfe_data[i] = s->lfe_data[i + lfe_samples];
 
     *got_frame_ptr = 1;
-    *(AVFrame *) data = s->frame;
 
     return buf_size;
 }
@@ -2491,9 +2490,6 @@ static av_cold int dca_decode_init(AVCodecContext *avctx)
         avctx->channels = avctx->request_channels;
     }
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
@@ -230,7 +230,6 @@ static int pcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
 }
 
 typedef struct PCMDecode {
-    AVFrame frame;
     short table[256];
 } PCMDecode;
 
@@ -262,9 +261,6 @@ static av_cold int pcm_decode_init(AVCodecContext *avctx)
     if (avctx->sample_fmt == AV_SAMPLE_FMT_S32)
         avctx->bits_per_raw_sample = av_get_bits_per_sample(avctx->codec_id);
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
@@ -289,7 +285,7 @@ static av_cold int pcm_decode_init(AVCodecContext *avctx)
     n /= avctx->channels; \
     for (c = 0; c < avctx->channels; c++) { \
         int i; \
-        dst = s->frame.extended_data[c]; \
+        dst = frame->extended_data[c]; \
         for (i = n; i > 0; i--) { \
             uint ## size ## _t v = bytestream_get_ ## endian(&src); \
             AV_WN ## size ## A(dst, (v - offset) << shift); \
@@ -303,6 +299,7 @@ static int pcm_decode_frame(AVCodecContext *avctx, void *data,
     const uint8_t *src = avpkt->data;
     int buf_size = avpkt->size;
     PCMDecode *s = avctx->priv_data;
+    AVFrame *frame = data;
     int sample_size, c, n, ret, samples_per_block;
     uint8_t *samples;
     int32_t *dst_int32_t;
@@ -358,12 +355,12 @@ static int pcm_decode_frame(AVCodecContext *avctx, void *data,
     n = buf_size / sample_size;
 
     /* get output buffer */
-    s->frame.nb_samples = n * samples_per_block / avctx->channels;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = n * samples_per_block / avctx->channels;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples = s->frame.data[0];
+    samples = frame->data[0];
 
     switch (avctx->codec_id) {
     case AV_CODEC_ID_PCM_U32LE:
@@ -410,7 +407,7 @@ static int pcm_decode_frame(AVCodecContext *avctx, void *data,
         n /= avctx->channels;
         for (c = 0; c < avctx->channels; c++) {
             int i;
-            samples = s->frame.extended_data[c];
+            samples = frame->extended_data[c];
             for (i = n; i > 0; i--)
                 *samples++ = *src++ + 128;
         }
@@ -466,7 +463,7 @@ static int pcm_decode_frame(AVCodecContext *avctx, void *data,
 #endif /* HAVE_BIGENDIAN */
         n /= avctx->channels;
         for (c = 0; c < avctx->channels; c++) {
-            samples = s->frame.extended_data[c];
+            samples = frame->extended_data[c];
             bytestream_get_buffer(&src, samples, n * sample_size);
         }
         break;
@@ -488,7 +485,7 @@ static int pcm_decode_frame(AVCodecContext *avctx, void *data,
     case AV_CODEC_ID_PCM_DVD:
     {
         const uint8_t *src8;
-        dst_int32_t = (int32_t *)s->frame.data[0];
+        dst_int32_t = (int32_t *)frame->data[0];
         n /= avctx->channels;
         switch (avctx->bits_per_coded_sample) {
         case 20:
@@ -521,7 +518,7 @@ static int pcm_decode_frame(AVCodecContext *avctx, void *data,
         int i;
         n /= avctx->channels;
         for (c = 0; c < avctx->channels; c++) {
-            dst_int32_t = (int32_t *)s->frame.extended_data[c];
+            dst_int32_t = (int32_t *)frame->extended_data[c];
             for (i = 0; i < n; i++) {
                 // extract low 20 bits and expand to 32 bits
                 *dst_int32_t++ = (src[2] << 28) |
@@ -545,7 +542,6 @@ static int pcm_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return buf_size;
 }