avcodec/adpcmenc: remove FF_ALLOC_OR_GOTO macros and goto label
Signed-off-by: Limin Wang <lance.lmwang@gmail.com>
parent 0a1dc81723
commit 3240121509
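
For context: the removed helpers, FF_ALLOC_OR_GOTO and FF_ALLOC_ARRAY_OR_GOTO from libavcodec/internal.h, allocate a buffer and jump to a caller-supplied cleanup label on failure. The sketch below is a rough paraphrase of that behaviour for readers of this diff, not the verbatim upstream definition:

    /* Rough paraphrase of the removed helper (not the exact upstream macro):
     * allocate with av_malloc(), log on failure, then jump to the caller's
     * cleanup label. Depends on av_malloc() from libavutil/mem.h and
     * av_log() from libavutil/log.h. */
    #define FF_ALLOC_OR_GOTO(ctx, p, size, label)                     \
    {                                                                 \
        p = av_malloc(size);                                          \
        if (!(p) && (size) != 0) {                                    \
            av_log(ctx, AV_LOG_ERROR, "Cannot allocate memory.\n");   \
            goto label;                                               \
        }                                                             \
    }

With the error label gone from adpcm_encode_init() and adpcm_encode_frame(), each call site now tests its allocation and returns an error code directly.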
--- a/libavcodec/adpcmenc.c
+++ b/libavcodec/adpcmenc.c
@@ -65,7 +65,6 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
     ADPCMEncodeContext *s = avctx->priv_data;
     uint8_t *extradata;
     int i;
-    int ret = AVERROR(ENOMEM);
 
     if (avctx->channels > 2) {
         av_log(avctx, AV_LOG_ERROR, "only stereo or mono is supported\n");
@@ -89,14 +88,11 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
     if (avctx->trellis) {
         int frontier = 1 << avctx->trellis;
         int max_paths = frontier * FREEZE_INTERVAL;
-        FF_ALLOC_OR_GOTO(avctx, s->paths,
-                         max_paths * sizeof(*s->paths), error);
-        FF_ALLOC_OR_GOTO(avctx, s->node_buf,
-                         2 * frontier * sizeof(*s->node_buf), error);
-        FF_ALLOC_OR_GOTO(avctx, s->nodep_buf,
-                         2 * frontier * sizeof(*s->nodep_buf), error);
-        FF_ALLOC_OR_GOTO(avctx, s->trellis_hash,
-                         65536 * sizeof(*s->trellis_hash), error);
+        if (!FF_ALLOC_TYPED_ARRAY(s->paths, max_paths) ||
+            !FF_ALLOC_TYPED_ARRAY(s->node_buf, 2 * frontier) ||
+            !FF_ALLOC_TYPED_ARRAY(s->nodep_buf, 2 * frontier) ||
+            !FF_ALLOC_TYPED_ARRAY(s->trellis_hash, 65536))
+            return AVERROR(ENOMEM);
     }
 
     avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id);
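
The replacement, FF_ALLOC_TYPED_ARRAY() from libavutil/internal.h, evaluates to the assigned pointer (NULL on failure), which is what lets the hunk above chain several allocations with || and return AVERROR(ENOMEM) in one place. Roughly (paraphrased; see internal.h for the authoritative definition):

    /* Paraphrase: allocate nelem elements sized from the pointee type and
     * assign to p; the expression yields p, so a caller can write
     * if (!FF_ALLOC_TYPED_ARRAY(p, n)) return AVERROR(ENOMEM);
     * Uses av_malloc_array() from libavutil/mem.h. */
    #define FF_ALLOC_TYPED_ARRAY(p, nelem) \
        (p = av_malloc_array(nelem, sizeof(*(p))))

Sizing from sizeof(*(p)) also removes the explicit sizeof(*s->paths) style arguments the old calls had to pass.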
@@ -123,7 +119,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
         avctx->bits_per_coded_sample = 4;
         avctx->block_align = BLKSIZE;
         if (!(avctx->extradata = av_malloc(32 + AV_INPUT_BUFFER_PADDING_SIZE)))
-            goto error;
+            return AVERROR(ENOMEM);
         avctx->extradata_size = 32;
         extradata = avctx->extradata;
         bytestream_put_le16(&extradata, avctx->frame_size);
@@ -143,8 +139,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
             avctx->sample_rate != 44100) {
             av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
                    "22050 or 44100\n");
-            ret = AVERROR(EINVAL);
-            goto error;
+            return AVERROR(EINVAL);
         }
         avctx->frame_size = 512 * (avctx->sample_rate / 11025);
         break;
@@ -153,13 +148,10 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
         avctx->block_align = BLKSIZE;
         break;
     default:
-        ret = AVERROR(EINVAL);
-        goto error;
+        return AVERROR(EINVAL);
     }
 
     return 0;
-error:
-    return ret;
 }
 
 static av_cold int adpcm_encode_close(AVCodecContext *avctx)
@@ -523,7 +515,8 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
 
         /* stereo: 4 bytes (8 samples) for left, 4 bytes for right */
         if (avctx->trellis > 0) {
-            FF_ALLOC_ARRAY_OR_GOTO(avctx, buf, avctx->channels, blocks * 8, error);
+            if (!FF_ALLOC_TYPED_ARRAY(buf, avctx->channels * blocks * 8))
+                return AVERROR(ENOMEM);
             for (ch = 0; ch < avctx->channels; ch++) {
                 adpcm_compress_trellis(avctx, &samples_p[ch][1],
                                        buf + ch * blocks * 8, &c->status[ch],
@@ -618,7 +611,8 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
         }
 
         if (avctx->trellis > 0) {
-            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
+            if (!(buf = av_malloc(2 * n)))
+                return AVERROR(ENOMEM);
             adpcm_compress_trellis(avctx, samples + avctx->channels, buf,
                                    &c->status[0], n, avctx->channels);
             if (avctx->channels == 2)
@@ -666,7 +660,8 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
 
         if (avctx->trellis > 0) {
             n = avctx->block_align - 7 * avctx->channels;
-            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
+            if (!(buf = av_malloc(2 * n)))
+                return AVERROR(ENOMEM);
             if (avctx->channels == 1) {
                 adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
                                        avctx->channels);
@@ -693,7 +688,8 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
     case AV_CODEC_ID_ADPCM_YAMAHA:
         n = frame->nb_samples / 2;
         if (avctx->trellis > 0) {
-            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 2, error);
+            if (!(buf = av_malloc(2 * n * 2)))
+                return AVERROR(ENOMEM);
             n *= 2;
             if (avctx->channels == 1) {
                 adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
@@ -724,8 +720,6 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
     avpkt->size = pkt_size;
     *got_packet_ptr = 1;
     return 0;
-error:
-    return AVERROR(ENOMEM);
 }
 
 static const enum AVSampleFormat sample_fmts[] = {