s302m: convert to new channel layout API
Signed-off-by: Vittorio Giovara <vittorio.giovara@gmail.com>
Signed-off-by: Anton Khirnov <anton@khirnov.net>
Signed-off-by: James Almer <jamrial@gmail.com>
commit: 3caf14e0a4
parent: 1d4e6ce31c
committed by: James Almer
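
As context for the review, here is a minimal, self-contained sketch of the migration pattern the diff below applies: the deprecated avctx->channels / avctx->channel_layout pair is replaced by the single avctx->ch_layout struct. The helper name set_channel_layout is made up for illustration; the API calls themselves (av_channel_layout_uninit, av_channel_layout_from_mask, the AV_CHANNEL_LAYOUT_* initializers) are the ones used in the commit.

    #include <libavcodec/avcodec.h>
    #include <libavutil/channel_layout.h>

    /* Illustrative helper (not part of the commit): set avctx->ch_layout from a
     * raw channel count, mirroring the pattern in s302m_parse_frame_header(). */
    static void set_channel_layout(AVCodecContext *avctx, int channels)
    {
        /* Release any previously attached custom channel map and zero the struct. */
        av_channel_layout_uninit(&avctx->ch_layout);

        switch (channels) {
        case 2:
            /* Named layouts can be assigned from the compound-literal initializers. */
            avctx->ch_layout = (AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO;
            break;
        case 8:
            /* Arbitrary bitmasks go through av_channel_layout_from_mask(). */
            av_channel_layout_from_mask(&avctx->ch_layout,
                                        AV_CH_LAYOUT_5POINT1_BACK | AV_CH_LAYOUT_STEREO_DOWNMIX);
            break;
        default:
            /* Unknown arrangement: record only the channel count. */
            avctx->ch_layout.order       = AV_CHANNEL_ORDER_UNSPEC;
            avctx->ch_layout.nb_channels = channels;
            break;
        }
    }

    /* Readers now query avctx->ch_layout.nb_channels instead of the
     * deprecated avctx->channels field, e.g.:
     *     int channels = avctx->ch_layout.nb_channels;
     */

The av_channel_layout_uninit() call before assignment frees any custom channel map the context may already own, which is why the decoder hunk below adds it ahead of the switch.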
libavcodec/s302m.c
@@ -72,19 +72,25 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
     else
         avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
-    avctx->channels = channels;
+    av_channel_layout_uninit(&avctx->ch_layout);
     switch(channels) {
         case 2:
-            avctx->channel_layout = AV_CH_LAYOUT_STEREO;
+            avctx->ch_layout = (AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO;
             break;
         case 4:
-            avctx->channel_layout = AV_CH_LAYOUT_QUAD;
+            avctx->ch_layout = (AVChannelLayout)AV_CHANNEL_LAYOUT_QUAD;
             break;
         case 6:
-            avctx->channel_layout = AV_CH_LAYOUT_5POINT1_BACK;
+            avctx->ch_layout = (AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT1_BACK;
             break;
         case 8:
-            avctx->channel_layout = AV_CH_LAYOUT_5POINT1_BACK | AV_CH_LAYOUT_STEREO_DOWNMIX;
+            av_channel_layout_from_mask(&avctx->ch_layout,
+                                        AV_CH_LAYOUT_5POINT1_BACK | AV_CH_LAYOUT_STEREO_DOWNMIX);
+            break;
+        default:
+            avctx->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+            avctx->ch_layout.nb_channels = channels;
+            break;
     }
 
     return frame_size;
@@ -97,7 +103,7 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
     AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    int block_size, ret;
+    int block_size, ret, channels;
     int i;
     int non_pcm_data_type = -1;
 
@@ -110,13 +116,14 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     block_size = (avctx->bits_per_raw_sample + 4) / 4;
-    frame->nb_samples = 2 * (buf_size / block_size) / avctx->channels;
+    channels = avctx->ch_layout.nb_channels;
+    frame->nb_samples = 2 * (buf_size / block_size) / channels;
     if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
         return ret;
 
-    avctx->bit_rate = 48000 * avctx->channels * (avctx->bits_per_raw_sample + 4) +
+    avctx->bit_rate = 48000 * channels * (avctx->bits_per_raw_sample + 4) +
                       32 * 48000 / frame->nb_samples;
-    buf_size = (frame->nb_samples * avctx->channels / 2) * block_size;
+    buf_size = (frame->nb_samples * channels / 2) * block_size;
 
     if (avctx->bits_per_raw_sample == 24) {
         uint32_t *o = (uint32_t *)frame->data[0];
@@ -131,7 +138,7 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
             buf += 7;
         }
         o = (uint32_t *)frame->data[0];
-        if (avctx->channels == 2)
+        if (channels == 2)
            for (i=0; i<frame->nb_samples * 2 - 6; i+=2) {
                if (o[i] || o[i+1] || o[i+2] || o[i+3])
                    break;
@@ -152,7 +159,7 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
             buf += 6;
         }
         o = (uint32_t *)frame->data[0];
-        if (avctx->channels == 2)
+        if (channels == 2)
            for (i=0; i<frame->nb_samples * 2 - 6; i+=2) {
                if (o[i] || o[i+1] || o[i+2] || o[i+3])
                    break;
@@ -172,7 +179,7 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
             buf += 5;
         }
         o = (uint16_t *)frame->data[0];
-        if (avctx->channels == 2)
+        if (channels == 2)
            for (i=0; i<frame->nb_samples * 2 - 6; i+=2) {
                if (o[i] || o[i+1] || o[i+2] || o[i+3])
                    break;
libavcodec/s302menc.c
@@ -37,10 +37,10 @@ static av_cold int s302m_encode_init(AVCodecContext *avctx)
 {
     S302MEncContext *s = avctx->priv_data;
 
-    if (avctx->channels & 1 || avctx->channels > 8) {
+    if (avctx->ch_layout.nb_channels & 1 || avctx->ch_layout.nb_channels > 8) {
         av_log(avctx, AV_LOG_ERROR,
                "Encoding %d channel(s) is not allowed. Only 2, 4, 6 and 8 channels are supported.\n",
-               avctx->channels);
+               avctx->ch_layout.nb_channels);
         return AVERROR(EINVAL);
     }
 
@@ -61,7 +61,7 @@ static av_cold int s302m_encode_init(AVCodecContext *avctx)
     }
 
     avctx->frame_size = 0;
-    avctx->bit_rate = 48000 * avctx->channels *
+    avctx->bit_rate = 48000 * avctx->ch_layout.nb_channels *
                       (avctx->bits_per_raw_sample + 4);
     s->framing_index = 0;
 
@@ -72,9 +72,9 @@ static int s302m_encode2_frame(AVCodecContext *avctx, AVPacket *avpkt,
                                const AVFrame *frame, int *got_packet_ptr)
 {
     S302MEncContext *s = avctx->priv_data;
+    const int nb_channels = avctx->ch_layout.nb_channels;
     const int buf_size = AES3_HEADER_LEN +
-                         (frame->nb_samples *
-                          avctx->channels *
+                         (frame->nb_samples * nb_channels *
                           (avctx->bits_per_raw_sample + 4)) / 8;
     int ret, c, channels;
     uint8_t *o;
@@ -91,7 +91,7 @@ static int s302m_encode2_frame(AVCodecContext *avctx, AVPacket *avpkt,
     o = avpkt->data;
     init_put_bits(&pb, o, buf_size);
     put_bits(&pb, 16, buf_size - AES3_HEADER_LEN);
-    put_bits(&pb, 2, (avctx->channels - 2) >> 1); // number of channels
+    put_bits(&pb, 2, (nb_channels - 2) >> 1); // number of channels
     put_bits(&pb, 8, 0); // channel ID
     put_bits(&pb, 2, (avctx->bits_per_raw_sample - 16) / 4); // bits per samples (0 = 16bit, 1 = 20bit, 2 = 24bit)
     put_bits(&pb, 4, 0); // alignments
@@ -104,7 +104,7 @@ static int s302m_encode2_frame(AVCodecContext *avctx, AVPacket *avpkt,
         for (c = 0; c < frame->nb_samples; c++) {
             uint8_t vucf = s->framing_index == 0 ? 0x10: 0;
 
-            for (channels = 0; channels < avctx->channels; channels += 2) {
+            for (channels = 0; channels < nb_channels; channels += 2) {
                 o[0] = ff_reverse[(samples[0] & 0x0000FF00) >> 8];
                 o[1] = ff_reverse[(samples[0] & 0x00FF0000) >> 16];
                 o[2] = ff_reverse[(samples[0] & 0xFF000000) >> 24];
@@ -126,7 +126,7 @@ static int s302m_encode2_frame(AVCodecContext *avctx, AVPacket *avpkt,
         for (c = 0; c < frame->nb_samples; c++) {
             uint8_t vucf = s->framing_index == 0 ? 0x80: 0;
 
-            for (channels = 0; channels < avctx->channels; channels += 2) {
+            for (channels = 0; channels < nb_channels; channels += 2) {
                 o[0] = ff_reverse[ (samples[0] & 0x000FF000) >> 12];
                 o[1] = ff_reverse[ (samples[0] & 0x0FF00000) >> 20];
                 o[2] = ff_reverse[((samples[0] & 0xF0000000) >> 28) | vucf];
@@ -147,7 +147,7 @@ static int s302m_encode2_frame(AVCodecContext *avctx, AVPacket *avpkt,
         for (c = 0; c < frame->nb_samples; c++) {
             uint8_t vucf = s->framing_index == 0 ? 0x10 : 0;
 
-            for (channels = 0; channels < avctx->channels; channels += 2) {
+            for (channels = 0; channels < nb_channels; channels += 2) {
                 o[0] = ff_reverse[ samples[0] & 0xFF];
                 o[1] = ff_reverse[(samples[0] & 0xFF00) >> 8];
                 o[2] = ff_reverse[(samples[1] & 0x0F) << 4] | vucf;