1
0
mirror of https://github.com/FFmpeg/FFmpeg.git synced 2024-12-23 12:43:46 +02:00

g722enc: split encoding into separate functions for trellis vs. no trellis

This commit is contained in:
Justin Ruggles 2012-01-05 23:34:09 -05:00
parent 96219141e2
commit 34093ba081

View File

@ -117,13 +117,12 @@ static inline int encode_low(const struct G722Band* state, int xlow)
return (diff < 0 ? (i < 2 ? 63 : 33) : 61) - i; return (diff < 0 ? (i < 2 ? 63 : 33) : 61) - i;
} }
static int g722_encode_trellis(AVCodecContext *avctx, static void g722_encode_trellis(G722Context *c, int trellis,
uint8_t *dst, int buf_size, void *data) uint8_t *dst, int nb_samples,
const int16_t *samples)
{ {
G722Context *c = avctx->priv_data;
const int16_t *samples = data;
int i, j, k; int i, j, k;
int frontier = 1 << avctx->trellis; int frontier = 1 << trellis;
struct TrellisNode **nodes[2]; struct TrellisNode **nodes[2];
struct TrellisNode **nodes_next[2]; struct TrellisNode **nodes_next[2];
int pathn[2] = {0, 0}, froze = -1; int pathn[2] = {0, 0}, froze = -1;
@ -139,7 +138,7 @@ static int g722_encode_trellis(AVCodecContext *avctx,
nodes[i][0]->state = c->band[i]; nodes[i][0]->state = c->band[i];
} }
for (i = 0; i < buf_size; i++) { for (i = 0; i < nb_samples >> 1; i++) {
int xlow, xhigh; int xlow, xhigh;
struct TrellisNode *next[2]; struct TrellisNode *next[2];
int heap_pos[2] = {0, 0}; int heap_pos[2] = {0, 0};
@ -271,8 +270,28 @@ static int g722_encode_trellis(AVCodecContext *avctx,
} }
c->band[0] = nodes[0][0]->state; c->band[0] = nodes[0][0]->state;
c->band[1] = nodes[1][0]->state; c->band[1] = nodes[1][0]->state;
}
/**
 * Encode one pair of input samples into a single G.722 byte.
 *
 * The QMF analysis filter splits the sample pair into a low and a high
 * subband value; each subband is then ADPCM-quantized and its predictor
 * state updated before the two quantizer indices are packed into *dst.
 */
static av_always_inline void encode_byte(G722Context *c, uint8_t *dst,
                                         const int16_t *samples)
{
    int xlow, xhigh;
    int ilow, ihigh;

    filter_samples(c, samples, &xlow, &xhigh);

    /* The two bands are independent; process the low band first. */
    ilow = encode_low(&c->band[0], xlow);
    /* Low-band predictor keeps only the top 4 of the 6 index bits. */
    ff_g722_update_low_predictor(&c->band[0], ilow >> 2);

    ihigh = encode_high(&c->band[1], xhigh);
    ff_g722_update_high_predictor(&c->band[1], c->band[1].scale_factor *
                                  ff_g722_high_inv_quant[ihigh] >> 10, ihigh);

    /* Pack: 2 bits of high-band index above 6 bits of low-band index. */
    *dst = ihigh << 6 | ilow;
}
/**
 * Plain (non-trellis) G.722 encoding path.
 *
 * Walks the input in steps of two samples and emits one output byte per
 * pair via encode_byte(); predictor state in *c is updated as a side
 * effect. nb_samples is a count of input samples, so nb_samples / 2
 * bytes are written to dst.
 */
static void g722_encode_no_trellis(G722Context *c,
                                   uint8_t *dst, int nb_samples,
                                   const int16_t *samples)
{
    const int16_t *in  = samples;
    const int16_t *end = samples + nb_samples;

    while (in < end) {
        encode_byte(c, dst++, in);
        in += 2;
    }
}
static int g722_encode_frame(AVCodecContext *avctx, static int g722_encode_frame(AVCodecContext *avctx,
@ -280,22 +299,16 @@ static int g722_encode_frame(AVCodecContext *avctx,
{ {
G722Context *c = avctx->priv_data; G722Context *c = avctx->priv_data;
const int16_t *samples = data; const int16_t *samples = data;
int i; int nb_samples;
nb_samples = buf_size * 2;
if (avctx->trellis) if (avctx->trellis)
return g722_encode_trellis(avctx, dst, buf_size, data); g722_encode_trellis(c, avctx->trellis, dst, nb_samples, samples);
else
g722_encode_no_trellis(c, dst, nb_samples, samples);
for (i = 0; i < buf_size; i++) { return buf_size;
int xlow, xhigh, ihigh, ilow;
filter_samples(c, &samples[2*i], &xlow, &xhigh);
ihigh = encode_high(&c->band[1], xhigh);
ilow = encode_low(&c->band[0], xlow);
ff_g722_update_high_predictor(&c->band[1], c->band[1].scale_factor *
ff_g722_high_inv_quant[ihigh] >> 10, ihigh);
ff_g722_update_low_predictor(&c->band[0], ilow >> 2);
*dst++ = ihigh << 6 | ilow;
}
return i;
} }
AVCodec ff_adpcm_g722_encoder = { AVCodec ff_adpcm_g722_encoder = {