
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  Remove ffmpeg.
  aacenc: Simplify windowing
  aacenc: Move saved overlap samples to the beginning of the same buffer as incoming samples.
  aacenc: Deinterleave input samples before processing.
  aacenc: Store channel count in AACEncContext.
  aacenc: Move Q^3/4 calculation to it's own table
  aacenc: Request normalized float samples instead of converting s16 samples to float.
  aacpsy: Replace an if with FFMAX in LAME windowing.
  aacenc: cosmetics, replace 'rd' with 'bits' in codebook_trellis_rate to make it more clear what is being calculated.
  aacpsy: cosmetics, change a FIXME to a NOTE about subshort comparisons
  aacenc: cosmetics: move init() and end() to the bottom of the file.
  aacenc: aac_encode_init() cleanup
  XWD encoder and decoder
  vc1: don't read the interpfrm and bfraction elements for interlaced frames
  mxfdec: fix memleak on mxf_read_close()
  westwood: split the AUD and VQA demuxers into separate files.

Conflicts:
	.gitignore
	Changelog
	Makefile
	configure
	doc/ffmpeg.texi
	ffmpeg.c
	libavcodec/Makefile
	libavcodec/aacenc.c
	libavcodec/allcodecs.c
	libavcodec/avcodec.h
	libavcodec/version.h
	libavformat/Makefile
	libavformat/img2.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Committed by Michael Niedermayer on 2012-01-24 02:41:53 +01:00
commit 0bb57f8bf0
20 changed files with 1030 additions and 345 deletions

View File

@@ -15,6 +15,7 @@ version next:
- amerge audio filter
- GSM audio parser
- SMJPEG muxer
- XWD encoder and decoder
- Automatic thread count based on detection number of (available) CPU cores
- y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
- ffprobe -show_error option

View File

@@ -399,6 +399,8 @@ following image formats are supported:
@tab YUV, JPEG and some extension is not supported yet.
@item Truevision Targa @tab X @tab X
@tab Targa (.TGA) image format
@item XWD @tab X @tab X
@tab X Window Dump image format
@end multitable
@code{X} means that encoding (resp. decoding) is supported.

View File

@@ -470,6 +470,8 @@ OBJS-$(CONFIG_XBIN_DECODER) += bintext.o cga_data.o
OBJS-$(CONFIG_XL_DECODER) += xl.o
OBJS-$(CONFIG_XSUB_DECODER) += xsubdec.o
OBJS-$(CONFIG_XSUB_ENCODER) += xsubenc.o
OBJS-$(CONFIG_XWD_DECODER) += xwddec.o
OBJS-$(CONFIG_XWD_ENCODER) += xwdenc.o
OBJS-$(CONFIG_Y41P_DECODER) += y41pdec.o
OBJS-$(CONFIG_Y41P_ENCODER) += y41penc.o
OBJS-$(CONFIG_YOP_DECODER) += yop.o

View File

@@ -110,14 +110,15 @@ static av_always_inline float quantize_and_encode_band_cost_template(
int *bits, int BT_ZERO, int BT_UNSIGNED,
int BT_PAIR, int BT_ESC)
{
const float IQ = ff_aac_pow2sf_tab[POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
const float Q = ff_aac_pow2sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
const int q_idx = POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512;
const float Q = ff_aac_pow2sf_tab [q_idx];
const float Q34 = ff_aac_pow34sf_tab[q_idx];
const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
const float CLIPPED_ESCAPE = 165140.0f*IQ;
int i, j;
float cost = 0;
const int dim = BT_PAIR ? 2 : 4;
int resbits = 0;
const float Q34 = sqrtf(Q * sqrtf(Q));
const int range = aac_cb_range[cb];
const int maxval = aac_cb_maxval[cb];
int off;
@@ -420,7 +421,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
const int run_esc = (1 << run_bits) - 1;
int idx, ppos, count;
int stackrun[120], stackcb[120], stack_len;
float next_minrd = INFINITY;
float next_minbits = INFINITY;
int next_mincb = 0;
abs_pow34_v(s->scoefs, sce->coeffs, 1024);
@@ -434,7 +435,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
size = sce->ics.swb_sizes[swb];
if (sce->zeroes[win*16 + swb]) {
float cost_stay_here = path[swb][0].cost;
float cost_get_here = next_minrd + run_bits + 4;
float cost_get_here = next_minbits + run_bits + 4;
if ( run_value_bits[sce->ics.num_windows == 8][path[swb][0].run]
!= run_value_bits[sce->ics.num_windows == 8][path[swb][0].run+1])
cost_stay_here += run_bits;
@@ -447,7 +448,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
path[swb+1][0].cost = cost_stay_here;
path[swb+1][0].run = path[swb][0].run + 1;
}
next_minrd = path[swb+1][0].cost;
next_minbits = path[swb+1][0].cost;
next_mincb = 0;
for (cb = 1; cb < 12; cb++) {
path[swb+1][cb].cost = 61450;
@@ -455,10 +456,10 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
path[swb+1][cb].run = 0;
}
} else {
float minrd = next_minrd;
float minbits = next_minbits;
int mincb = next_mincb;
int startcb = sce->band_type[win*16+swb];
next_minrd = INFINITY;
next_minbits = INFINITY;
next_mincb = 0;
for (cb = 0; cb < startcb; cb++) {
path[swb+1][cb].cost = 61450;
@@ -467,15 +468,15 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
}
for (cb = startcb; cb < 12; cb++) {
float cost_stay_here, cost_get_here;
float rd = 0.0f;
float bits = 0.0f;
for (w = 0; w < group_len; w++) {
rd += quantize_band_cost(s, sce->coeffs + start + w*128,
s->scoefs + start + w*128, size,
sce->sf_idx[(win+w)*16+swb], cb,
0, INFINITY, NULL);
bits += quantize_band_cost(s, sce->coeffs + start + w*128,
s->scoefs + start + w*128, size,
sce->sf_idx[(win+w)*16+swb], cb,
0, INFINITY, NULL);
}
cost_stay_here = path[swb][cb].cost + rd;
cost_get_here = minrd + rd + run_bits + 4;
cost_stay_here = path[swb][cb].cost + bits;
cost_get_here = minbits + bits + run_bits + 4;
if ( run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run]
!= run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run+1])
cost_stay_here += run_bits;
@@ -488,8 +489,8 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
path[swb+1][cb].cost = cost_stay_here;
path[swb+1][cb].run = path[swb][cb].run + 1;
}
if (path[swb+1][cb].cost < next_minrd) {
next_minrd = path[swb+1][cb].cost;
if (path[swb+1][cb].cost < next_minbits) {
next_minbits = path[swb+1][cb].cost;
next_mincb = cb;
}
}
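
[Editor's note: the first hunk above swaps the per-call Q34 = sqrtf(Q * sqrtf(Q)) for a lookup into the new ff_aac_pow34sf_tab, which is filled in aac_encode_init() later in this commit. A minimal standalone sketch of the identity the table relies on, x^(3/4) == sqrt(x * sqrt(x)); the loop bounds and test values below are invented for illustration and are not part of the patch.]

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* sqrt(x * sqrt(x)) == x^(3/4) for x >= 0, so precomputing the table
     * once is equivalent to the old per-call sqrtf() chain */
    for (double x = 0.5; x < 2000.0; x *= 7.3) {
        double direct = pow(x, 0.75);
        double nested = sqrt(x * sqrt(x));
        printf("x = %10.4f   x^0.75 = %12.6f   sqrt(x*sqrt(x)) = %12.6f\n",
               x, direct, nested);
    }
    return 0;
}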

View File

@@ -46,6 +46,14 @@
#define AAC_MAX_CHANNELS 6
#define ERROR_IF(cond, ...) \
if (cond) { \
av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
return AVERROR(EINVAL); \
}
float ff_aac_pow34sf_tab[428];
static const uint8_t swb_size_1024_96[] = {
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8,
12, 12, 12, 12, 12, 16, 16, 24, 28, 36, 44,
@@ -135,6 +143,18 @@ static const uint8_t aac_chan_configs[6][5] = {
{4, TYPE_SCE, TYPE_CPE, TYPE_CPE, TYPE_LFE}, // 6 channels - front center + stereo + back stereo + LFE
};
/**
* Table to remap channels from Libav's default order to AAC order.
*/
static const uint8_t aac_chan_maps[AAC_MAX_CHANNELS][AAC_MAX_CHANNELS] = {
{ 0 },
{ 0, 1 },
{ 2, 0, 1 },
{ 2, 0, 1, 3 },
{ 2, 0, 1, 3, 4 },
{ 2, 0, 1, 4, 5, 3 },
};
/**
* Make AAC audio config object.
* @see 1.6.2.1 "Syntax - AudioSpecificConfig"
@@ -147,7 +167,7 @@ static void put_audio_specific_config(AVCodecContext *avctx)
init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8);
put_bits(&pb, 5, 2); //object type - AAC-LC
put_bits(&pb, 4, s->samplerate_index); //sample rate index
put_bits(&pb, 4, avctx->channels);
put_bits(&pb, 4, s->channels);
//GASpecificConfig
put_bits(&pb, 1, 0); //frame length - 1024 samples
put_bits(&pb, 1, 0); //does not depend on core coder
@@ -160,117 +180,80 @@ static void put_audio_specific_config(AVCodecContext *avctx)
flush_put_bits(&pb);
}
static av_cold int aac_encode_init(AVCodecContext *avctx)
#define WINDOW_FUNC(type) \
static void apply_ ##type ##_window(DSPContext *dsp, SingleChannelElement *sce, const float *audio)
WINDOW_FUNC(only_long)
{
AACEncContext *s = avctx->priv_data;
int i;
const uint8_t *sizes[2];
uint8_t grouping[AAC_MAX_CHANNELS];
int lengths[2];
const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
float *out = sce->ret;
avctx->frame_size = 1024;
for (i = 0; i < 16; i++)
if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i])
break;
if (i == 16) {
av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d\n", avctx->sample_rate);
return -1;
}
if (avctx->channels > AAC_MAX_CHANNELS) {
av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %d\n", avctx->channels);
return -1;
}
if (avctx->profile != FF_PROFILE_UNKNOWN && avctx->profile != FF_PROFILE_AAC_LOW) {
av_log(avctx, AV_LOG_ERROR, "Unsupported profile %d\n", avctx->profile);
return -1;
}
if (1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * avctx->channels) {
av_log(avctx, AV_LOG_ERROR, "Too many bits per frame requested\n");
return -1;
}
s->samplerate_index = i;
dsputil_init(&s->dsp, avctx);
ff_mdct_init(&s->mdct1024, 11, 0, 1.0);
ff_mdct_init(&s->mdct128, 8, 0, 1.0);
// window init
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
ff_init_ff_sine_windows(10);
ff_init_ff_sine_windows(7);
s->chan_map = aac_chan_configs[avctx->channels-1];
s->samples = av_malloc(2 * 1024 * avctx->channels * sizeof(s->samples[0]));
s->cpe = av_mallocz(sizeof(ChannelElement) * s->chan_map[0]);
avctx->extradata = av_mallocz(5 + FF_INPUT_BUFFER_PADDING_SIZE);
avctx->extradata_size = 5;
put_audio_specific_config(avctx);
sizes[0] = swb_size_1024[i];
sizes[1] = swb_size_128[i];
lengths[0] = ff_aac_num_swb_1024[i];
lengths[1] = ff_aac_num_swb_128[i];
for (i = 0; i < s->chan_map[0]; i++)
grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping);
s->psypp = ff_psy_preprocess_init(avctx);
s->coder = &ff_aac_coders[s->options.aac_coder];
s->lambda = avctx->global_quality ? avctx->global_quality : 120;
ff_aac_tableinit();
return 0;
dsp->vector_fmul (out, audio, lwindow, 1024);
dsp->vector_fmul_reverse(out + 1024, audio + 1024, pwindow, 1024);
}
static void apply_window_and_mdct(AVCodecContext *avctx, AACEncContext *s,
SingleChannelElement *sce, short *audio)
WINDOW_FUNC(long_start)
{
int i, k;
const int chans = avctx->channels;
const float * lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
const float * swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
const float * pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
const float *lwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
float *out = sce->ret;
dsp->vector_fmul(out, audio, lwindow, 1024);
memcpy(out + 1024, audio, sizeof(out[0]) * 448);
dsp->vector_fmul_reverse(out + 1024 + 448, audio, swindow, 128);
memset(out + 1024 + 576, 0, sizeof(out[0]) * 448);
}
WINDOW_FUNC(long_stop)
{
const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
const float *swindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
float *out = sce->ret;
memset(out, 0, sizeof(out[0]) * 448);
dsp->vector_fmul(out + 448, audio + 448, swindow, 128);
memcpy(out + 576, audio + 576, sizeof(out[0]) * 448);
dsp->vector_fmul_reverse(out + 1024, audio + 1024, lwindow, 1024);
}
WINDOW_FUNC(eight_short)
{
const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
const float *in = audio + 448;
float *out = sce->ret;
for (int w = 0; w < 8; w++) {
dsp->vector_fmul (out, in, w ? pwindow : swindow, 128);
out += 128;
in += 128;
dsp->vector_fmul_reverse(out, in, swindow, 128);
out += 128;
}
}
static void (*const apply_window[4])(DSPContext *dsp, SingleChannelElement *sce, const float *audio) = {
[ONLY_LONG_SEQUENCE] = apply_only_long_window,
[LONG_START_SEQUENCE] = apply_long_start_window,
[EIGHT_SHORT_SEQUENCE] = apply_eight_short_window,
[LONG_STOP_SEQUENCE] = apply_long_stop_window
};
static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce,
float *audio)
{
int i;
float *output = sce->ret;
if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
memcpy(output, sce->saved, sizeof(float)*1024);
if (sce->ics.window_sequence[0] == LONG_STOP_SEQUENCE) {
memset(output, 0, sizeof(output[0]) * 448);
for (i = 448; i < 576; i++)
output[i] = sce->saved[i] * pwindow[i - 448];
for (i = 576; i < 704; i++)
output[i] = sce->saved[i];
}
if (sce->ics.window_sequence[0] != LONG_START_SEQUENCE) {
for (i = 0; i < 1024; i++) {
output[i+1024] = audio[i * chans] * lwindow[1024 - i - 1];
sce->saved[i] = audio[i * chans] * lwindow[i];
}
} else {
for (i = 0; i < 448; i++)
output[i+1024] = audio[i * chans];
for (; i < 576; i++)
output[i+1024] = audio[i * chans] * swindow[576 - i - 1];
memset(output+1024+576, 0, sizeof(output[0]) * 448);
for (i = 0; i < 1024; i++)
sce->saved[i] = audio[i * chans];
}
apply_window[sce->ics.window_sequence[0]](&s->dsp, sce, audio);
if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE)
s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output);
} else {
for (k = 0; k < 1024; k += 128) {
for (i = 448 + k; i < 448 + k + 256; i++)
output[i - 448 - k] = (i < 1024)
? sce->saved[i]
: audio[(i-1024)*chans];
s->dsp.vector_fmul (output, output, k ? swindow : pwindow, 128);
s->dsp.vector_fmul_reverse(output+128, output+128, swindow, 128);
s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + k, output);
}
for (i = 0; i < 1024; i++)
sce->saved[i] = audio[i * chans];
}
else
for (i = 0; i < 1024; i += 128)
s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + i, output + i*2);
memcpy(audio, audio + 1024, sizeof(audio[0]) * 1024);
}
/**
@@ -488,11 +471,37 @@ static void put_bitstream_info(AVCodecContext *avctx, AACEncContext *s,
put_bits(&s->pb, 12 - padbits, 0);
}
/*
* Deinterleave input samples.
* Channels are reordered from Libav's default order to AAC order.
*/
static void deinterleave_input_samples(AACEncContext *s,
const float *samples)
{
int ch, i;
const int sinc = s->channels;
const uint8_t *channel_map = aac_chan_maps[sinc - 1];
/* deinterleave and remap input samples */
for (ch = 0; ch < sinc; ch++) {
const float *sptr = samples + channel_map[ch];
/* copy last 1024 samples of previous frame to the start of the current frame */
memcpy(&s->planar_samples[ch][0], &s->planar_samples[ch][1024], 1024 * sizeof(s->planar_samples[0][0]));
/* deinterleave */
for (i = 1024; i < 1024 * 2; i++) {
s->planar_samples[ch][i] = *sptr;
sptr += sinc;
}
}
}
static int aac_encode_frame(AVCodecContext *avctx,
uint8_t *frame, int buf_size, void *data)
{
AACEncContext *s = avctx->priv_data;
int16_t *samples = s->samples, *samples2, *la;
float **samples = s->planar_samples, *samples2, *la, *overlap;
ChannelElement *cpe;
int i, ch, w, g, chans, tag, start_ch;
int chan_el_counter[4];
@@ -500,27 +509,15 @@ static int aac_encode_frame(AVCodecContext *avctx,
if (s->last_frame)
return 0;
if (data) {
if (!s->psypp) {
memcpy(s->samples + 1024 * avctx->channels, data,
1024 * avctx->channels * sizeof(s->samples[0]));
} else {
start_ch = 0;
samples2 = s->samples + 1024 * avctx->channels;
for (i = 0; i < s->chan_map[0]; i++) {
tag = s->chan_map[i+1];
chans = tag == TYPE_CPE ? 2 : 1;
ff_psy_preprocess(s->psypp, (uint16_t*)data + start_ch,
samples2 + start_ch, start_ch, chans);
start_ch += chans;
}
}
deinterleave_input_samples(s, data);
if (s->psypp)
ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);
}
if (!avctx->frame_number) {
memcpy(s->samples, s->samples + 1024 * avctx->channels,
1024 * avctx->channels * sizeof(s->samples[0]));
if (!avctx->frame_number)
return 0;
}
start_ch = 0;
for (i = 0; i < s->chan_map[0]; i++) {
@@ -531,8 +528,9 @@ static int aac_encode_frame(AVCodecContext *avctx,
for (ch = 0; ch < chans; ch++) {
IndividualChannelStream *ics = &cpe->ch[ch].ics;
int cur_channel = start_ch + ch;
samples2 = samples + cur_channel;
la = samples2 + (448+64) * avctx->channels;
overlap = &samples[cur_channel][0];
samples2 = overlap + 1024;
la = samples2 + (448+64);
if (!data)
la = NULL;
if (tag == TYPE_LFE) {
@@ -560,7 +558,7 @@ static int aac_encode_frame(AVCodecContext *avctx,
for (w = 0; w < ics->num_windows; w++)
ics->group_len[w] = wi[ch].grouping[w];
apply_window_and_mdct(avctx, s, &cpe->ch[ch], samples2);
apply_window_and_mdct(s, &cpe->ch[ch], overlap);
}
start_ch += chans;
}
@@ -626,8 +624,8 @@ static int aac_encode_frame(AVCodecContext *avctx,
}
frame_bits = put_bits_count(&s->pb);
if (frame_bits <= 6144 * avctx->channels - 3) {
s->psy.bitres.bits = frame_bits / avctx->channels;
if (frame_bits <= 6144 * s->channels - 3) {
s->psy.bitres.bits = frame_bits / s->channels;
break;
}
@@ -648,8 +646,7 @@ static int aac_encode_frame(AVCodecContext *avctx,
if (!data)
s->last_frame = 1;
memcpy(s->samples, s->samples + 1024 * avctx->channels,
1024 * avctx->channels * sizeof(s->samples[0]));
return put_bits_count(&s->pb)>>3;
}
@@ -660,12 +657,109 @@ static av_cold int aac_encode_end(AVCodecContext *avctx)
ff_mdct_end(&s->mdct1024);
ff_mdct_end(&s->mdct128);
ff_psy_end(&s->psy);
ff_psy_preprocess_end(s->psypp);
av_freep(&s->samples);
if (s->psypp)
ff_psy_preprocess_end(s->psypp);
av_freep(&s->buffer.samples);
av_freep(&s->cpe);
return 0;
}
static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
{
int ret = 0;
dsputil_init(&s->dsp, avctx);
// window init
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
ff_init_ff_sine_windows(10);
ff_init_ff_sine_windows(7);
if (ret = ff_mdct_init(&s->mdct1024, 11, 0, 32768.0))
return ret;
if (ret = ff_mdct_init(&s->mdct128, 8, 0, 32768.0))
return ret;
return 0;
}
static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s)
{
FF_ALLOCZ_OR_GOTO(avctx, s->buffer.samples, 3 * 1024 * s->channels * sizeof(s->buffer.samples[0]), alloc_fail);
FF_ALLOCZ_OR_GOTO(avctx, s->cpe, sizeof(ChannelElement) * s->chan_map[0], alloc_fail);
FF_ALLOCZ_OR_GOTO(avctx, avctx->extradata, 5 + FF_INPUT_BUFFER_PADDING_SIZE, alloc_fail);
for(int ch = 0; ch < s->channels; ch++)
s->planar_samples[ch] = s->buffer.samples + 3 * 1024 * ch;
return 0;
alloc_fail:
return AVERROR(ENOMEM);
}
static av_cold int aac_encode_init(AVCodecContext *avctx)
{
AACEncContext *s = avctx->priv_data;
int i, ret = 0;
const uint8_t *sizes[2];
uint8_t grouping[AAC_MAX_CHANNELS];
int lengths[2];
avctx->frame_size = 1024;
for (i = 0; i < 16; i++)
if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i])
break;
s->channels = avctx->channels;
ERROR_IF(i == 16,
"Unsupported sample rate %d\n", avctx->sample_rate);
ERROR_IF(s->channels > AAC_MAX_CHANNELS,
"Unsupported number of channels: %d\n", s->channels);
ERROR_IF(avctx->profile != FF_PROFILE_UNKNOWN && avctx->profile != FF_PROFILE_AAC_LOW,
"Unsupported profile %d\n", avctx->profile);
ERROR_IF(1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * s->channels,
"Too many bits per frame requested\n");
s->samplerate_index = i;
s->chan_map = aac_chan_configs[s->channels-1];
if (ret = dsp_init(avctx, s))
goto fail;
if (ret = alloc_buffers(avctx, s))
goto fail;
avctx->extradata_size = 5;
put_audio_specific_config(avctx);
sizes[0] = swb_size_1024[i];
sizes[1] = swb_size_128[i];
lengths[0] = ff_aac_num_swb_1024[i];
lengths[1] = ff_aac_num_swb_128[i];
for (i = 0; i < s->chan_map[0]; i++)
grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
if (ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping))
goto fail;
s->psypp = ff_psy_preprocess_init(avctx);
s->coder = &ff_aac_coders[s->options.aac_coder];
s->lambda = avctx->global_quality ? avctx->global_quality : 120;
ff_aac_tableinit();
for (i = 0; i < 428; i++)
ff_aac_pow34sf_tab[i] = sqrt(ff_aac_pow2sf_tab[i] * sqrt(ff_aac_pow2sf_tab[i]));
return 0;
fail:
aac_encode_end(avctx);
return ret;
}
#define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
static const AVOption aacenc_options[] = {
{"stereo_mode", "Stereo coding method", offsetof(AACEncContext, options.stereo_mode), AV_OPT_TYPE_INT, {.dbl = 0}, -1, 1, AACENC_FLAGS, "stereo_mode"},
@@ -692,7 +786,7 @@ AVCodec ff_aac_encoder = {
.encode = aac_encode_frame,
.close = aac_encode_end,
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
.priv_class = &aacenc_class,
};
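
[Editor's note: to make the new deinterleave_input_samples() concrete, each output plane ch pulls its samples from interleaved input channel aac_chan_maps[channels-1][ch], so for 5.1 audio the front center lands in plane 0 and the LFE in plane 5. A self-contained sketch assuming a tiny invented frame size and sample values; it mirrors the remap loop but is not part of the patch.]

#include <stdio.h>

#define CHANNELS 6
#define FRAME    4   /* real frames are 1024 samples; kept tiny here */

/* mirrors the 6-channel row of aac_chan_maps: output plane -> input channel */
static const int chan_map[CHANNELS] = { 2, 0, 1, 4, 5, 3 };

int main(void)
{
    float interleaved[FRAME * CHANNELS], planar[CHANNELS][FRAME];
    int i, ch;

    /* encode (frame index, source channel) into each input sample */
    for (i = 0; i < FRAME; i++)
        for (ch = 0; ch < CHANNELS; ch++)
            interleaved[i * CHANNELS + ch] = i * 10 + ch;

    /* deinterleave + remap, the core of deinterleave_input_samples() */
    for (ch = 0; ch < CHANNELS; ch++)
        for (i = 0; i < FRAME; i++)
            planar[ch][i] = interleaved[i * CHANNELS + chan_map[ch]];

    for (ch = 0; ch < CHANNELS; ch++)
        printf("plane %d starts with %.0f (taken from input channel %d)\n",
               ch, planar[ch][0], chan_map[ch]);
    return 0;
}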

View File

@@ -61,9 +61,10 @@ typedef struct AACEncContext {
FFTContext mdct1024; ///< long (1024 samples) frame transform context
FFTContext mdct128; ///< short (128 samples) frame transform context
DSPContext dsp;
int16_t *samples; ///< saved preprocessed input
float *planar_samples[6]; ///< saved preprocessed input
int samplerate_index; ///< MPEG-4 samplerate index
int channels; ///< channel count
const uint8_t *chan_map; ///< channel configuration map
ChannelElement *cpe; ///< channel elements
@@ -75,6 +76,12 @@ typedef struct AACEncContext {
float lambda;
DECLARE_ALIGNED(16, int, qcoefs)[96]; ///< quantized coefficients
DECLARE_ALIGNED(32, float, scoefs)[1024]; ///< scaled coefficients
struct {
float *samples;
} buffer;
} AACEncContext;
extern float ff_aac_pow34sf_tab[428];
#endif /* AVCODEC_AACENC_H */

View File

@@ -400,7 +400,7 @@ static av_unused FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx,
int stay_short = 0;
for (i = 0; i < 8; i++) {
for (j = 0; j < 128; j++) {
v = iir_filter(la[(i*128+j)*ctx->avctx->channels], pch->iir_state);
v = iir_filter(la[i*128+j], pch->iir_state);
sum += v*v;
}
s[i] = sum;
@@ -776,9 +776,8 @@ static void lame_apply_block_type(AacPsyChannel *ctx, FFPsyWindowInfo *wi, int u
ctx->next_window_seq = blocktype;
}
static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx,
const int16_t *audio, const int16_t *la,
int channel, int prev_type)
static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, const float *audio,
const float *la, int channel, int prev_type)
{
AacPsyContext *pctx = (AacPsyContext*) ctx->model_priv_data;
AacPsyChannel *pch = &pctx->ch[channel];
@@ -795,20 +794,20 @@ static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx,
float attack_intensity[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS];
float energy_subshort[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS];
float energy_short[AAC_NUM_BLOCKS_SHORT + 1] = { 0 };
int chans = ctx->avctx->channels;
const int16_t *firbuf = la + (AAC_BLOCK_SIZE_SHORT/4 - PSY_LAME_FIR_LEN) * chans;
const float *firbuf = la + (AAC_BLOCK_SIZE_SHORT/4 - PSY_LAME_FIR_LEN);
int j, att_sum = 0;
/* LAME comment: apply high pass filter of fs/4 */
for (i = 0; i < AAC_BLOCK_SIZE_LONG; i++) {
float sum1, sum2;
sum1 = firbuf[(i + ((PSY_LAME_FIR_LEN - 1) / 2)) * chans];
sum1 = firbuf[i + (PSY_LAME_FIR_LEN - 1) / 2];
sum2 = 0.0;
for (j = 0; j < ((PSY_LAME_FIR_LEN - 1) / 2) - 1; j += 2) {
sum1 += psy_fir_coeffs[j] * (firbuf[(i + j) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j) * chans]);
sum2 += psy_fir_coeffs[j + 1] * (firbuf[(i + j + 1) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j - 1) * chans]);
sum1 += psy_fir_coeffs[j] * (firbuf[i + j] + firbuf[i + PSY_LAME_FIR_LEN - j]);
sum2 += psy_fir_coeffs[j + 1] * (firbuf[i + j + 1] + firbuf[i + PSY_LAME_FIR_LEN - j - 1]);
}
hpfsmpl[i] = sum1 + sum2;
/* NOTE: The LAME psymodel expects it's input in the range -32768 to 32768. Tuning this for normalized floats would be difficult. */
hpfsmpl[i] = (sum1 + sum2) * 32768.0f;
}
/* Calculate the energies of each sub-shortblock */
@@ -823,16 +822,15 @@ static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx,
float const *const pfe = pf + AAC_BLOCK_SIZE_LONG / (AAC_NUM_BLOCKS_SHORT * PSY_LAME_NUM_SUBBLOCKS);
float p = 1.0f;
for (; pf < pfe; pf++)
if (p < fabsf(*pf))
p = fabsf(*pf);
p = FFMAX(p, fabsf(*pf));
pch->prev_energy_subshort[i] = energy_subshort[i + PSY_LAME_NUM_SUBBLOCKS] = p;
energy_short[1 + i / PSY_LAME_NUM_SUBBLOCKS] += p;
/* FIXME: The indexes below are [i + 3 - 2] in the LAME source.
* Obviously the 3 and 2 have some significance, or this would be just [i + 1]
* (which is what we use here). What the 3 stands for is ambigious, as it is both
* number of short blocks, and the number of sub-short blocks.
* It seems that LAME is comparing each sub-block to sub-block + 1 in the
* previous block.
/* NOTE: The indexes below are [i + 3 - 2] in the LAME source.
* Obviously the 3 and 2 have some significance, or this would be just [i + 1]
* (which is what we use here). What the 3 stands for is ambiguous, as it is both
* number of short blocks, and the number of sub-short blocks.
* It seems that LAME is comparing each sub-block to sub-block + 1 in the
* previous block.
*/
if (p > energy_subshort[i + 1])
p = p / energy_subshort[i + 1];
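
[Editor's note: the NOTE added above explains why the filtered samples are scaled by 32768.0f: the LAME-derived model was tuned for the old s16 range, and because the FIR is linear the scale factor can be applied after filtering the normalized floats. A small sketch of that linearity argument; the coefficients and input values below are made up and are not part of the patch.]

#include <stdio.h>

#define NTAPS 5

static float fir(const float *x, const float *h)
{
    float acc = 0.0f;
    int i;
    for (i = 0; i < NTAPS; i++)
        acc += h[i] * x[i];
    return acc;
}

int main(void)
{
    const float h[NTAPS]      = { -0.1f, 0.2f, 0.6f, 0.2f, -0.1f };
    const float x_norm[NTAPS] = { 0.25f, -0.5f, 0.75f, 0.1f, -0.9f };
    float x_s16[NTAPS];
    int i;

    for (i = 0; i < NTAPS; i++)
        x_s16[i] = x_norm[i] * 32768.0f;

    /* filtering then scaling equals scaling then filtering */
    printf("fir(normalized) * 32768 = %f\n", fir(x_norm, h) * 32768.0f);
    printf("fir(s16-scaled)         = %f\n", fir(x_s16, h));
    return 0;
}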

View File

@@ -245,6 +245,7 @@ void avcodec_register_all(void)
REGISTER_DECODER (XAN_WC3, xan_wc3);
REGISTER_DECODER (XAN_WC4, xan_wc4);
REGISTER_DECODER (XL, xl);
REGISTER_ENCDEC (XWD, xwd);
REGISTER_ENCDEC (Y41P, y41p);
REGISTER_DECODER (YOP, yop);
REGISTER_ENCDEC (YUV4, yuv4);

View File

@@ -255,6 +255,7 @@ enum CodecID {
CODEC_ID_VBLE,
CODEC_ID_DXTORY,
CODEC_ID_V410,
CODEC_ID_XWD,
CODEC_ID_Y41P = MKBETAG('Y','4','1','P'),
CODEC_ID_UTVIDEO = 0x800,
CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'),

View File

@@ -112,20 +112,15 @@ av_cold struct FFPsyPreprocessContext* ff_psy_preprocess_init(AVCodecContext *av
return ctx;
}
void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx,
const int16_t *audio, int16_t *dest,
int tag, int channels)
void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx, float **audio, int channels)
{
int ch, i;
int ch;
int frame_size = ctx->avctx->frame_size;
if (ctx->fstate) {
for (ch = 0; ch < channels; ch++)
ff_iir_filter(ctx->fcoeffs, ctx->fstate[tag+ch], ctx->avctx->frame_size,
audio + ch, ctx->avctx->channels,
dest + ch, ctx->avctx->channels);
} else {
for (ch = 0; ch < channels; ch++)
for (i = 0; i < ctx->avctx->frame_size; i++)
dest[i*ctx->avctx->channels + ch] = audio[i*ctx->avctx->channels + ch];
ff_iir_filter_flt(ctx->fcoeffs, ctx->fstate[ch], frame_size,
&audio[ch][frame_size], 1, &audio[ch][frame_size], 1);
}
}

View File

@@ -109,7 +109,7 @@ typedef struct FFPsyModel {
*
* @return suggested window information in a structure
*/
FFPsyWindowInfo (*window)(FFPsyContext *ctx, const int16_t *audio, const int16_t *la, int channel, int prev_type);
FFPsyWindowInfo (*window)(FFPsyContext *ctx, const float *audio, const float *la, int channel, int prev_type);
/**
* Perform psychoacoustic analysis and set band info (threshold, energy) for a group of channels.
@@ -174,14 +174,10 @@ av_cold struct FFPsyPreprocessContext* ff_psy_preprocess_init(AVCodecContext *av
* Preprocess several channel in audio frame in order to compress it better.
*
* @param ctx preprocessing context
* @param audio samples to preprocess
* @param dest place to put filtered samples
* @param tag channel number
* @param channels number of channel to preprocess (some additional work may be done on stereo pair)
* @param audio samples to be filtered (in place)
* @param channels number of channel to preprocess
*/
void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx,
const int16_t *audio, int16_t *dest,
int tag, int channels);
void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx, float **audio, int channels);
/**
* Cleanup audio preprocessing module.

View File

@@ -918,13 +918,15 @@ int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
}
goto parse_common_info;
}
if (v->finterpflag)
v->interpfrm = get_bits1(gb);
if (v->s.pict_type == AV_PICTURE_TYPE_B) {
v->bfraction_lut_index = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
v->bfraction = ff_vc1_bfraction_lut[v->bfraction_lut_index];
if (v->bfraction == 0) {
v->s.pict_type = AV_PICTURE_TYPE_BI; /* XXX: should not happen here */
if (v->fcm == PROGRESSIVE) {
if (v->finterpflag)
v->interpfrm = get_bits1(gb);
if (v->s.pict_type == AV_PICTURE_TYPE_B) {
v->bfraction_lut_index = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
v->bfraction = ff_vc1_bfraction_lut[v->bfraction_lut_index];
if (v->bfraction == 0) {
v->s.pict_type = AV_PICTURE_TYPE_BI; /* XXX: should not happen here */
}
}
}

View File

@@ -21,8 +21,8 @@
#define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 53
#define LIBAVCODEC_VERSION_MINOR 57
#define LIBAVCODEC_VERSION_MICRO 105
#define LIBAVCODEC_VERSION_MINOR 58
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \

libavcodec/xwd.h (new file, 41 lines)
View File

@@ -0,0 +1,41 @@
/*
* XWD image format
*
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_XWD_H
#define AVCODEC_XWD_H
#define XWD_VERSION 7
#define XWD_HEADER_SIZE 100
#define XWD_CMAP_SIZE 12
#define XWD_XY_BITMAP 0
#define XWD_XY_PIXMAP 1
#define XWD_Z_PIXMAP 2
#define XWD_STATIC_GRAY 0
#define XWD_GRAY_SCALE 1
#define XWD_STATIC_COLOR 2
#define XWD_PSEUDO_COLOR 3
#define XWD_TRUE_COLOR 4
#define XWD_DIRECT_COLOR 5
#endif /* AVCODEC_XWD_H */

libavcodec/xwddec.c (new file, 267 lines)
View File

@@ -0,0 +1,267 @@
/*
* XWD image format
*
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "bytestream.h"
#include "xwd.h"
static av_cold int xwd_decode_init(AVCodecContext *avctx)
{
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
return 0;
}
static int xwd_decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt)
{
AVFrame *p = avctx->coded_frame;
const uint8_t *buf = avpkt->data;
int i, ret, buf_size = avpkt->size;
uint32_t version, header_size, vclass, ncolors;
uint32_t xoffset, be, bpp, lsize, rsize;
uint32_t pixformat, pixdepth, bunit, bitorder, bpad;
uint32_t rgb[3];
uint8_t *ptr;
if (buf_size < XWD_HEADER_SIZE)
return AVERROR_INVALIDDATA;
header_size = bytestream_get_be32(&buf);
if (buf_size < header_size)
return AVERROR_INVALIDDATA;
version = bytestream_get_be32(&buf);
if (version != XWD_VERSION) {
av_log(avctx, AV_LOG_ERROR, "unsupported version\n");
return AVERROR_INVALIDDATA;
}
if (header_size < XWD_HEADER_SIZE) {
av_log(avctx, AV_LOG_ERROR, "invalid header size\n");
return AVERROR_INVALIDDATA;
}
pixformat = bytestream_get_be32(&buf);
pixdepth = bytestream_get_be32(&buf);
avctx->width = bytestream_get_be32(&buf);
avctx->height = bytestream_get_be32(&buf);
xoffset = bytestream_get_be32(&buf);
be = bytestream_get_be32(&buf);
bunit = bytestream_get_be32(&buf);
bitorder = bytestream_get_be32(&buf);
bpad = bytestream_get_be32(&buf);
bpp = bytestream_get_be32(&buf);
lsize = bytestream_get_be32(&buf);
vclass = bytestream_get_be32(&buf);
rgb[0] = bytestream_get_be32(&buf);
rgb[1] = bytestream_get_be32(&buf);
rgb[2] = bytestream_get_be32(&buf);
buf += 8;
ncolors = bytestream_get_be32(&buf);
buf += header_size - (XWD_HEADER_SIZE - 20);
av_log(avctx, AV_LOG_DEBUG, "pixformat %d, pixdepth %d, bunit %d, bitorder %d, bpad %d\n",
pixformat, pixdepth, bunit, bitorder, bpad);
av_log(avctx, AV_LOG_DEBUG, "vclass %d, ncolors %d, bpp %d, be %d, lsize %d, xoffset %d\n",
vclass, ncolors, bpp, be, lsize, xoffset);
av_log(avctx, AV_LOG_DEBUG, "red %0x, green %0x, blue %0x\n", rgb[0], rgb[1], rgb[2]);
if (pixformat > XWD_Z_PIXMAP) {
av_log(avctx, AV_LOG_ERROR, "invalid pixmap format\n");
return AVERROR_INVALIDDATA;
}
if (pixdepth == 0 || pixdepth > 32) {
av_log(avctx, AV_LOG_ERROR, "invalid pixmap depth\n");
return AVERROR_INVALIDDATA;
}
if (xoffset) {
av_log_ask_for_sample(avctx, "unsupported xoffset %d\n", xoffset);
return AVERROR_PATCHWELCOME;
}
if (be > 1) {
av_log(avctx, AV_LOG_ERROR, "invalid byte order\n");
return AVERROR_INVALIDDATA;
}
if (bitorder > 1) {
av_log(avctx, AV_LOG_ERROR, "invalid bitmap bit order\n");
return AVERROR_INVALIDDATA;
}
if (bunit != 8 && bunit != 16 && bunit != 32) {
av_log(avctx, AV_LOG_ERROR, "invalid bitmap unit\n");
return AVERROR_INVALIDDATA;
}
if (bpad != 8 && bpad != 16 && bpad != 32) {
av_log(avctx, AV_LOG_ERROR, "invalid bitmap scan-line pad\n");
return AVERROR_INVALIDDATA;
}
if (bpp == 0 || bpp > 32) {
av_log(avctx, AV_LOG_ERROR, "invalid bits per pixel\n");
return AVERROR_INVALIDDATA;
}
if (ncolors > 256) {
av_log(avctx, AV_LOG_ERROR, "invalid number of entries in colormap\n");
return AVERROR_INVALIDDATA;
}
if ((ret = av_image_check_size(avctx->width, avctx->height, 0, NULL)) < 0)
return ret;
rsize = FFALIGN(avctx->width * bpp, bpad) / 8;
if (lsize < rsize) {
av_log(avctx, AV_LOG_ERROR, "invalid bytes per scan-line\n");
return AVERROR_INVALIDDATA;
}
if (buf_size < header_size + ncolors * XWD_CMAP_SIZE + avctx->height * lsize) {
av_log(avctx, AV_LOG_ERROR, "input buffer too small\n");
return AVERROR_INVALIDDATA;
}
if (pixformat != XWD_Z_PIXMAP) {
av_log(avctx, AV_LOG_ERROR, "pixmap format %d unsupported\n", pixformat);
return AVERROR_PATCHWELCOME;
}
avctx->pix_fmt = PIX_FMT_NONE;
switch (vclass) {
case XWD_STATIC_GRAY:
case XWD_GRAY_SCALE:
if (bpp != 1)
return AVERROR_INVALIDDATA;
if (pixdepth == 1)
avctx->pix_fmt = PIX_FMT_MONOWHITE;
break;
case XWD_STATIC_COLOR:
case XWD_PSEUDO_COLOR:
if (bpp == 8)
avctx->pix_fmt = PIX_FMT_PAL8;
break;
case XWD_TRUE_COLOR:
case XWD_DIRECT_COLOR:
if (bpp != 16 && bpp != 24 && bpp != 32)
return AVERROR_INVALIDDATA;
if (bpp == 16 && pixdepth == 15) {
if (rgb[0] == 0x7C00 && rgb[1] == 0x3E0 && rgb[2] == 0x1F)
avctx->pix_fmt = be ? PIX_FMT_RGB555BE : PIX_FMT_RGB555LE;
else if (rgb[0] == 0x1F && rgb[1] == 0x3E0 && rgb[2] == 0x7C00)
avctx->pix_fmt = be ? PIX_FMT_BGR555BE : PIX_FMT_BGR555LE;
} else if (bpp == 16 && pixdepth == 16) {
if (rgb[0] == 0xF800 && rgb[1] == 0x7E0 && rgb[2] == 0x1F)
avctx->pix_fmt = be ? PIX_FMT_RGB565BE : PIX_FMT_RGB565LE;
else if (rgb[0] == 0x1F && rgb[1] == 0x7E0 && rgb[2] == 0xF800)
avctx->pix_fmt = be ? PIX_FMT_BGR565BE : PIX_FMT_BGR565LE;
} else if (bpp == 24) {
if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF)
avctx->pix_fmt = be ? PIX_FMT_RGB24 : PIX_FMT_BGR24;
else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000)
avctx->pix_fmt = be ? PIX_FMT_BGR24 : PIX_FMT_RGB24;
} else if (bpp == 32) {
if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF)
avctx->pix_fmt = be ? PIX_FMT_ARGB : PIX_FMT_BGRA;
else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000)
avctx->pix_fmt = be ? PIX_FMT_ABGR : PIX_FMT_RGBA;
}
buf += ncolors * XWD_CMAP_SIZE;
break;
default:
av_log(avctx, AV_LOG_ERROR, "invalid visual class\n");
return AVERROR_INVALIDDATA;
}
if (avctx->pix_fmt == PIX_FMT_NONE) {
av_log_ask_for_sample(avctx, "unknown file: bpp %d, pixdepth %d, vclass %d\n", bpp, pixdepth, vclass);
return AVERROR_PATCHWELCOME;
}
if (p->data[0])
avctx->release_buffer(avctx, p);
p->reference = 0;
if ((ret = avctx->get_buffer(avctx, p)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
p->key_frame = 1;
p->pict_type = AV_PICTURE_TYPE_I;
if (avctx->pix_fmt == PIX_FMT_PAL8) {
uint32_t *dst = (uint32_t *)p->data[1];
uint8_t red, green, blue;
for (i = 0; i < ncolors; i++) {
buf += 4; // skip colormap entry number
red = *buf; buf += 2;
green = *buf; buf += 2;
blue = *buf; buf += 2;
buf += 2; // skip bitmask flag and padding
dst[i] = red << 16 | green << 8 | blue;
}
}
ptr = p->data[0];
for (i = 0; i < avctx->height; i++) {
bytestream_get_buffer(&buf, ptr, rsize);
buf += lsize - rsize;
ptr += p->linesize[0];
}
*data_size = sizeof(AVFrame);
*(AVFrame *)data = *p;
return buf_size;
}
static av_cold int xwd_decode_close(AVCodecContext *avctx)
{
if (avctx->coded_frame->data[0])
avctx->release_buffer(avctx, avctx->coded_frame);
av_freep(&avctx->coded_frame);
return 0;
}
AVCodec ff_xwd_decoder = {
.name = "xwd",
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_XWD,
.init = xwd_decode_init,
.close = xwd_decode_close,
.decode = xwd_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("XWD (X Window Dump) image"),
};

libavcodec/xwdenc.c (new file, 246 lines)
View File

@@ -0,0 +1,246 @@
/*
* XWD image format
*
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/intreadwrite.h"
#include "libavutil/pixdesc.h"
#include "avcodec.h"
#include "bytestream.h"
#include "xwd.h"
#define WINDOW_NAME "lavcxwdenc"
#define WINDOW_NAME_SIZE 11
static av_cold int xwd_encode_init(AVCodecContext *avctx)
{
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
return 0;
}
static int xwd_encode_frame(AVCodecContext *avctx, uint8_t *buf,
int buf_size, void *data)
{
AVFrame *p = data;
enum PixelFormat pix_fmt = avctx->pix_fmt;
uint32_t pixdepth, bpp, bpad, ncolors = 0, lsize, vclass, be = 0;
uint32_t rgb[3] = { 0 };
uint32_t header_size;
int i, out_size;
uint8_t *ptr;
pixdepth = av_get_bits_per_pixel(&av_pix_fmt_descriptors[pix_fmt]);
if (av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_BE)
be = 1;
switch (pix_fmt) {
case PIX_FMT_ARGB:
case PIX_FMT_BGRA:
case PIX_FMT_RGBA:
case PIX_FMT_ABGR:
if (pix_fmt == PIX_FMT_ARGB ||
pix_fmt == PIX_FMT_ABGR)
be = 1;
if (pix_fmt == PIX_FMT_ABGR ||
pix_fmt == PIX_FMT_RGBA) {
rgb[0] = 0xFF;
rgb[1] = 0xFF00;
rgb[2] = 0xFF0000;
} else {
rgb[0] = 0xFF0000;
rgb[1] = 0xFF00;
rgb[2] = 0xFF;
}
bpp = 32;
pixdepth = 24;
vclass = XWD_TRUE_COLOR;
bpad = 32;
break;
case PIX_FMT_BGR24:
case PIX_FMT_RGB24:
if (pix_fmt == PIX_FMT_RGB24)
be = 1;
bpp = 24;
vclass = XWD_TRUE_COLOR;
bpad = 32;
rgb[0] = 0xFF0000;
rgb[1] = 0xFF00;
rgb[2] = 0xFF;
break;
case PIX_FMT_RGB565LE:
case PIX_FMT_RGB565BE:
case PIX_FMT_BGR565LE:
case PIX_FMT_BGR565BE:
if (pix_fmt == PIX_FMT_BGR565LE ||
pix_fmt == PIX_FMT_BGR565BE) {
rgb[0] = 0x1F;
rgb[1] = 0x7E0;
rgb[2] = 0xF800;
} else {
rgb[0] = 0xF800;
rgb[1] = 0x7E0;
rgb[2] = 0x1F;
}
bpp = 16;
vclass = XWD_TRUE_COLOR;
bpad = 16;
break;
case PIX_FMT_RGB555LE:
case PIX_FMT_RGB555BE:
case PIX_FMT_BGR555LE:
case PIX_FMT_BGR555BE:
if (pix_fmt == PIX_FMT_BGR555LE ||
pix_fmt == PIX_FMT_BGR555BE) {
rgb[0] = 0x1F;
rgb[1] = 0x3E0;
rgb[2] = 0x7C00;
} else {
rgb[0] = 0x7C00;
rgb[1] = 0x3E0;
rgb[2] = 0x1F;
}
bpp = 16;
vclass = XWD_TRUE_COLOR;
bpad = 16;
break;
case PIX_FMT_RGB8:
case PIX_FMT_BGR8:
case PIX_FMT_RGB4_BYTE:
case PIX_FMT_BGR4_BYTE:
case PIX_FMT_PAL8:
bpp = 8;
vclass = XWD_PSEUDO_COLOR;
bpad = 8;
ncolors = 256;
break;
case PIX_FMT_MONOWHITE:
bpp = 1;
bpad = 8;
vclass = XWD_STATIC_GRAY;
break;
default:
av_log(avctx, AV_LOG_INFO, "unsupported pixel format\n");
return AVERROR(EINVAL);
}
lsize = FFALIGN(bpp * avctx->width, bpad) / 8;
header_size = XWD_HEADER_SIZE + WINDOW_NAME_SIZE;
out_size = header_size + ncolors * XWD_CMAP_SIZE + avctx->height * lsize;
if (buf_size < out_size) {
av_log(avctx, AV_LOG_ERROR, "output buffer too small\n");
return AVERROR(ENOMEM);
}
avctx->coded_frame->key_frame = 1;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
bytestream_put_be32(&buf, header_size);
bytestream_put_be32(&buf, XWD_VERSION); // file version
bytestream_put_be32(&buf, XWD_Z_PIXMAP); // pixmap format
bytestream_put_be32(&buf, pixdepth); // pixmap depth in pixels
bytestream_put_be32(&buf, avctx->width); // pixmap width in pixels
bytestream_put_be32(&buf, avctx->height); // pixmap height in pixels
bytestream_put_be32(&buf, 0); // bitmap x offset
bytestream_put_be32(&buf, be); // byte order
bytestream_put_be32(&buf, 32); // bitmap unit
bytestream_put_be32(&buf, be); // bit-order of image data
bytestream_put_be32(&buf, bpad); // bitmap scan-line pad in bits
bytestream_put_be32(&buf, bpp); // bits per pixel
bytestream_put_be32(&buf, lsize); // bytes per scan-line
bytestream_put_be32(&buf, vclass); // visual class
bytestream_put_be32(&buf, rgb[0]); // red mask
bytestream_put_be32(&buf, rgb[1]); // green mask
bytestream_put_be32(&buf, rgb[2]); // blue mask
bytestream_put_be32(&buf, 8); // size of each bitmask in bits
bytestream_put_be32(&buf, ncolors); // number of colors
bytestream_put_be32(&buf, ncolors); // number of entries in color map
bytestream_put_be32(&buf, avctx->width); // window width
bytestream_put_be32(&buf, avctx->height); // window height
bytestream_put_be32(&buf, 0); // window upper left X coordinate
bytestream_put_be32(&buf, 0); // window upper left Y coordinate
bytestream_put_be32(&buf, 0); // window border width
bytestream_put_buffer(&buf, WINDOW_NAME, WINDOW_NAME_SIZE);
for (i = 0; i < ncolors; i++) {
uint32_t val;
uint8_t red, green, blue;
val = AV_RN32A(p->data[1] + i * 4);
red = (val >> 16) & 0xFF;
green = (val >> 8) & 0xFF;
blue = val & 0xFF;
bytestream_put_be32(&buf, i); // colormap entry number
bytestream_put_be16(&buf, red << 8);
bytestream_put_be16(&buf, green << 8);
bytestream_put_be16(&buf, blue << 8);
bytestream_put_byte(&buf, 0x7); // bitmask flag
bytestream_put_byte(&buf, 0); // padding
}
ptr = p->data[0];
for (i = 0; i < avctx->height; i++) {
bytestream_put_buffer(&buf, ptr, lsize);
ptr += p->linesize[0];
}
return out_size;
}
static av_cold int xwd_encode_close(AVCodecContext *avctx)
{
av_freep(&avctx->coded_frame);
return 0;
}
AVCodec ff_xwd_encoder = {
.name = "xwd",
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_XWD,
.init = xwd_encode_init,
.encode = xwd_encode_frame,
.close = xwd_encode_close,
.pix_fmts = (const enum PixelFormat[]) { PIX_FMT_BGRA,
PIX_FMT_RGBA,
PIX_FMT_ARGB,
PIX_FMT_ABGR,
PIX_FMT_RGB24,
PIX_FMT_BGR24,
PIX_FMT_RGB565BE,
PIX_FMT_RGB565LE,
PIX_FMT_BGR565BE,
PIX_FMT_BGR565LE,
PIX_FMT_RGB555BE,
PIX_FMT_RGB555LE,
PIX_FMT_BGR555BE,
PIX_FMT_BGR555LE,
PIX_FMT_RGB8,
PIX_FMT_BGR8,
PIX_FMT_RGB4_BYTE,
PIX_FMT_BGR4_BYTE,
PIX_FMT_PAL8,
PIX_FMT_MONOWHITE,
PIX_FMT_NONE },
.long_name = NULL_IF_CONFIG_SMALL("XWD (X Window Dump) image"),
};
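
[Editor's note: for reference, the output size that xwd_encode_frame() checks against is just the fixed 100-byte header, the window name, one 12-byte colormap entry per palette color, and the padded scan lines. A standalone sketch of that size math for an assumed 640x480 PAL8 frame; the xwd_file_size() helper below is hypothetical and not part of the patch.]

#include <stdio.h>

/* constants mirrored from xwd.h above */
#define XWD_HEADER_SIZE 100   /* 25 big-endian 32-bit fields */
#define XWD_CMAP_SIZE    12   /* bytes per colormap entry */

/* hypothetical helper mirroring the size math in xwd_encode_frame() */
static int xwd_file_size(int width, int height, int bpp, int bpad,
                         int ncolors, int window_name_size)
{
    int lsize = ((bpp * width + bpad - 1) / bpad) * bpad / 8; /* FFALIGN(...)/8 */
    return XWD_HEADER_SIZE + window_name_size
         + ncolors * XWD_CMAP_SIZE + height * lsize;
}

int main(void)
{
    /* PAL8: 8 bpp, scan lines padded to 8 bits, 256 palette entries,
     * 11-byte window name as written by the encoder ("lavcxwdenc" + NUL) */
    printf("640x480 PAL8 XWD: %d bytes\n",
           xwd_file_size(640, 480, 8, 8, 256, 11));
    return 0;
}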

View File

@@ -326,8 +326,8 @@ OBJS-$(CONFIG_WC3_DEMUXER) += wc3movie.o
OBJS-$(CONFIG_WEBM_MUXER) += matroskaenc.o matroska.o \
riff.o isom.o avc.o \
flacenc_header.o avlanguage.o
OBJS-$(CONFIG_WSAUD_DEMUXER) += westwood.o
OBJS-$(CONFIG_WSVQA_DEMUXER) += westwood.o
OBJS-$(CONFIG_WSAUD_DEMUXER) += westwood_aud.o
OBJS-$(CONFIG_WSVQA_DEMUXER) += westwood_vqa.o
OBJS-$(CONFIG_WTV_DEMUXER) += wtvdec.o wtv.o asfdec.o asf.o asfcrypt.o \
avlanguage.o mpegts.o isom.o riff.o
OBJS-$(CONFIG_WTV_MUXER) += wtvenc.o wtv.o asf.o asfenc.o riff.o

View File

@@ -91,6 +91,7 @@ static const IdStrMap img_tags[] = {
{ CODEC_ID_JPEG2000 , "jpc"},
{ CODEC_ID_DPX , "dpx"},
{ CODEC_ID_PICTOR , "pic"},
{ CODEC_ID_XWD , "xwd"},
{ CODEC_ID_NONE , NULL}
};
@@ -528,7 +529,7 @@ AVOutputFormat ff_image2_muxer = {
.name = "image2",
.long_name = NULL_IF_CONFIG_SMALL("image2 sequence"),
.extensions = "bmp,dpx,jls,jpeg,jpg,ljpg,pam,pbm,pcx,pgm,pgmyuv,png,"
"ppm,sgi,tga,tif,tiff,jp2,j2c",
"ppm,sgi,tga,tif,tiff,jp2,j2c,xwd",
.priv_data_size = sizeof(VideoData),
.video_codec = CODEC_ID_MJPEG,
.write_header = write_header,

libavformat/westwood_aud.c (new file, 173 lines)
View File

@@ -0,0 +1,173 @@
/*
* Westwood Studios AUD Format Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Westwood Studios AUD file demuxer
* by Mike Melanson (melanson@pcisys.net)
* for more information on the Westwood file formats, visit:
* http://www.pcisys.net/~melanson/codecs/
* http://www.geocities.com/SiliconValley/8682/aud3.txt
*
* Implementation note: There is no definite file signature for AUD files.
* The demuxer uses a probabilistic strategy for content detection. This
* entails performing sanity checks on certain header values in order to
* qualify a file. Refer to wsaud_probe() for the precise parameters.
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#define AUD_HEADER_SIZE 12
#define AUD_CHUNK_PREAMBLE_SIZE 8
#define AUD_CHUNK_SIGNATURE 0x0000DEAF
typedef struct WsAudDemuxContext {
int audio_samplerate;
int audio_channels;
int audio_bits;
enum CodecID audio_type;
int audio_stream_index;
int64_t audio_frame_counter;
} WsAudDemuxContext;
static int wsaud_probe(AVProbeData *p)
{
int field;
/* Probabilistic content detection strategy: There is no file signature
* so perform sanity checks on various header parameters:
* 8000 <= sample rate (16 bits) <= 48000 ==> 40001 acceptable numbers
* flags <= 0x03 (2 LSBs are used) ==> 4 acceptable numbers
* compression type (8 bits) = 1 or 99 ==> 2 acceptable numbers
* first audio chunk signature (32 bits) ==> 1 acceptable number
* The number space contains 2^64 numbers. There are 40001 * 4 * 2 * 1 =
* 320008 acceptable number combinations.
*/
if (p->buf_size < AUD_HEADER_SIZE + AUD_CHUNK_PREAMBLE_SIZE)
return 0;
/* check sample rate */
field = AV_RL16(&p->buf[0]);
if ((field < 8000) || (field > 48000))
return 0;
/* enforce the rule that the top 6 bits of this flags field are reserved (0);
* this might not be true, but enforce it until deemed unnecessary */
if (p->buf[10] & 0xFC)
return 0;
/* note: only check for WS IMA (type 99) right now since there is no
* support for type 1 */
if (p->buf[11] != 99)
return 0;
/* read ahead to the first audio chunk and validate the first header signature */
if (AV_RL32(&p->buf[16]) != AUD_CHUNK_SIGNATURE)
return 0;
/* return 1/2 certainty since this file check is a little sketchy */
return AVPROBE_SCORE_MAX / 2;
}
static int wsaud_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
WsAudDemuxContext *wsaud = s->priv_data;
AVIOContext *pb = s->pb;
AVStream *st;
unsigned char header[AUD_HEADER_SIZE];
if (avio_read(pb, header, AUD_HEADER_SIZE) != AUD_HEADER_SIZE)
return AVERROR(EIO);
wsaud->audio_samplerate = AV_RL16(&header[0]);
if (header[11] == 99)
wsaud->audio_type = CODEC_ID_ADPCM_IMA_WS;
else
return AVERROR_INVALIDDATA;
/* flag 0 indicates stereo */
wsaud->audio_channels = (header[10] & 0x1) + 1;
/* flag 1 indicates 16 bit audio */
wsaud->audio_bits = (((header[10] & 0x2) >> 1) + 1) * 8;
/* initialize the audio decoder stream */
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 33, 1, wsaud->audio_samplerate);
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = wsaud->audio_type;
st->codec->codec_tag = 0; /* no tag */
st->codec->channels = wsaud->audio_channels;
st->codec->sample_rate = wsaud->audio_samplerate;
st->codec->bits_per_coded_sample = wsaud->audio_bits;
st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
st->codec->bits_per_coded_sample / 4;
st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
wsaud->audio_stream_index = st->index;
wsaud->audio_frame_counter = 0;
return 0;
}
static int wsaud_read_packet(AVFormatContext *s,
AVPacket *pkt)
{
WsAudDemuxContext *wsaud = s->priv_data;
AVIOContext *pb = s->pb;
unsigned char preamble[AUD_CHUNK_PREAMBLE_SIZE];
unsigned int chunk_size;
int ret = 0;
if (avio_read(pb, preamble, AUD_CHUNK_PREAMBLE_SIZE) !=
AUD_CHUNK_PREAMBLE_SIZE)
return AVERROR(EIO);
/* validate the chunk */
if (AV_RL32(&preamble[4]) != AUD_CHUNK_SIGNATURE)
return AVERROR_INVALIDDATA;
chunk_size = AV_RL16(&preamble[0]);
ret= av_get_packet(pb, pkt, chunk_size);
if (ret != chunk_size)
return AVERROR(EIO);
pkt->stream_index = wsaud->audio_stream_index;
pkt->pts = wsaud->audio_frame_counter;
pkt->pts /= wsaud->audio_samplerate;
/* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
wsaud->audio_frame_counter += (chunk_size * 2) / wsaud->audio_channels;
return ret;
}
AVInputFormat ff_wsaud_demuxer = {
.name = "wsaud",
.long_name = NULL_IF_CONFIG_SMALL("Westwood Studios audio format"),
.priv_data_size = sizeof(WsAudDemuxContext),
.read_probe = wsaud_probe,
.read_header = wsaud_read_header,
.read_packet = wsaud_read_packet,
};
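
[Editor's note: a quick check of the arithmetic in the wsaud_probe() comment above: the tested header fields span 64 bits, of which 40001 * 4 * 2 * 1 = 320008 combinations pass, so random data clears the probe only about once in 5.8e13 tries. The throwaway program below is just an editor's sanity check, not part of the patch.]

#include <stdio.h>

int main(void)
{
    double acceptable = 40001.0 * 4.0 * 2.0 * 1.0; /* rate, flags, type, signature */
    double space      = 18446744073709551616.0;    /* 2^64 header combinations */

    printf("acceptable combinations : %.0f\n", acceptable);
    printf("false-positive rate     : %.3g\n", acceptable / space);
    return 0;
}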

View File

@@ -1,5 +1,5 @@
/*
* Westwood Studios Multimedia Formats Demuxer (VQA, AUD)
* Westwood Studios VQA Format Demuxer
* Copyright (c) 2003 The ffmpeg Project
*
* This file is part of FFmpeg.
@@ -21,26 +21,17 @@
/**
* @file
* Westwood Studios VQA & AUD file demuxers
* Westwood Studios VQA file demuxer
* by Mike Melanson (melanson@pcisys.net)
* for more information on the Westwood file formats, visit:
* http://www.pcisys.net/~melanson/codecs/
* http://www.geocities.com/SiliconValley/8682/aud3.txt
*
* Implementation note: There is no definite file signature for AUD files.
* The demuxer uses a probabilistic strategy for content detection. This
* entails performing sanity checks on certain header values in order to
* qualify a file. Refer to wsaud_probe() for the precise parameters.
*/
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "internal.h"
#define AUD_HEADER_SIZE 12
#define AUD_CHUNK_PREAMBLE_SIZE 8
#define AUD_CHUNK_SIGNATURE 0x0000DEAF
#define FORM_TAG MKBETAG('F', 'O', 'R', 'M')
#define WVQA_TAG MKBETAG('W', 'V', 'Q', 'A')
#define VQHD_TAG MKBETAG('V', 'Q', 'H', 'D')
@@ -63,15 +54,6 @@
#define VQA_FRAMERATE 15
#define VQA_PREAMBLE_SIZE 8
typedef struct WsAudDemuxContext {
int audio_samplerate;
int audio_channels;
int audio_bits;
enum CodecID audio_type;
int audio_stream_index;
int64_t audio_frame_counter;
} WsAudDemuxContext;
typedef struct WsVqaDemuxContext {
int audio_samplerate;
int audio_channels;
@@ -83,119 +65,6 @@ typedef struct WsVqaDemuxContext {
int64_t audio_frame_counter;
} WsVqaDemuxContext;
static int wsaud_probe(AVProbeData *p)
{
int field;
/* Probabilistic content detection strategy: There is no file signature
* so perform sanity checks on various header parameters:
* 8000 <= sample rate (16 bits) <= 48000 ==> 40001 acceptable numbers
* flags <= 0x03 (2 LSBs are used) ==> 4 acceptable numbers
* compression type (8 bits) = 1 or 99 ==> 2 acceptable numbers
* first audio chunk signature (32 bits) ==> 1 acceptable number
* The number space contains 2^64 numbers. There are 40001 * 4 * 2 * 1 =
* 320008 acceptable number combinations.
*/
if (p->buf_size < AUD_HEADER_SIZE + AUD_CHUNK_PREAMBLE_SIZE)
return 0;
/* check sample rate */
field = AV_RL16(&p->buf[0]);
if ((field < 8000) || (field > 48000))
return 0;
/* enforce the rule that the top 6 bits of this flags field are reserved (0);
* this might not be true, but enforce it until deemed unnecessary */
if (p->buf[10] & 0xFC)
return 0;
/* note: only check for WS IMA (type 99) right now since there is no
* support for type 1 */
if (p->buf[11] != 99)
return 0;
/* read ahead to the first audio chunk and validate the first header signature */
if (AV_RL32(&p->buf[16]) != AUD_CHUNK_SIGNATURE)
return 0;
/* return 1/2 certainty since this file check is a little sketchy */
return AVPROBE_SCORE_MAX / 2;
}
static int wsaud_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
WsAudDemuxContext *wsaud = s->priv_data;
AVIOContext *pb = s->pb;
AVStream *st;
unsigned char header[AUD_HEADER_SIZE];
if (avio_read(pb, header, AUD_HEADER_SIZE) != AUD_HEADER_SIZE)
return AVERROR(EIO);
wsaud->audio_samplerate = AV_RL16(&header[0]);
if (header[11] == 99)
wsaud->audio_type = CODEC_ID_ADPCM_IMA_WS;
else
return AVERROR_INVALIDDATA;
/* flag 0 indicates stereo */
wsaud->audio_channels = (header[10] & 0x1) + 1;
/* flag 1 indicates 16 bit audio */
wsaud->audio_bits = (((header[10] & 0x2) >> 1) + 1) * 8;
/* initialize the audio decoder stream */
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 33, 1, wsaud->audio_samplerate);
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = wsaud->audio_type;
st->codec->codec_tag = 0; /* no tag */
st->codec->channels = wsaud->audio_channels;
st->codec->sample_rate = wsaud->audio_samplerate;
st->codec->bits_per_coded_sample = wsaud->audio_bits;
st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
st->codec->bits_per_coded_sample / 4;
st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
wsaud->audio_stream_index = st->index;
wsaud->audio_frame_counter = 0;
return 0;
}
static int wsaud_read_packet(AVFormatContext *s,
AVPacket *pkt)
{
WsAudDemuxContext *wsaud = s->priv_data;
AVIOContext *pb = s->pb;
unsigned char preamble[AUD_CHUNK_PREAMBLE_SIZE];
unsigned int chunk_size;
int ret = 0;
if (avio_read(pb, preamble, AUD_CHUNK_PREAMBLE_SIZE) !=
AUD_CHUNK_PREAMBLE_SIZE)
return AVERROR(EIO);
/* validate the chunk */
if (AV_RL32(&preamble[4]) != AUD_CHUNK_SIGNATURE)
return AVERROR_INVALIDDATA;
chunk_size = AV_RL16(&preamble[0]);
ret= av_get_packet(pb, pkt, chunk_size);
if (ret != chunk_size)
return AVERROR(EIO);
pkt->stream_index = wsaud->audio_stream_index;
pkt->pts = wsaud->audio_frame_counter;
pkt->pts /= wsaud->audio_samplerate;
/* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
wsaud->audio_frame_counter += (chunk_size * 2) / wsaud->audio_channels;
return ret;
}
static int wsvqa_probe(AVProbeData *p)
{
/* need 12 bytes to qualify */
@@ -367,17 +236,6 @@ static int wsvqa_read_packet(AVFormatContext *s,
return ret;
}
#if CONFIG_WSAUD_DEMUXER
AVInputFormat ff_wsaud_demuxer = {
.name = "wsaud",
.long_name = NULL_IF_CONFIG_SMALL("Westwood Studios audio format"),
.priv_data_size = sizeof(WsAudDemuxContext),
.read_probe = wsaud_probe,
.read_header = wsaud_read_header,
.read_packet = wsaud_read_packet,
};
#endif
#if CONFIG_WSVQA_DEMUXER
AVInputFormat ff_wsvqa_demuxer = {
.name = "wsvqa",
.long_name = NULL_IF_CONFIG_SMALL("Westwood Studios VQA format"),
@@ -386,4 +244,3 @@ AVInputFormat ff_wsvqa_demuxer = {
.read_header = wsvqa_read_header,
.read_packet = wsvqa_read_packet,
};
#endif