Merge remote-tracking branch 'qatar/master'
* qatar/master:
  x86: dsputil: Only compile motion_est code when encoders are enabled
  mem: fix typo in check for __ICC
  fate: mp3: drop redundant CMP setting
  rtp: Depacketization of JPEG (RFC 2435)
  Rename ff_put_string to avpriv_put_string
  mjpeg: Rename some symbols to avpriv_* instead of ff_*
  yadif: cosmetics

Conflicts:
    Changelog
    libavcodec/mjpegenc.c
    libavcodec/x86/Makefile
    libavfilter/vf_yadif.c
    libavformat/version.h
    libavutil/mem.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit bff2afb3e9
@@ -59,6 +59,7 @@ version next:
 - smartblur filter ported from MPlayer
 - CPiA decoder
 - decimate filter ported from MPlayer
+- RTP depacketization of JPEG


 version 0.11:
@@ -628,6 +628,7 @@ OBJS-$(CONFIG_OGG_DEMUXER)             += xiph.o flac.o flacdata.o \
 OBJS-$(CONFIG_OGG_MUXER)               += xiph.o flac.o flacdata.o \
                                           vorbis_data.o
 OBJS-$(CONFIG_RTP_MUXER)               += mpeg4audio.o xiph.o
+OBJS-$(CONFIG_RTPDEC)                  += mjpeg.o
 OBJS-$(CONFIG_SPDIF_DEMUXER)           += aacadtsdec.o mpeg4audio.o
 OBJS-$(CONFIG_SPDIF_MUXER)             += dca.o
 OBJS-$(CONFIG_WEBM_MUXER)              += mpeg4audio.o mpegaudiodata.o \
@@ -47,7 +47,7 @@ void avpriv_align_put_bits(PutBitContext *s)
     put_bits(s,s->bit_left & 7,0);
 }

-void ff_put_string(PutBitContext *pb, const char *string, int terminate_string)
+void avpriv_put_string(PutBitContext *pb, const char *string, int terminate_string)
 {
     while(*string){
         put_bits(pb, 8, *string);
@@ -62,17 +62,17 @@ const unsigned char std_chrominance_quant_tbl[64] = {

 /* Set up the standard Huffman tables (cf. JPEG standard section K.3) */
 /* IMPORTANT: these are only valid for 8-bit data precision! */
-const uint8_t ff_mjpeg_bits_dc_luminance[17] =
+const uint8_t avpriv_mjpeg_bits_dc_luminance[17] =
 { /* 0-base */ 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 };
-const uint8_t ff_mjpeg_val_dc[12] =
+const uint8_t avpriv_mjpeg_val_dc[12] =
 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };

-const uint8_t ff_mjpeg_bits_dc_chrominance[17] =
+const uint8_t avpriv_mjpeg_bits_dc_chrominance[17] =
 { /* 0-base */ 0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 };

-const uint8_t ff_mjpeg_bits_ac_luminance[17] =
+const uint8_t avpriv_mjpeg_bits_ac_luminance[17] =
 { /* 0-base */ 0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d };
-const uint8_t ff_mjpeg_val_ac_luminance[] =
+const uint8_t avpriv_mjpeg_val_ac_luminance[] =
 { 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
   0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
   0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
@@ -96,10 +96,10 @@ const uint8_t ff_mjpeg_val_ac_luminance[] =
   0xf9, 0xfa
 };

-const uint8_t ff_mjpeg_bits_ac_chrominance[17] =
+const uint8_t avpriv_mjpeg_bits_ac_chrominance[17] =
 { /* 0-base */ 0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77 };

-const uint8_t ff_mjpeg_val_ac_chrominance[] =
+const uint8_t avpriv_mjpeg_val_ac_chrominance[] =
 { 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
   0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
   0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
@@ -138,16 +138,16 @@ static inline void put_marker(PutBitContext *p, int code)
     case 7: ret= (left + top)>>1; break;\
 }

-extern const uint8_t ff_mjpeg_bits_dc_luminance[];
-extern const uint8_t ff_mjpeg_val_dc[];
+extern const uint8_t avpriv_mjpeg_bits_dc_luminance[];
+extern const uint8_t avpriv_mjpeg_val_dc[];

-extern const uint8_t ff_mjpeg_bits_dc_chrominance[];
+extern const uint8_t avpriv_mjpeg_bits_dc_chrominance[];

-extern const uint8_t ff_mjpeg_bits_ac_luminance[];
-extern const uint8_t ff_mjpeg_val_ac_luminance[];
+extern const uint8_t avpriv_mjpeg_bits_ac_luminance[];
+extern const uint8_t avpriv_mjpeg_val_ac_luminance[];

-extern const uint8_t ff_mjpeg_bits_ac_chrominance[];
-extern const uint8_t ff_mjpeg_val_ac_chrominance[];
+extern const uint8_t avpriv_mjpeg_bits_ac_chrominance[];
+extern const uint8_t avpriv_mjpeg_val_ac_chrominance[];

 void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code,
                                   const uint8_t *bits_table,
@@ -59,15 +59,15 @@ static uint8_t *append(uint8_t *buf, const uint8_t *src, int size)
 static uint8_t *append_dht_segment(uint8_t *buf)
 {
     buf = append(buf, dht_segment_head, sizeof(dht_segment_head));
-    buf = append(buf, ff_mjpeg_bits_dc_luminance + 1, 16);
+    buf = append(buf, avpriv_mjpeg_bits_dc_luminance + 1, 16);
     buf = append(buf, dht_segment_frag, sizeof(dht_segment_frag));
-    buf = append(buf, ff_mjpeg_val_dc, 12);
+    buf = append(buf, avpriv_mjpeg_val_dc, 12);
     *(buf++) = 0x10;
-    buf = append(buf, ff_mjpeg_bits_ac_luminance + 1, 16);
-    buf = append(buf, ff_mjpeg_val_ac_luminance, 162);
+    buf = append(buf, avpriv_mjpeg_bits_ac_luminance + 1, 16);
+    buf = append(buf, avpriv_mjpeg_val_ac_luminance, 162);
     *(buf++) = 0x11;
-    buf = append(buf, ff_mjpeg_bits_ac_chrominance + 1, 16);
-    buf = append(buf, ff_mjpeg_val_ac_chrominance, 162);
+    buf = append(buf, avpriv_mjpeg_bits_ac_chrominance + 1, 16);
+    buf = append(buf, avpriv_mjpeg_val_ac_chrominance, 162);
     return buf;
 }

@@ -65,18 +65,18 @@ static int build_vlc(VLC *vlc, const uint8_t *bits_table,

 static void build_basic_mjpeg_vlc(MJpegDecodeContext *s)
 {
-    build_vlc(&s->vlcs[0][0], ff_mjpeg_bits_dc_luminance,
-              ff_mjpeg_val_dc, 12, 0, 0);
-    build_vlc(&s->vlcs[0][1], ff_mjpeg_bits_dc_chrominance,
-              ff_mjpeg_val_dc, 12, 0, 0);
-    build_vlc(&s->vlcs[1][0], ff_mjpeg_bits_ac_luminance,
-              ff_mjpeg_val_ac_luminance, 251, 0, 1);
-    build_vlc(&s->vlcs[1][1], ff_mjpeg_bits_ac_chrominance,
-              ff_mjpeg_val_ac_chrominance, 251, 0, 1);
-    build_vlc(&s->vlcs[2][0], ff_mjpeg_bits_ac_luminance,
-              ff_mjpeg_val_ac_luminance, 251, 0, 0);
-    build_vlc(&s->vlcs[2][1], ff_mjpeg_bits_ac_chrominance,
-              ff_mjpeg_val_ac_chrominance, 251, 0, 0);
+    build_vlc(&s->vlcs[0][0], avpriv_mjpeg_bits_dc_luminance,
+              avpriv_mjpeg_val_dc, 12, 0, 0);
+    build_vlc(&s->vlcs[0][1], avpriv_mjpeg_bits_dc_chrominance,
+              avpriv_mjpeg_val_dc, 12, 0, 0);
+    build_vlc(&s->vlcs[1][0], avpriv_mjpeg_bits_ac_luminance,
+              avpriv_mjpeg_val_ac_luminance, 251, 0, 1);
+    build_vlc(&s->vlcs[1][1], avpriv_mjpeg_bits_ac_chrominance,
+              avpriv_mjpeg_val_ac_chrominance, 251, 0, 1);
+    build_vlc(&s->vlcs[2][0], avpriv_mjpeg_bits_ac_luminance,
+              avpriv_mjpeg_val_ac_luminance, 251, 0, 0);
+    build_vlc(&s->vlcs[2][1], avpriv_mjpeg_bits_ac_chrominance,
+              avpriv_mjpeg_val_ac_chrominance, 251, 0, 0);
 }

 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
@@ -60,20 +60,20 @@ av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
     /* build all the huffman tables */
     ff_mjpeg_build_huffman_codes(m->huff_size_dc_luminance,
                                  m->huff_code_dc_luminance,
-                                 ff_mjpeg_bits_dc_luminance,
-                                 ff_mjpeg_val_dc);
+                                 avpriv_mjpeg_bits_dc_luminance,
+                                 avpriv_mjpeg_val_dc);
     ff_mjpeg_build_huffman_codes(m->huff_size_dc_chrominance,
                                  m->huff_code_dc_chrominance,
-                                 ff_mjpeg_bits_dc_chrominance,
-                                 ff_mjpeg_val_dc);
+                                 avpriv_mjpeg_bits_dc_chrominance,
+                                 avpriv_mjpeg_val_dc);
     ff_mjpeg_build_huffman_codes(m->huff_size_ac_luminance,
                                  m->huff_code_ac_luminance,
-                                 ff_mjpeg_bits_ac_luminance,
-                                 ff_mjpeg_val_ac_luminance);
+                                 avpriv_mjpeg_bits_ac_luminance,
+                                 avpriv_mjpeg_val_ac_luminance);
     ff_mjpeg_build_huffman_codes(m->huff_size_ac_chrominance,
                                  m->huff_code_ac_chrominance,
-                                 ff_mjpeg_bits_ac_chrominance,
-                                 ff_mjpeg_val_ac_chrominance);
+                                 avpriv_mjpeg_bits_ac_chrominance,
+                                 avpriv_mjpeg_val_ac_chrominance);

     s->mjpeg_ctx = m;
     return 0;
@@ -146,15 +146,15 @@ static void jpeg_table_header(MpegEncContext *s)
     ptr = put_bits_ptr(p);
     put_bits(p, 16, 0); /* patched later */
     size = 2;
-    size += put_huffman_table(s, 0, 0, ff_mjpeg_bits_dc_luminance,
-                              ff_mjpeg_val_dc);
-    size += put_huffman_table(s, 0, 1, ff_mjpeg_bits_dc_chrominance,
-                              ff_mjpeg_val_dc);
+    size += put_huffman_table(s, 0, 0, avpriv_mjpeg_bits_dc_luminance,
+                              avpriv_mjpeg_val_dc);
+    size += put_huffman_table(s, 0, 1, avpriv_mjpeg_bits_dc_chrominance,
+                              avpriv_mjpeg_val_dc);

-    size += put_huffman_table(s, 1, 0, ff_mjpeg_bits_ac_luminance,
-                              ff_mjpeg_val_ac_luminance);
-    size += put_huffman_table(s, 1, 1, ff_mjpeg_bits_ac_chrominance,
-                              ff_mjpeg_val_ac_chrominance);
+    size += put_huffman_table(s, 1, 0, avpriv_mjpeg_bits_ac_luminance,
+                              avpriv_mjpeg_val_ac_luminance);
+    size += put_huffman_table(s, 1, 1, avpriv_mjpeg_bits_ac_chrominance,
+                              avpriv_mjpeg_val_ac_chrominance);
     AV_WB16(ptr, size);
 }

@@ -169,7 +169,7 @@ static void jpeg_put_comments(MpegEncContext *s)
     /* JFIF header */
     put_marker(p, APP0);
     put_bits(p, 16, 16);
-    ff_put_string(p, "JFIF", 1); /* this puts the trailing zero-byte too */
+    avpriv_put_string(p, "JFIF", 1); /* this puts the trailing zero-byte too */
     put_bits(p, 16, 0x0102); /* v 1.02 */
     put_bits(p, 8, 0); /* units type: 0 - aspect ratio */
     put_bits(p, 16, s->avctx->sample_aspect_ratio.num);
@@ -184,7 +184,7 @@ static void jpeg_put_comments(MpegEncContext *s)
         flush_put_bits(p);
         ptr = put_bits_ptr(p);
         put_bits(p, 16, 0); /* patched later */
-        ff_put_string(p, LIBAVCODEC_IDENT, 1);
+        avpriv_put_string(p, LIBAVCODEC_IDENT, 1);
         size = strlen(LIBAVCODEC_IDENT)+3;
         AV_WB16(ptr, size);
     }
@@ -196,7 +196,7 @@ static void jpeg_put_comments(MpegEncContext *s)
         flush_put_bits(p);
         ptr = put_bits_ptr(p);
         put_bits(p, 16, 0); /* patched later */
-        ff_put_string(p, "CS=ITU601", 1);
+        avpriv_put_string(p, "CS=ITU601", 1);
         size = strlen("CS=ITU601")+3;
         AV_WB16(ptr, size);
     }
@@ -1035,7 +1035,7 @@ static void mpeg4_encode_vol_header(MpegEncContext * s, int vo_number, int vol_n
     if(!(s->flags & CODEC_FLAG_BITEXACT)){
         put_bits(&s->pb, 16, 0);
         put_bits(&s->pb, 16, 0x1B2);    /* user_data */
-        ff_put_string(&s->pb, LIBAVCODEC_IDENT, 0);
+        avpriv_put_string(&s->pb, LIBAVCODEC_IDENT, 0);
     }
 }

@@ -99,7 +99,7 @@ static inline void flush_put_bits(PutBitContext *s)

 #ifdef BITSTREAM_WRITER_LE
 #define avpriv_align_put_bits align_put_bits_unsupported_here
-#define ff_put_string ff_put_string_unsupported_here
+#define avpriv_put_string ff_put_string_unsupported_here
 #define avpriv_copy_bits avpriv_copy_bits_unsupported_here
 #else
 /**
@@ -112,7 +112,7 @@ void avpriv_align_put_bits(PutBitContext *s);
  *
  * @param terminate_string  0-terminates the written string if value is 1
  */
-void ff_put_string(PutBitContext *pb, const char *string, int terminate_string);
+void avpriv_put_string(PutBitContext *pb, const char *string, int terminate_string);

 /**
  * Copy the content of src to the bitstream.
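For illustration, a minimal sketch of the renamed helper in use, written in the same style as the mjpegenc.c call sites touched by this patch; the wrapper function and its buffer are hypothetical and not part of the change:

/* Hypothetical example: write the "JFIF" tag plus its terminating zero byte
 * into buf, as mjpegenc.c does when emitting the APP0 segment. */
static int write_jfif_tag(uint8_t *buf, int buf_size)
{
    PutBitContext pb;

    init_put_bits(&pb, buf, buf_size);
    avpriv_put_string(&pb, "JFIF", 1); /* terminate_string = 1: also emit '\0' */
    flush_put_bits(&pb);
    return put_bits_count(&pb) / 8;    /* number of bytes written */
}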
@@ -11,7 +11,6 @@ MMX-OBJS += x86/dsputil_mmx.o \
             x86/fmtconvert_init.o \
             x86/idct_mmx_xvid.o \
             x86/idct_sse2_xvid.o \
-            x86/motion_est.o \
             x86/simple_idct.o \

 MMX-OBJS-$(CONFIG_AAC_DECODER)         += x86/sbrdsp_init.o
@@ -19,7 +18,8 @@ MMX-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp_init.o
 MMX-OBJS-$(CONFIG_CAVS_DECODER)        += x86/cavsdsp.o
 MMX-OBJS-$(CONFIG_DWT)                 += x86/snowdsp.o \
                                           x86/dwt.o
-MMX-OBJS-$(CONFIG_ENCODERS)            += x86/dsputilenc_mmx.o
+MMX-OBJS-$(CONFIG_ENCODERS)            += x86/dsputilenc_mmx.o \
+                                          x86/motion_est.o
 MMX-OBJS-$(CONFIG_FFT)                 += x86/fft_init.o
 MMX-OBJS-$(CONFIG_GPL)                 += x86/idct_mmx.o
 MMX-OBJS-$(CONFIG_H264DSP)             += x86/h264dsp_init.o
@@ -30,6 +30,8 @@
 #undef NDEBUG
 #include <assert.h>

+#define PERM_RWP AV_PERM_WRITE | AV_PERM_PRESERVE | AV_PERM_REUSE
+
 #define CHECK(j)\
     {   int score = FFABS(cur[mrefs-1+(j)] - cur[prefs-1-(j)])\
                   + FFABS(cur[mrefs  +(j)] - cur[prefs  -(j)])\
@@ -46,19 +48,19 @@
             int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
             int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e) )>>1; \
             int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e) )>>1; \
-            int diff = FFMAX3(temporal_diff0>>1, temporal_diff1, temporal_diff2); \
-            int spatial_pred = (c+e)>>1; \
-            int spatial_score = FFABS(cur[mrefs-1] - cur[prefs-1]) + FFABS(c-e) \
-                              + FFABS(cur[mrefs+1] - cur[prefs+1]) - 1; \
+            int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
+            int spatial_pred = (c+e) >> 1; \
+            int spatial_score = FFABS(cur[mrefs - 1] - cur[prefs - 1]) + FFABS(c-e) \
+                              + FFABS(cur[mrefs + 1] - cur[prefs + 1]) - 1; \
 \
             CHECK(-1) CHECK(-2) }} }} \
             CHECK( 1) CHECK( 2) }} }} \
 \
             if (mode < 2) { \
-                int b = (prev2[2*mrefs] + next2[2*mrefs])>>1; \
-                int f = (prev2[2*prefs] + next2[2*prefs])>>1; \
-                int max = FFMAX3(d-e, d-c, FFMIN(b-c, f-e)); \
-                int min = FFMIN3(d-e, d-c, FFMAX(b-c, f-e)); \
+                int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
+                int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
+                int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
+                int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e)); \
 \
                 diff = FFMAX3(diff, min, -max); \
             } \
@@ -91,7 +93,8 @@ static void filter_line_c(uint8_t *dst,

 static void filter_line_c_16bit(uint16_t *dst,
                                 uint16_t *prev, uint16_t *cur, uint16_t *next,
-                                int w, int prefs, int mrefs, int parity, int mode)
+                                int w, int prefs, int mrefs, int parity,
+                                int mode)
 {
     int x;
     uint16_t *prev2 = parity ? prev : cur ;
@@ -129,11 +132,11 @@ static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,

     for (y = 0; y < h; y++) {
         if ((y ^ parity) & 1) {
-            uint8_t *prev = &yadif->prev->data[i][y*refs];
-            uint8_t *cur  = &yadif->cur ->data[i][y*refs];
-            uint8_t *next = &yadif->next->data[i][y*refs];
-            uint8_t *dst  = &dstpic->data[i][y*dstpic->linesize[i]];
-            int     mode  = y==1 || y+2==h ? 2 : yadif->mode;
+            uint8_t *prev = &yadif->prev->data[i][y * refs];
+            uint8_t *cur  = &yadif->cur ->data[i][y * refs];
+            uint8_t *next = &yadif->next->data[i][y * refs];
+            uint8_t *dst  = &dstpic->data[i][y * dstpic->linesize[i]];
+            int      mode = y == 1 || y + 2 == h ? 2 : yadif->mode;
             int prefs = y+1<h ? refs : -refs;
             int mrefs = y ?-refs : refs;

@@ -151,10 +154,12 @@ static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
                 cur  = tmp;
             }

-            yadif->filter_line(dst, prev, cur, next, w, prefs, mrefs, parity ^ tff, mode);
+            yadif->filter_line(dst, prev, cur, next, w,
+                               prefs, mrefs,
+                               parity ^ tff, mode);
         } else {
-            memcpy(&dstpic->data[i][y*dstpic->linesize[i]],
-                   &yadif->cur->data[i][y*refs], w*df);
+            memcpy(&dstpic->data[i][y * dstpic->linesize[i]],
+                   &yadif->cur->data[i][y * refs], w * df);
         }
     }
 }
@@ -165,19 +170,18 @@ static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
 static int return_frame(AVFilterContext *ctx, int is_second)
 {
     YADIFContext *yadif = ctx->priv;
-    AVFilterLink *link= ctx->outputs[0];
+    AVFilterLink *link  = ctx->outputs[0];
     int tff, ret;

     if (yadif->parity == -1) {
         tff = yadif->cur->video->interlaced ?
-            yadif->cur->video->top_field_first : 1;
+              yadif->cur->video->top_field_first : 1;
     } else {
-        tff = yadif->parity^1;
+        tff = yadif->parity ^ 1;
     }

     if (is_second) {
-        yadif->out = ff_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE |
-                                         AV_PERM_REUSE, link->w, link->h);
+        yadif->out = ff_get_video_buffer(link, PERM_RWP, link->w, link->h);
         if (!yadif->out)
             return AVERROR(ENOMEM);

@@ -253,15 +257,17 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
         !(yadif->prev = avfilter_ref_buffer(yadif->cur, ~AV_PERM_WRITE)))
         return AVERROR(ENOMEM);

-    yadif->out = ff_get_video_buffer(ctx->outputs[0], AV_PERM_WRITE | AV_PERM_PRESERVE |
-                                     AV_PERM_REUSE, link->w, link->h);
+    yadif->out = ff_get_video_buffer(ctx->outputs[0], PERM_RWP,
+                                     link->w, link->h);
     if (!yadif->out)
         return AVERROR(ENOMEM);

     avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
     yadif->out->video->interlaced = 0;

     if (yadif->out->pts != AV_NOPTS_VALUE)
         yadif->out->pts *= 2;

     return ff_start_frame(ctx->outputs[0], yadif->out);
 }

@@ -304,6 +310,7 @@ static int request_frame(AVFilterLink *link)

     if (ret == AVERROR_EOF && yadif->cur) {
         AVFilterBufferRef *next = avfilter_ref_buffer(yadif->next, ~AV_PERM_WRITE);

         if (!next)
             return AVERROR(ENOMEM);

@@ -332,7 +339,8 @@ static int poll_frame(AVFilterLink *link)
     if (val <= 0)
         return val;

-    if (val >= 1 && !yadif->next) { //FIXME change API to not requre this red tape
+    //FIXME change API to not requre this red tape
+    if (val >= 1 && !yadif->next) {
         if ((ret = ff_request_frame(link->src->inputs[0])) < 0)
             return ret;
         val = ff_poll_frame(link->src->inputs[0]);
@@ -398,14 +406,17 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
     yadif->auto_enable = 0;
     yadif->csp = NULL;

-    if (args) sscanf(args, "%d:%d:%d", &yadif->mode, &yadif->parity, &yadif->auto_enable);
+    if (args)
+        sscanf(args, "%d:%d:%d",
+               &yadif->mode, &yadif->parity, &yadif->auto_enable);

     yadif->filter_line = filter_line_c;

     if (HAVE_MMX)
         ff_yadif_init_x86(yadif);

-    av_log(ctx, AV_LOG_VERBOSE, "mode:%d parity:%d auto_enable:%d\n", yadif->mode, yadif->parity, yadif->auto_enable);
+    av_log(ctx, AV_LOG_VERBOSE, "mode:%d parity:%d auto_enable:%d\n",
+           yadif->mode, yadif->parity, yadif->auto_enable);

     return 0;
 }
@@ -293,6 +293,7 @@ OBJS-$(CONFIG_RTPDEC) += rdt.o \
                                             rtpdec_h263_rfc2190.o \
                                             rtpdec_h264.o \
                                             rtpdec_ilbc.o \
+                                            rtpdec_jpeg.o \
                                             rtpdec_latm.o \
                                             rtpdec_mpeg4.o \
                                             rtpdec_qcelp.o \
@@ -69,6 +69,7 @@ void av_register_rtp_dynamic_payload_handlers(void)
     ff_register_dynamic_payload_handler(&ff_h263_rfc2190_dynamic_handler);
     ff_register_dynamic_payload_handler(&ff_h264_dynamic_handler);
     ff_register_dynamic_payload_handler(&ff_ilbc_dynamic_handler);
+    ff_register_dynamic_payload_handler(&ff_jpeg_dynamic_handler);
     ff_register_dynamic_payload_handler(&ff_vorbis_dynamic_handler);
     ff_register_dynamic_payload_handler(&ff_theora_dynamic_handler);
     ff_register_dynamic_payload_handler(&ff_qdm2_dynamic_handler);
@@ -46,6 +46,7 @@ extern RTPDynamicProtocolHandler ff_h263_2000_dynamic_handler;
 extern RTPDynamicProtocolHandler ff_h263_rfc2190_dynamic_handler;
 extern RTPDynamicProtocolHandler ff_h264_dynamic_handler;
 extern RTPDynamicProtocolHandler ff_ilbc_dynamic_handler;
+extern RTPDynamicProtocolHandler ff_jpeg_dynamic_handler;
 extern RTPDynamicProtocolHandler ff_mp4a_latm_dynamic_handler;
 extern RTPDynamicProtocolHandler ff_mp4v_es_dynamic_handler;
 extern RTPDynamicProtocolHandler ff_mpeg4_generic_dynamic_handler;
libavformat/rtpdec_jpeg.c (new file, 331 lines)
@@ -0,0 +1,331 @@
/*
 * RTP JPEG-compressed Video Depacketizer, RFC 2435
 * Copyright (c) 2012 Samuel Pitoiset
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "rtpdec_formats.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/mjpeg.h"

/**
 * RTP/JPEG specific private data.
 */
struct PayloadContext {
    AVIOContext *frame;         ///< current frame buffer
    uint32_t    timestamp;      ///< current frame timestamp
    int         hdr_size;       ///< size of the current frame header
};

static PayloadContext *jpeg_new_context(void)
{
    return av_mallocz(sizeof(PayloadContext));
}

static inline void free_frame_if_needed(PayloadContext *jpeg)
{
    if (jpeg->frame) {
        uint8_t *p;
        avio_close_dyn_buf(jpeg->frame, &p);
        av_free(p);
        jpeg->frame = NULL;
    }
}

static void jpeg_free_context(PayloadContext *jpeg)
{
    free_frame_if_needed(jpeg);
    av_free(jpeg);
}

static void jpeg_create_huffman_table(PutBitContext *p, int table_class,
                                      int table_id, const uint8_t *bits_table,
                                      const uint8_t *value_table)
{
    int i, n = 0;

    put_bits(p, 8, 0);
    put_bits(p, 4, table_class);
    put_bits(p, 4, table_id);

    for (i = 1; i <= 16; i++) {
        n += bits_table[i];
        put_bits(p, 8, bits_table[i]);
    }

    for (i = 0; i < n; i++) {
        put_bits(p, 8, value_table[i]);
    }
}

static int jpeg_create_header(uint8_t *buf, int size, uint32_t type, uint32_t w,
                              uint32_t h, const uint8_t *qtable, int nb_qtable)
{
    PutBitContext pbc;

    init_put_bits(&pbc, buf, size);

    /* Convert from blocks to pixels. */
    w <<= 3;
    h <<= 3;

    /* SOI */
    put_marker(&pbc, SOI);

    /* JFIF header */
    put_marker(&pbc, APP0);
    put_bits(&pbc, 16, 16);
    avpriv_put_string(&pbc, "JFIF", 1);
    put_bits(&pbc, 16, 0x0201);
    put_bits(&pbc, 8, 0);
    put_bits(&pbc, 16, 1);
    put_bits(&pbc, 16, 1);
    put_bits(&pbc, 8, 0);
    put_bits(&pbc, 8, 0);

    /* DQT */
    put_marker(&pbc, DQT);
    if (nb_qtable == 2) {
        put_bits(&pbc, 16, 2 + 2 * (1 + 64));
    } else {
        put_bits(&pbc, 16, 2 + 1 * (1 + 64));
    }
    put_bits(&pbc, 8, 0);

    /* Each table is an array of 64 values given in zig-zag
     * order, identical to the format used in a JFIF DQT
     * marker segment. */
    avpriv_copy_bits(&pbc, qtable, 64 * 8);

    if (nb_qtable == 2) {
        put_bits(&pbc, 8, 1);
        avpriv_copy_bits(&pbc, qtable + 64, 64 * 8);
    }

    /* DHT */
    put_marker(&pbc, DHT);

    jpeg_create_huffman_table(&pbc, 0, 0, avpriv_mjpeg_bits_dc_luminance,
                              avpriv_mjpeg_val_dc);
    jpeg_create_huffman_table(&pbc, 0, 1, avpriv_mjpeg_bits_dc_chrominance,
                              avpriv_mjpeg_val_dc);
    jpeg_create_huffman_table(&pbc, 1, 0, avpriv_mjpeg_bits_ac_luminance,
                              avpriv_mjpeg_val_ac_luminance);
    jpeg_create_huffman_table(&pbc, 1, 1, avpriv_mjpeg_bits_ac_chrominance,
                              avpriv_mjpeg_val_ac_chrominance);

    /* SOF0 */
    put_marker(&pbc, SOF0);
    put_bits(&pbc, 16, 17);
    put_bits(&pbc, 8, 8);
    put_bits(&pbc, 8, h >> 8);
    put_bits(&pbc, 8, h);
    put_bits(&pbc, 8, w >> 8);
    put_bits(&pbc, 8, w);
    put_bits(&pbc, 8, 3);
    put_bits(&pbc, 8, 1);
    put_bits(&pbc, 8, type ? 34 : 33);
    put_bits(&pbc, 8, 0);
    put_bits(&pbc, 8, 2);
    put_bits(&pbc, 8, 17);
    put_bits(&pbc, 8, nb_qtable == 2 ? 1 : 0);
    put_bits(&pbc, 8, 3);
    put_bits(&pbc, 8, 17);
    put_bits(&pbc, 8, nb_qtable == 2 ? 1 : 0);

    /* SOS */
    put_marker(&pbc, SOS);
    put_bits(&pbc, 16, 12);
    put_bits(&pbc, 8, 3);
    put_bits(&pbc, 8, 1);
    put_bits(&pbc, 8, 0);
    put_bits(&pbc, 8, 2);
    put_bits(&pbc, 8, 17);
    put_bits(&pbc, 8, 3);
    put_bits(&pbc, 8, 17);
    put_bits(&pbc, 8, 0);
    put_bits(&pbc, 8, 63);
    put_bits(&pbc, 8, 0);

    /* Fill the buffer. */
    flush_put_bits(&pbc);

    /* Return the length in bytes of the JPEG header. */
    return put_bits_count(&pbc) / 8;
}

static int jpeg_parse_packet(AVFormatContext *ctx, PayloadContext *jpeg,
                             AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                             const uint8_t *buf, int len, int flags)
{
    uint8_t type, q, width, height;
    const uint8_t *qtables = NULL;
    uint16_t qtable_len;
    uint32_t off;
    int ret;

    if (len < 8) {
        av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
        return AVERROR_INVALIDDATA;
    }

    /* Parse the main JPEG header. */
    off    = AV_RB24(buf + 1);  /* fragment byte offset */
    type   = AV_RB8(buf + 4);   /* id of jpeg decoder params */
    q      = AV_RB8(buf + 5);   /* quantization factor (or table id) */
    width  = AV_RB8(buf + 6);   /* frame width in 8 pixel blocks */
    height = AV_RB8(buf + 7);   /* frame height in 8 pixel blocks */
    buf += 8;
    len -= 8;

    /* Parse the restart marker header. */
    if (type > 63) {
        av_log(ctx, AV_LOG_ERROR,
               "Unimplemented RTP/JPEG restart marker header.\n");
        return AVERROR_PATCHWELCOME;
    }

    /* Parse the quantization table header. */
    if (q > 127 && off == 0) {
        uint8_t precision;

        if (len < 4) {
            av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
            return AVERROR_INVALIDDATA;
        }

        /* The first byte is reserved for future use. */
        precision  = AV_RB8(buf + 1);   /* size of coefficients */
        qtable_len = AV_RB16(buf + 2);  /* length in bytes */
        buf += 4;
        len -= 4;

        if (precision)
            av_log(ctx, AV_LOG_WARNING, "Only 8-bit precision is supported.\n");

        if (q == 255 && qtable_len == 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Invalid RTP/JPEG packet. Quantization tables not found.\n");
            return AVERROR_INVALIDDATA;
        }

        if (qtable_len > 0) {
            if (len < qtable_len) {
                av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
                return AVERROR_INVALIDDATA;
            }
            qtables = buf;
            buf += qtable_len;
            len -= qtable_len;
        }
    }

    if (off == 0) {
        /* Start of JPEG data packet. */
        uint8_t hdr[1024];

        /* Skip the current frame in case of the end packet
         * has been lost somewhere. */
        free_frame_if_needed(jpeg);

        if ((ret = avio_open_dyn_buf(&jpeg->frame)) < 0)
            return ret;
        jpeg->timestamp = *timestamp;

        if (!qtables) {
            av_log(ctx, AV_LOG_ERROR,
                   "Unimplemented default quantization tables.\n");
            return AVERROR_PATCHWELCOME;
        }

        /* Generate a frame and scan headers that can be prepended to the
         * RTP/JPEG data payload to produce a JPEG compressed image in
         * interchange format. */
        jpeg->hdr_size = jpeg_create_header(hdr, sizeof(hdr), type, width,
                                            height, qtables,
                                            qtable_len > 64 ? 2 : 1);

        /* Copy JPEG header to frame buffer. */
        avio_write(jpeg->frame, hdr, jpeg->hdr_size);
    }

    if (!jpeg->frame) {
        av_log(ctx, AV_LOG_ERROR,
               "Received packet without a start chunk; dropping frame.\n");
        return AVERROR(EAGAIN);
    }

    if (jpeg->timestamp != *timestamp) {
        /* Skip the current frame if timestamp is incorrect.
         * A start packet has been lost somewhere. */
        free_frame_if_needed(jpeg);
        av_log(ctx, AV_LOG_ERROR, "RTP timestamps don't match.\n");
        return AVERROR_INVALIDDATA;
    }

    if (off != avio_tell(jpeg->frame) - jpeg->hdr_size) {
        av_log(ctx, AV_LOG_ERROR,
               "Missing packets; dropping frame.\n");
        return AVERROR(EAGAIN);
    }

    /* Copy data to frame buffer. */
    avio_write(jpeg->frame, buf, len);

    if (flags & RTP_FLAG_MARKER) {
        /* End of JPEG data packet. */
        PutBitContext pbc;
        uint8_t buf[2];

        /* Put EOI marker. */
        init_put_bits(&pbc, buf, sizeof(buf));
        put_marker(&pbc, EOI);
        flush_put_bits(&pbc);
        avio_write(jpeg->frame, buf, sizeof(buf));

        /* Prepare the JPEG packet. */
        av_init_packet(pkt);
        pkt->size = avio_close_dyn_buf(jpeg->frame, &pkt->data);
        if (pkt->size < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Error occured when getting frame buffer.\n");
            jpeg->frame = NULL;
            return pkt->size;
        }
        pkt->stream_index = st->index;
        pkt->destruct     = av_destruct_packet;

        /* Re-init the frame buffer. */
        jpeg->frame = NULL;

        return 0;
    }

    return AVERROR(EAGAIN);
}

RTPDynamicProtocolHandler ff_jpeg_dynamic_handler = {
    .enc_name          = "JPEG",
    .codec_type        = AVMEDIA_TYPE_VIDEO,
    .codec_id          = AV_CODEC_ID_MJPEG,
    .alloc             = jpeg_new_context,
    .free              = jpeg_free_context,
    .parse_packet      = jpeg_parse_packet,
    .static_payload_id = 26,
};
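For orientation: jpeg_parse_packet() above starts by reading the fixed 8-byte main JPEG header that RFC 2435 places at the front of every RTP payload. A self-contained sketch of just that header parse follows; the struct and function names are hypothetical and only for illustration, while the real code works directly on the packet buffer:

#include <stdint.h>

/* Hypothetical holder for the RFC 2435 main JPEG header fields. */
typedef struct RFC2435MainHeader {
    uint32_t off;    /* fragment byte offset of this payload within the frame */
    uint8_t  type;   /* id of the JPEG decoder parameters */
    uint8_t  q;      /* quantization factor, or table id when >= 128 */
    uint8_t  width;  /* frame width in 8-pixel blocks */
    uint8_t  height; /* frame height in 8-pixel blocks */
} RFC2435MainHeader;

/* Parse the 8-byte main header; return 0 on success, -1 if the packet is too short. */
static int parse_rfc2435_main_header(const uint8_t *buf, int len,
                                     RFC2435MainHeader *h)
{
    if (len < 8)
        return -1;
    /* buf[0] is the type-specific field and is not interpreted here. */
    h->off    = (uint32_t)buf[1] << 16 | buf[2] << 8 | buf[3]; /* 24-bit big-endian offset */
    h->type   = buf[4];
    h->q      = buf[5];
    h->width  = buf[6];
    h->height = buf[7];
    return 0;
}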
@@ -30,8 +30,8 @@
 #include "libavutil/avutil.h"

 #define LIBAVFORMAT_VERSION_MAJOR 54
-#define LIBAVFORMAT_VERSION_MINOR 25
-#define LIBAVFORMAT_VERSION_MICRO 105
+#define LIBAVFORMAT_VERSION_MINOR 26
+#define LIBAVFORMAT_VERSION_MICRO 100

 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
                                                LIBAVFORMAT_VERSION_MINOR, \
@@ -1,41 +1,33 @@
 FATE_MP3 += fate-mp3-float-conf-compl
 fate-mp3-float-conf-compl: CMD = pcm -acodec mp3float -i $(SAMPLES)/mp3-conformance/compl.bit
-fate-mp3-float-conf-compl: CMP = stddev
 fate-mp3-float-conf-compl: REF = $(SAMPLES)/mp3-conformance/compl.pcm

 FATE_MP3 += fate-mp3-float-conf-he_32khz
 fate-mp3-float-conf-he_32khz: CMD = pcm -acodec mp3float -i $(SAMPLES)/mp3-conformance/he_32khz.bit -fs 343296
-fate-mp3-float-conf-he_32khz: CMP = stddev
 fate-mp3-float-conf-he_32khz: REF = $(SAMPLES)/mp3-conformance/he_32khz.pcm

 FATE_MP3 += fate-mp3-float-conf-he_44khz
 fate-mp3-float-conf-he_44khz: CMD = pcm -acodec mp3float -i $(SAMPLES)/mp3-conformance/he_44khz.bit -fs 942336
-fate-mp3-float-conf-he_44khz: CMP = stddev
 fate-mp3-float-conf-he_44khz: REF = $(SAMPLES)/mp3-conformance/he_44khz.pcm

 FATE_MP3 += fate-mp3-float-conf-he_48khz
 fate-mp3-float-conf-he_48khz: CMD = pcm -acodec mp3float -i $(SAMPLES)/mp3-conformance/he_48khz.bit -fs 343296
-fate-mp3-float-conf-he_48khz: CMP = stddev
 fate-mp3-float-conf-he_48khz: REF = $(SAMPLES)/mp3-conformance/he_48khz.pcm

 FATE_MP3 += fate-mp3-float-conf-hecommon
 fate-mp3-float-conf-hecommon: CMD = pcm -acodec mp3float -i $(SAMPLES)/mp3-conformance/hecommon.bit -fs 133632
-fate-mp3-float-conf-hecommon: CMP = stddev
 fate-mp3-float-conf-hecommon: REF = $(SAMPLES)/mp3-conformance/hecommon.pcm

 FATE_MP3 += fate-mp3-float-conf-si
 fate-mp3-float-conf-si: CMD = pcm -acodec mp3float -i $(SAMPLES)/mp3-conformance/si.bit -fs 269568
-fate-mp3-float-conf-si: CMP = stddev
 fate-mp3-float-conf-si: REF = $(SAMPLES)/mp3-conformance/si.pcm

 FATE_MP3 += fate-mp3-float-conf-si_block
 fate-mp3-float-conf-si_block: CMD = pcm -acodec mp3float -i $(SAMPLES)/mp3-conformance/si_block.bit -fs 145152
-fate-mp3-float-conf-si_block: CMP = stddev
 fate-mp3-float-conf-si_block: REF = $(SAMPLES)/mp3-conformance/si_block.pcm

 FATE_MP3 += fate-mp3-float-extra_overread
 fate-mp3-float-extra_overread: CMD = pcm -c:a mp3float -i $(SAMPLES)/mpegaudio/extra_overread.mp3
-fate-mp3-float-extra_overread: CMP = stddev
 fate-mp3-float-extra_overread: REF = $(SAMPLES)/mpegaudio/extra_overread.pcm

 FATE_SAMPLES_AVCONV += $(FATE_MP3)