From af1e247038c6c728b857118405adee0d2f184b40 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 11 Sep 2011 08:14:43 +0200 Subject: [PATCH 1/7] libxavs: add private options corresponding to deprecated global options Code mostly copied from libx264 wrapper. --- libavcodec/libxavs.c | 126 +++++++++++++++++++++++++++++++++---------- 1 file changed, 99 insertions(+), 27 deletions(-) diff --git a/libavcodec/libxavs.c b/libavcodec/libxavs.c index d11c4248e1..fa2d71c770 100644 --- a/libavcodec/libxavs.c +++ b/libavcodec/libxavs.c @@ -24,8 +24,11 @@ #include #include #include +#include #include #include "avcodec.h" +#include "internal.h" +#include "libavutil/opt.h" #define END_OF_STREAM 0x001 @@ -41,6 +44,15 @@ typedef struct XavsContext { int sei_size; AVFrame out_pic; int end_of_stream; + float crf; + int cqp; + int b_bias; + float cplxblur; + int direct_pred; + int aud; + int fast_pskip; + int mbtree; + int mixed_refs; } XavsContext; static void XAVS_log(void *p, int level, const char *fmt, va_list args) @@ -181,13 +193,17 @@ static av_cold int XAVS_init(AVCodecContext *avctx) x4->params.pf_log = XAVS_log; x4->params.p_log_private = avctx; x4->params.i_keyint_max = avctx->gop_size; - x4->params.rc.i_bitrate = avctx->bit_rate / 1000; + if (avctx->bit_rate) { + x4->params.rc.i_bitrate = avctx->bit_rate / 1000; + x4->params.rc.i_rc_method = XAVS_RC_ABR; + } x4->params.rc.i_vbv_buffer_size = avctx->rc_buffer_size / 1000; x4->params.rc.i_vbv_max_bitrate = avctx->rc_max_rate / 1000; x4->params.rc.b_stat_write = avctx->flags & CODEC_FLAG_PASS1; if (avctx->flags & CODEC_FLAG_PASS2) { x4->params.rc.b_stat_read = 1; } else { +#if FF_API_X264_GLOBAL_OPTS if (avctx->crf) { x4->params.rc.i_rc_method = XAVS_RC_CRF; x4->params.rc.f_rf_constant = avctx->crf; @@ -195,19 +211,63 @@ static av_cold int XAVS_init(AVCodecContext *avctx) x4->params.rc.i_rc_method = XAVS_RC_CQP; x4->params.rc.i_qp_constant = avctx->cqp; } +#endif + + if (x4->crf >= 0) { + x4->params.rc.i_rc_method = XAVS_RC_CRF; + x4->params.rc.f_rf_constant = x4->crf; + } else if (x4->cqp >= 0) { + x4->params.rc.i_rc_method = XAVS_RC_CQP; + x4->params.rc.i_qp_constant = x4->cqp; + } } - /* if neither crf nor cqp modes are selected we have to enable the RC */ - /* we do it this way because we cannot check if the bitrate has been set */ - if (!(avctx->crf || (avctx->cqp > -1))) - x4->params.rc.i_rc_method = XAVS_RC_ABR; +#if FF_API_X264_GLOBAL_OPTS + if (avctx->bframebias) + x4->params.i_bframe_bias = avctx->bframebias; + if (avctx->deblockalpha) + x4->params.i_deblocking_filter_alphac0 = avctx->deblockalpha; + if (avctx->deblockbeta) + x4->params.i_deblocking_filter_beta = avctx->deblockbeta; + if (avctx->complexityblur >= 0) + x4->params.rc.f_complexity_blur = avctx->complexityblur; + if (avctx->directpred >= 0) + x4->params.analyse.i_direct_mv_pred = avctx->directpred; + if (avctx->partitions) { + if (avctx->partitions & XAVS_PART_I8X8) + x4->params.analyse.inter |= XAVS_ANALYSE_I8x8; + if (avctx->partitions & XAVS_PART_P8X8) + x4->params.analyse.inter |= XAVS_ANALYSE_PSUB16x16; + if (avctx->partitions & XAVS_PART_B8X8) + x4->params.analyse.inter |= XAVS_ANALYSE_BSUB16x16; + } + x4->params.rc.b_mb_tree = !!(avctx->flags2 & CODEC_FLAG2_MBTREE); + x4->params.b_aud = avctx->flags2 & CODEC_FLAG2_AUD; + x4->params.analyse.b_mixed_references = avctx->flags2 & CODEC_FLAG2_MIXED_REFS; + x4->params.analyse.b_fast_pskip = avctx->flags2 & CODEC_FLAG2_FASTPSKIP; + x4->params.analyse.b_weighted_bipred = avctx->flags2 & CODEC_FLAG2_WPRED; +#endif + + if 
(x4->aud >= 0) + x4->params.b_aud = x4->aud; + if (x4->mbtree >= 0) + x4->params.rc.b_mb_tree = x4->mbtree; + if (x4->direct_pred >= 0) + x4->params.analyse.i_direct_mv_pred = x4->direct_pred; + if (x4->fast_pskip >= 0) + x4->params.analyse.b_fast_pskip = x4->fast_pskip; + if (x4->mixed_refs >= 0) + x4->params.analyse.b_mixed_references = x4->mixed_refs; + if (x4->b_bias != INT_MIN) + x4->params.i_bframe_bias = x4->b_bias; + if (x4->cplxblur >= 0) + x4->params.rc.f_complexity_blur = x4->cplxblur; x4->params.i_bframe = avctx->max_b_frames; /* cabac is not included in AVS JiZhun Profile */ x4->params.b_cabac = 0; x4->params.i_bframe_adaptive = avctx->b_frame_strategy; - x4->params.i_bframe_bias = avctx->bframebias; avctx->has_b_frames = !!avctx->max_b_frames; @@ -220,8 +280,6 @@ static av_cold int XAVS_init(AVCodecContext *avctx) x4->params.i_scenecut_threshold = avctx->scenechange_threshold; // x4->params.b_deblocking_filter = avctx->flags & CODEC_FLAG_LOOP_FILTER; - x4->params.i_deblocking_filter_alphac0 = avctx->deblockalpha; - x4->params.i_deblocking_filter_beta = avctx->deblockbeta; x4->params.rc.i_qp_min = avctx->qmin; x4->params.rc.i_qp_max = avctx->qmax; @@ -229,7 +287,6 @@ static av_cold int XAVS_init(AVCodecContext *avctx) x4->params.rc.f_qcompress = avctx->qcompress; /* 0.0 => cbr, 1.0 => constant qp */ x4->params.rc.f_qblur = avctx->qblur; /* temporally blur quants */ - x4->params.rc.f_complexity_blur = avctx->complexityblur; x4->params.i_frame_reference = avctx->refs; @@ -241,20 +298,6 @@ static av_cold int XAVS_init(AVCodecContext *avctx) x4->params.i_fps_num = avctx->time_base.den; x4->params.i_fps_den = avctx->time_base.num; x4->params.analyse.inter = XAVS_ANALYSE_I8x8 |XAVS_ANALYSE_PSUB16x16| XAVS_ANALYSE_BSUB16x16; - if (avctx->partitions) { - if (avctx->partitions & XAVS_PART_I8X8) - x4->params.analyse.inter |= XAVS_ANALYSE_I8x8; - - if (avctx->partitions & XAVS_PART_P8X8) - x4->params.analyse.inter |= XAVS_ANALYSE_PSUB16x16; - - if (avctx->partitions & XAVS_PART_B8X8) - x4->params.analyse.inter |= XAVS_ANALYSE_BSUB16x16; - } - - x4->params.analyse.i_direct_mv_pred = avctx->directpred; - - x4->params.analyse.b_weighted_bipred = avctx->flags2 & CODEC_FLAG2_WPRED; switch (avctx->me_method) { case ME_EPZS: @@ -279,11 +322,9 @@ static av_cold int XAVS_init(AVCodecContext *avctx) x4->params.analyse.i_me_range = avctx->me_range; x4->params.analyse.i_subpel_refine = avctx->me_subpel_quality; - x4->params.analyse.b_mixed_references = avctx->flags2 & CODEC_FLAG2_MIXED_REFS; x4->params.analyse.b_chroma_me = avctx->me_cmp & FF_CMP_CHROMA; /* AVS P2 only enables 8x8 transform */ x4->params.analyse.b_transform_8x8 = 1; //avctx->flags2 & CODEC_FLAG2_8X8DCT; - x4->params.analyse.b_fast_pskip = avctx->flags2 & CODEC_FLAG2_FASTPSKIP; x4->params.analyse.i_trellis = avctx->trellis; x4->params.analyse.i_noise_reduction = avctx->noise_reduction; @@ -303,14 +344,12 @@ static av_cold int XAVS_init(AVCodecContext *avctx) /* TAG:do we have MB tree RC method */ /* what is the RC method we are now using? 
Default NO */ - x4->params.rc.b_mb_tree = !!(avctx->flags2 & CODEC_FLAG2_MBTREE); x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor); x4->params.rc.f_pb_factor = avctx->b_quant_factor; x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset; x4->params.analyse.b_psnr = avctx->flags & CODEC_FLAG_PSNR; x4->params.i_log_level = XAVS_LOG_DEBUG; - x4->params.b_aud = avctx->flags2 & CODEC_FLAG2_AUD; x4->params.i_threads = avctx->thread_count; x4->params.b_interlaced = avctx->flags & CODEC_FLAG_INTERLACED_DCT; @@ -336,6 +375,37 @@ static av_cold int XAVS_init(AVCodecContext *avctx) return 0; } +#define OFFSET(x) offsetof(XavsContext, x) +#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM +static const AVOption options[] = { + { "crf", "Select the quality for constant quality mode", OFFSET(crf), FF_OPT_TYPE_FLOAT, {-1 }, -1, FLT_MAX, VE }, + { "qp", "Constant quantization parameter rate control method",OFFSET(cqp), FF_OPT_TYPE_INT, {-1 }, -1, INT_MAX, VE }, + { "b-bias", "Influences how often B-frames are used", OFFSET(b_bias), FF_OPT_TYPE_INT, {INT_MIN}, INT_MIN, INT_MAX, VE }, + { "cplxblur", "Reduce fluctuations in QP (before curve compression)", OFFSET(cplxblur), FF_OPT_TYPE_FLOAT, {-1 }, -1, FLT_MAX, VE}, + { "direct-pred", "Direct MV prediction mode", OFFSET(direct_pred), FF_OPT_TYPE_INT, {-1 }, -1, INT_MAX, VE, "direct-pred" }, + { "none", NULL, 0, FF_OPT_TYPE_CONST, { XAVS_DIRECT_PRED_NONE }, 0, 0, VE, "direct-pred" }, + { "spatial", NULL, 0, FF_OPT_TYPE_CONST, { XAVS_DIRECT_PRED_SPATIAL }, 0, 0, VE, "direct-pred" }, + { "temporal", NULL, 0, FF_OPT_TYPE_CONST, { XAVS_DIRECT_PRED_TEMPORAL }, 0, 0, VE, "direct-pred" }, + { "auto", NULL, 0, FF_OPT_TYPE_CONST, { XAVS_DIRECT_PRED_AUTO }, 0, 0, VE, "direct-pred" }, + { "aud", "Use access unit delimiters.", OFFSET(aud), FF_OPT_TYPE_INT, {-1 }, -1, 1, VE}, + { "mbtree", "Use macroblock tree ratecontrol.", OFFSET(mbtree), FF_OPT_TYPE_INT, {-1 }, -1, 1, VE}, + { "mixed-refs", "One reference per partition, as opposed to one reference per macroblock", OFFSET(mixed_refs), FF_OPT_TYPE_INT, {-1}, -1, 1, VE }, + { "fast-pskip", NULL, OFFSET(fast_pskip), FF_OPT_TYPE_INT, {-1 }, -1, 1, VE}, + { NULL }, +}; + +static const AVClass class = { + .class_name = "libxavs", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + +static const AVCodecDefault xavs_defaults[] = { + { "b", "0" }, + { NULL }, +}; + AVCodec ff_libxavs_encoder = { .name = "libxavs", .type = AVMEDIA_TYPE_VIDEO, @@ -347,5 +417,7 @@ AVCodec ff_libxavs_encoder = { .capabilities = CODEC_CAP_DELAY, .pix_fmts = (const enum PixelFormat[]) { PIX_FMT_YUV420P, PIX_FMT_NONE }, .long_name = NULL_IF_CONFIG_SMALL("libxavs - the Chinese Audio Video Standard Encoder"), + .priv_class = &class, + .defaults = xavs_defaults, }; From ee42716b07073549cefabec466a33fad39ed5eb6 Mon Sep 17 00:00:00 2001 From: Stefano Sabatini Date: Sat, 23 Apr 2011 20:55:34 +0200 Subject: [PATCH 2/7] lavfi: add showinfo filter Signed-off-by: Anton Khirnov --- Changelog | 1 + doc/filters.texi | 59 +++++++++++++++++++++++ libavfilter/Makefile | 1 + libavfilter/allfilters.c | 1 + libavfilter/avfilter.h | 2 +- libavfilter/vf_showinfo.c | 98 +++++++++++++++++++++++++++++++++++++++ 6 files changed, 161 insertions(+), 1 deletion(-) create mode 100644 libavfilter/vf_showinfo.c diff --git a/Changelog b/Changelog index 8e9ed388d9..79bc96ebf2 100644 --- a/Changelog +++ b/Changelog @@ -43,6 +43,7 @@ easier to use. 
The changes are: - XMV demuxer - Windows Media Image decoder - LATM muxer +- showinfo filter version 0.7: diff --git a/doc/filters.texi b/doc/filters.texi index a8076b2bf5..d8d9062bb9 100644 --- a/doc/filters.texi +++ b/doc/filters.texi @@ -1210,6 +1210,65 @@ settb=2*intb settb=AVTB @end example +@section showinfo + +Show a line containing various information for each input video frame. +The input video is not modified. + +The shown line contains a sequence of key/value pairs of the form +@var{key}:@var{value}. + +A description of each shown parameter follows: + +@table @option +@item n +sequential number of the input frame, starting from 0 + +@item pts +Presentation TimeStamp of the input frame, expressed as a number of +time base units. The time base unit depends on the filter input pad. + +@item pts_time +Presentation TimeStamp of the input frame, expressed as a number of +seconds + +@item pos +position of the frame in the input stream, -1 if this information in +unavailable and/or meanigless (for example in case of synthetic video) + +@item fmt +pixel format name + +@item sar +sample aspect ratio of the input frame, expressed in the form +@var{num}/@var{den} + +@item s +size of the input frame, expressed in the form +@var{width}x@var{height} + +@item i +interlaced mode ("P" for "progressive", "T" for top field first, "B" +for bottom field first) + +@item iskey +1 if the frame is a key frame, 0 otherwise + +@item type +picture type of the input frame ("I" for an I-frame, "P" for a +P-frame, "B" for a B-frame, "?" for unknown type). +Check also the documentation of the @code{AVPictureType} enum and of +the @code{av_get_picture_type_char} function defined in +@file{libavutil/avutil.h}. + +@item checksum +Adler-32 checksum of all the planes of the input frame + +@item plane_checksum +Adler-32 checksum of each plane of the input frame, expressed in the form +"[@var{c0} @var{c1} @var{c2} @var{c3}]" +@end table + @section slicify Pass the images of input video on to next video filter as multiple diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 1a6fd9b68f..162ec94292 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -44,6 +44,7 @@ OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o OBJS-$(CONFIG_SETPTS_FILTER) += vf_setpts.o OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o OBJS-$(CONFIG_SETTB_FILTER) += vf_settb.o +OBJS-$(CONFIG_SHOWINFO_FILTER) += vf_showinfo.o OBJS-$(CONFIG_SLICIFY_FILTER) += vf_slicify.o OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o OBJS-$(CONFIG_UNSHARP_FILTER) += vf_unsharp.o diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index e29b4f97bd..e1b9333155 100644 --- a/libavfilter/allfilters.c +++ b/libavfilter/allfilters.c @@ -65,6 +65,7 @@ void avfilter_register_all(void) REGISTER_FILTER (SETPTS, setpts, vf); REGISTER_FILTER (SETSAR, setsar, vf); REGISTER_FILTER (SETTB, settb, vf); + REGISTER_FILTER (SHOWINFO, showinfo, vf); REGISTER_FILTER (SLICIFY, slicify, vf); REGISTER_FILTER (TRANSPOSE, transpose, vf); REGISTER_FILTER (UNSHARP, unsharp, vf); diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h index f8295e77c5..01af7b8e36 100644 --- a/libavfilter/avfilter.h +++ b/libavfilter/avfilter.h @@ -29,7 +29,7 @@ #include "libavutil/rational.h" #define LIBAVFILTER_VERSION_MAJOR 2 -#define LIBAVFILTER_VERSION_MINOR 4 +#define LIBAVFILTER_VERSION_MINOR 5 #define LIBAVFILTER_VERSION_MICRO 0 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ diff --git a/libavfilter/vf_showinfo.c b/libavfilter/vf_showinfo.c new file 
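The checksum and plane_checksum keys documented above are Adler-32 sums over the visible bytes of each plane: every row contributes av_image_get_linesize() bytes while the read pointer advances by the plane's linesize (stride), so alignment padding never enters the sum. A minimal, hedged sketch of that per-plane computation, kept separate from the filter implementation that follows; the function name and parameters are illustrative only:

#include <stdint.h>
#include "libavutil/adler32.h"

/* Illustrative only: width_bytes corresponds to av_image_get_linesize(),
 * stride to the plane's linesize[] entry. */
static uint32_t plane_adler32(const uint8_t *data, int stride,
                              int width_bytes, int height)
{
    uint32_t sum = 0;   /* showinfo seeds with 0 rather than Adler-32's usual 1 */
    int y;

    for (y = 0; y < height; y++) {
        sum  = av_adler32_update(sum, data, width_bytes); /* payload bytes only */
        data += stride;                                   /* skip row padding   */
    }
    return sum;
}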
mode 100644 index 0000000000..aa2a7f16f9 --- /dev/null +++ b/libavfilter/vf_showinfo.c @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2011 Stefano Sabatini + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * filter for showing textual video frame information + */ + +#include "libavutil/adler32.h" +#include "libavutil/imgutils.h" +#include "libavutil/pixdesc.h" +#include "avfilter.h" + +typedef struct { + unsigned int frame; +} ShowInfoContext; + +static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque) +{ + ShowInfoContext *showinfo = ctx->priv; + showinfo->frame = 0; + return 0; +} + +static void end_frame(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + ShowInfoContext *showinfo = ctx->priv; + AVFilterBufferRef *picref = inlink->cur_buf; + uint32_t plane_checksum[4] = {0}, checksum = 0; + int i, plane, vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h; + + for (plane = 0; picref->data[plane] && plane < 4; plane++) { + size_t linesize = av_image_get_linesize(picref->format, picref->video->w, plane); + uint8_t *data = picref->data[plane]; + int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h; + + for (i = 0; i < h; i++) { + plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize); + checksum = av_adler32_update(checksum, data, linesize); + data += picref->linesize[plane]; + } + } + + av_log(ctx, AV_LOG_INFO, + "n:%d pts:%"PRId64" pts_time:%f pos:%"PRId64" " + "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c " + "checksum:%u plane_checksum:[%u %u %u %u]\n", + showinfo->frame, + picref->pts, picref->pts * av_q2d(inlink->time_base), picref->pos, + av_pix_fmt_descriptors[picref->format].name, + picref->video->pixel_aspect.num, picref->video->pixel_aspect.den, + picref->video->w, picref->video->h, + !picref->video->interlaced ? 'P' : /* Progressive */ + picref->video->top_field_first ? 
'T' : 'B', /* Top / Bottom */ + picref->video->key_frame, + av_get_picture_type_char(picref->video->pict_type), + checksum, plane_checksum[0], plane_checksum[1], plane_checksum[2], plane_checksum[3]); + + showinfo->frame++; + avfilter_end_frame(inlink->dst->outputs[0]); +} + +AVFilter avfilter_vf_showinfo = { + .name = "showinfo", + .description = NULL_IF_CONFIG_SMALL("Show textual information for each video frame."), + + .priv_size = sizeof(ShowInfoContext), + .init = init, + + .inputs = (AVFilterPad[]) {{ .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = avfilter_null_get_video_buffer, + .start_frame = avfilter_null_start_frame, + .end_frame = end_frame, + .min_perms = AV_PERM_READ, }, + { .name = NULL}}, + + .outputs = (AVFilterPad[]) {{ .name = "default", + .type = AVMEDIA_TYPE_VIDEO }, + { .name = NULL}}, +}; From 615baa13e4a757776681a07432a58edad09b9333 Mon Sep 17 00:00:00 2001 From: Bobby Bingham Date: Sat, 21 May 2011 16:46:11 +0200 Subject: [PATCH 3/7] lavfi: add split filter from soc. Some fixes by Stefano. For detailed authorship see SOC repo Signed-off-by: Anton Khirnov --- Changelog | 1 + libavfilter/Makefile | 1 + libavfilter/allfilters.c | 1 + libavfilter/avfilter.h | 2 +- libavfilter/vf_split.c | 66 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 libavfilter/vf_split.c diff --git a/Changelog b/Changelog index 79bc96ebf2..8a36fdd7e0 100644 --- a/Changelog +++ b/Changelog @@ -44,6 +44,7 @@ easier to use. The changes are: - Windows Media Image decoder - LATM muxer - showinfo filter +- split filter version 0.7: diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 162ec94292..5844dd0581 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -46,6 +46,7 @@ OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o OBJS-$(CONFIG_SETTB_FILTER) += vf_settb.o OBJS-$(CONFIG_SHOWINFO_FILTER) += vf_showinfo.o OBJS-$(CONFIG_SLICIFY_FILTER) += vf_slicify.o +OBJS-$(CONFIG_SPLIT_FILTER) += vf_split.o OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o OBJS-$(CONFIG_UNSHARP_FILTER) += vf_unsharp.o OBJS-$(CONFIG_VFLIP_FILTER) += vf_vflip.o diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index e1b9333155..11e9d7d052 100644 --- a/libavfilter/allfilters.c +++ b/libavfilter/allfilters.c @@ -67,6 +67,7 @@ void avfilter_register_all(void) REGISTER_FILTER (SETTB, settb, vf); REGISTER_FILTER (SHOWINFO, showinfo, vf); REGISTER_FILTER (SLICIFY, slicify, vf); + REGISTER_FILTER (SPLIT, split, vf); REGISTER_FILTER (TRANSPOSE, transpose, vf); REGISTER_FILTER (UNSHARP, unsharp, vf); REGISTER_FILTER (VFLIP, vflip, vf); diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h index 01af7b8e36..3fcc8471e1 100644 --- a/libavfilter/avfilter.h +++ b/libavfilter/avfilter.h @@ -29,7 +29,7 @@ #include "libavutil/rational.h" #define LIBAVFILTER_VERSION_MAJOR 2 -#define LIBAVFILTER_VERSION_MINOR 5 +#define LIBAVFILTER_VERSION_MINOR 6 #define LIBAVFILTER_VERSION_MICRO 0 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ diff --git a/libavfilter/vf_split.c b/libavfilter/vf_split.c new file mode 100644 index 0000000000..54fdd21588 --- /dev/null +++ b/libavfilter/vf_split.c @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2007 Bobby Bingham + * + * This file is part of Libav. 
+ * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Video splitter + */ + +#include "avfilter.h" + +static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) +{ + avfilter_start_frame(inlink->dst->outputs[0], + avfilter_ref_buffer(picref, ~AV_PERM_WRITE)); + avfilter_start_frame(inlink->dst->outputs[1], + avfilter_ref_buffer(picref, ~AV_PERM_WRITE)); +} + +static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) +{ + avfilter_draw_slice(inlink->dst->outputs[0], y, h, slice_dir); + avfilter_draw_slice(inlink->dst->outputs[1], y, h, slice_dir); +} + +static void end_frame(AVFilterLink *inlink) +{ + avfilter_end_frame(inlink->dst->outputs[0]); + avfilter_end_frame(inlink->dst->outputs[1]); + + avfilter_unref_buffer(inlink->cur_buf); +} + +AVFilter avfilter_vf_split = { + .name = "split", + .description = NULL_IF_CONFIG_SMALL("Pass on the input to two outputs."), + + .inputs = (AVFilterPad[]) {{ .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer= avfilter_null_get_video_buffer, + .start_frame = start_frame, + .draw_slice = draw_slice, + .end_frame = end_frame, }, + { .name = NULL}}, + .outputs = (AVFilterPad[]) {{ .name = "output1", + .type = AVMEDIA_TYPE_VIDEO, }, + { .name = "output2", + .type = AVMEDIA_TYPE_VIDEO, }, + { .name = NULL}}, +}; From 47a8589f7bc69d1a29da1dfdfbd0dfa78a9e31fd Mon Sep 17 00:00:00 2001 From: Kostya Shishkov Date: Mon, 12 Sep 2011 09:40:42 +0200 Subject: [PATCH 4/7] smacker demuxer: handle possible av_realloc() failure. Signed-off-by: Anton Khirnov --- libavformat/smacker.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/libavformat/smacker.c b/libavformat/smacker.c index 51a3b1b8b8..274946696c 100644 --- a/libavformat/smacker.c +++ b/libavformat/smacker.c @@ -286,11 +286,16 @@ static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt) for(i = 0; i < 7; i++) { if(flags & 1) { int size; + uint8_t *tmpbuf; + size = avio_rl32(s->pb) - 4; frame_size -= size; frame_size -= 4; smk->curstream++; - smk->bufs[smk->curstream] = av_realloc(smk->bufs[smk->curstream], size); + tmpbuf = av_realloc(smk->bufs[smk->curstream], size); + if (!tmpbuf) + return AVERROR(ENOMEM); + smk->bufs[smk->curstream] = tmpbuf; smk->buf_sizes[smk->curstream] = size; ret = avio_read(s->pb, smk->bufs[smk->curstream], size); if(ret != size) From 23a1f0c59241465ba30103388029a7afc0ead909 Mon Sep 17 00:00:00 2001 From: Kostya Shishkov Date: Mon, 12 Sep 2011 11:39:53 +0200 Subject: [PATCH 5/7] rv34: check that subsequent slices have the same type as first one. This prevents some crashes when corrupted bitstream reports e.g. P-type slice in I-frame. Official RealVideo decoder demands all slices to be of the same type too. 
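A side note on the smacker change above: assigning av_realloc() straight back to the only pointer loses the old buffer when the call fails, so the fix stores the result in a temporary first. A minimal, hedged sketch of that leak-safe idiom in isolation; grow_buffer() and its parameters are illustrative, not code from the patch:

#include <errno.h>
#include <stdint.h>
#include "libavutil/error.h"
#include "libavutil/mem.h"

/* Illustrative helper: grow *buf to new_size without leaking it on failure. */
static int grow_buffer(uint8_t **buf, int *buf_size, int new_size)
{
    uint8_t *tmp = av_realloc(*buf, new_size);

    if (!tmp)
        return AVERROR(ENOMEM); /* *buf is still valid and still owned */
    *buf      = tmp;
    *buf_size = new_size;
    return 0;
}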
Signed-off-by: Anton Khirnov --- libavcodec/rv34.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c index 253c3952d2..80c88fb102 100644 --- a/libavcodec/rv34.c +++ b/libavcodec/rv34.c @@ -1336,6 +1336,13 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int } } s->mb_x = s->mb_y = 0; + } else { + int slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I; + + if (slice_type != s->pict_type) { + av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n"); + return AVERROR_INVALIDDATA; + } } r->si.end = end; From 57650c70e22b8259f4ac65d5826a667c8f67726e Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Mon, 12 Sep 2011 10:40:43 +0200 Subject: [PATCH 6/7] doc/avconv: fix typo. --- doc/avconv.texi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/avconv.texi b/doc/avconv.texi index 8f10fc5468..156b9c2906 100644 --- a/doc/avconv.texi +++ b/doc/avconv.texi @@ -30,7 +30,7 @@ As a general rule, options are applied to the next specified file. Therefore, order is important, and you can have the same option on the command line multiple times. Each occurrence is then applied to the next input or output file. -Exceptions from this rule are the global options (e.g. vebosity level), +Exceptions from this rule are the global options (e.g. verbosity level), which should be specified first. @itemize From 826c56d16e55f3819a75d01f957dd295aa1e9f3a Mon Sep 17 00:00:00 2001 From: Justin Ruggles Date: Wed, 7 Sep 2011 18:34:09 -0400 Subject: [PATCH 7/7] adpcm: split ADPCM encoders and decoders into separate files. Move shared tables to a separate file as well. --- libavcodec/Makefile | 40 +-- libavcodec/adpcm.c | 734 ++-------------------------------------- libavcodec/adpcm.h | 46 +++ libavcodec/adpcm_data.c | 78 +++++ libavcodec/adpcm_data.h | 37 ++ libavcodec/adpcmenc.c | 655 +++++++++++++++++++++++++++++++++++ 6 files changed, 861 insertions(+), 729 deletions(-) create mode 100644 libavcodec/adpcm.h create mode 100644 libavcodec/adpcm_data.c create mode 100644 libavcodec/adpcm_data.h create mode 100644 libavcodec/adpcmenc.c diff --git a/libavcodec/Makefile b/libavcodec/Makefile index 1bb6b090cc..7697f731b2 100644 --- a/libavcodec/Makefile +++ b/libavcodec/Makefile @@ -483,10 +483,10 @@ OBJS-$(CONFIG_PCM_U32LE_ENCODER) += pcm.o OBJS-$(CONFIG_PCM_ZORK_DECODER) += pcm.o OBJS-$(CONFIG_PCM_ZORK_ENCODER) += pcm.o -OBJS-$(CONFIG_ADPCM_4XM_DECODER) += adpcm.o +OBJS-$(CONFIG_ADPCM_4XM_DECODER) += adpcm.o adpcm_data.o OBJS-$(CONFIG_ADPCM_ADX_DECODER) += adxdec.o OBJS-$(CONFIG_ADPCM_ADX_ENCODER) += adxenc.o -OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o +OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o adpcm_data.o OBJS-$(CONFIG_ADPCM_EA_DECODER) += adpcm.o OBJS-$(CONFIG_ADPCM_EA_MAXIS_XA_DECODER) += adpcm.o OBJS-$(CONFIG_ADPCM_EA_R1_DECODER) += adpcm.o @@ -497,29 +497,29 @@ OBJS-$(CONFIG_ADPCM_G722_DECODER) += g722.o OBJS-$(CONFIG_ADPCM_G722_ENCODER) += g722.o OBJS-$(CONFIG_ADPCM_G726_DECODER) += g726.o OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o -OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_IMA_DK3_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_IMA_DK4_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_IMA_EA_EACS_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_IMA_EA_SEAD_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_IMA_ISS_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_IMA_QT_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_IMA_QT_ENCODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_IMA_SMJPEG_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_IMA_WAV_DECODER) += adpcm.o 
-OBJS-$(CONFIG_ADPCM_IMA_WAV_ENCODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_IMA_WS_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_MS_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_MS_ENCODER) += adpcm.o +OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_IMA_DK3_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_IMA_DK4_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_IMA_EA_EACS_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_IMA_EA_SEAD_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_IMA_ISS_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_IMA_QT_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_IMA_QT_ENCODER) += adpcmenc.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_IMA_SMJPEG_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_IMA_WAV_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_IMA_WAV_ENCODER) += adpcmenc.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_IMA_WS_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_MS_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_MS_ENCODER) += adpcmenc.o adpcm_data.o OBJS-$(CONFIG_ADPCM_SBPRO_2_DECODER) += adpcm.o OBJS-$(CONFIG_ADPCM_SBPRO_3_DECODER) += adpcm.o OBJS-$(CONFIG_ADPCM_SBPRO_4_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_SWF_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_SWF_ENCODER) += adpcm.o +OBJS-$(CONFIG_ADPCM_SWF_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_SWF_ENCODER) += adpcmenc.o adpcm_data.o OBJS-$(CONFIG_ADPCM_THP_DECODER) += adpcm.o OBJS-$(CONFIG_ADPCM_XA_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER) += adpcm.o -OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcm.o +OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER) += adpcm.o adpcm_data.o +OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcmenc.o adpcm_data.o # libavformat dependencies OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o diff --git a/libavcodec/adpcm.c b/libavcodec/adpcm.c index 70a5360ce8..c9ec0c3798 100644 --- a/libavcodec/adpcm.c +++ b/libavcodec/adpcm.c @@ -1,5 +1,4 @@ /* - * ADPCM codecs * Copyright (c) 2001-2003 The ffmpeg Project * * This file is part of Libav. @@ -22,10 +21,12 @@ #include "get_bits.h" #include "put_bits.h" #include "bytestream.h" +#include "adpcm.h" +#include "adpcm_data.h" /** * @file - * ADPCM codecs. + * ADPCM decoders * First version by Francois Revol (revol@free.fr) * Fringe ADPCM codecs (e.g., DK3, DK4, Westwood) * by Mike Melanson (melanson@pcisys.net) @@ -54,48 +55,6 @@ * readstr http://www.geocities.co.jp/Playtown/2004/ */ -#define BLKSIZE 1024 - -/* step_table[] and index_table[] are from the ADPCM reference source */ -/* This is the index table: */ -static const int index_table[16] = { - -1, -1, -1, -1, 2, 4, 6, 8, - -1, -1, -1, -1, 2, 4, 6, 8, -}; - -/** - * This is the step table. 
Note that many programs use slight deviations from - * this table, but such deviations are negligible: - */ -static const int step_table[89] = { - 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, - 19, 21, 23, 25, 28, 31, 34, 37, 41, 45, - 50, 55, 60, 66, 73, 80, 88, 97, 107, 118, - 130, 143, 157, 173, 190, 209, 230, 253, 279, 307, - 337, 371, 408, 449, 494, 544, 598, 658, 724, 796, - 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066, - 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358, - 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899, - 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767 -}; - -/* These are for MS-ADPCM */ -/* AdaptationTable[], AdaptCoeff1[], and AdaptCoeff2[] are from libsndfile */ -static const int AdaptationTable[] = { - 230, 230, 230, 230, 307, 409, 512, 614, - 768, 614, 512, 409, 307, 230, 230, 230 -}; - -/** Divided by 4 to fit in 8-bit integers */ -static const uint8_t AdaptCoeff1[] = { - 64, 128, 0, 48, 60, 115, 98 -}; - -/** Divided by 4 to fit in 8-bit integers */ -static const int8_t AdaptCoeff2[] = { - 0, -64, 0, 16, 0, -52, -58 -}; - /* These are for CD-ROM XA ADPCM */ static const int xa_adpcm_table[5][2] = { { 0, 0 }, @@ -118,632 +77,15 @@ static const int swf_index_tables[4][16] = { /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 } }; -static const int yamaha_indexscale[] = { - 230, 230, 230, 230, 307, 409, 512, 614, - 230, 230, 230, 230, 307, 409, 512, 614 -}; - -static const int yamaha_difflookup[] = { - 1, 3, 5, 7, 9, 11, 13, 15, - -1, -3, -5, -7, -9, -11, -13, -15 -}; - /* end of tables */ -typedef struct ADPCMChannelStatus { - int predictor; - short int step_index; - int step; - /* for encoding */ - int prev_sample; - - /* MS version */ - short sample1; - short sample2; - int coeff1; - int coeff2; - int idelta; -} ADPCMChannelStatus; - -typedef struct TrellisPath { - int nibble; - int prev; -} TrellisPath; - -typedef struct TrellisNode { - uint32_t ssd; - int path; - int sample1; - int sample2; - int step; -} TrellisNode; - -typedef struct ADPCMContext { +typedef struct ADPCMDecodeContext { ADPCMChannelStatus status[6]; - TrellisPath *paths; - TrellisNode *node_buf; - TrellisNode **nodep_buf; - uint8_t *trellis_hash; -} ADPCMContext; - -#define FREEZE_INTERVAL 128 - -/* XXX: implement encoding */ - -#if CONFIG_ENCODERS -static av_cold int adpcm_encode_init(AVCodecContext *avctx) -{ - ADPCMContext *s = avctx->priv_data; - uint8_t *extradata; - int i; - if (avctx->channels > 2) - return -1; /* only stereo or mono =) */ - - if(avctx->trellis && (unsigned)avctx->trellis > 16U){ - av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n"); - return -1; - } - - if (avctx->trellis) { - int frontier = 1 << avctx->trellis; - int max_paths = frontier * FREEZE_INTERVAL; - FF_ALLOC_OR_GOTO(avctx, s->paths, max_paths * sizeof(*s->paths), error); - FF_ALLOC_OR_GOTO(avctx, s->node_buf, 2 * frontier * sizeof(*s->node_buf), error); - FF_ALLOC_OR_GOTO(avctx, s->nodep_buf, 2 * frontier * sizeof(*s->nodep_buf), error); - FF_ALLOC_OR_GOTO(avctx, s->trellis_hash, 65536 * sizeof(*s->trellis_hash), error); - } - - switch(avctx->codec->id) { - case CODEC_ID_ADPCM_IMA_WAV: - avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16 bits sample gives one nibble */ - /* and we have 4 bytes per channel overhead */ - avctx->block_align = BLKSIZE; - /* seems frame_size isn't taken into account... 
have to buffer the samples :-( */ - break; - case CODEC_ID_ADPCM_IMA_QT: - avctx->frame_size = 64; - avctx->block_align = 34 * avctx->channels; - break; - case CODEC_ID_ADPCM_MS: - avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16 bits sample gives one nibble */ - /* and we have 7 bytes per channel overhead */ - avctx->block_align = BLKSIZE; - avctx->extradata_size = 32; - extradata = avctx->extradata = av_malloc(avctx->extradata_size); - if (!extradata) - return AVERROR(ENOMEM); - bytestream_put_le16(&extradata, avctx->frame_size); - bytestream_put_le16(&extradata, 7); /* wNumCoef */ - for (i = 0; i < 7; i++) { - bytestream_put_le16(&extradata, AdaptCoeff1[i] * 4); - bytestream_put_le16(&extradata, AdaptCoeff2[i] * 4); - } - break; - case CODEC_ID_ADPCM_YAMAHA: - avctx->frame_size = BLKSIZE * avctx->channels; - avctx->block_align = BLKSIZE; - break; - case CODEC_ID_ADPCM_SWF: - if (avctx->sample_rate != 11025 && - avctx->sample_rate != 22050 && - avctx->sample_rate != 44100) { - av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n"); - goto error; - } - avctx->frame_size = 512 * (avctx->sample_rate / 11025); - break; - default: - goto error; - } - - avctx->coded_frame= avcodec_alloc_frame(); - avctx->coded_frame->key_frame= 1; - - return 0; -error: - av_freep(&s->paths); - av_freep(&s->node_buf); - av_freep(&s->nodep_buf); - av_freep(&s->trellis_hash); - return -1; -} - -static av_cold int adpcm_encode_close(AVCodecContext *avctx) -{ - ADPCMContext *s = avctx->priv_data; - av_freep(&avctx->coded_frame); - av_freep(&s->paths); - av_freep(&s->node_buf); - av_freep(&s->nodep_buf); - av_freep(&s->trellis_hash); - - return 0; -} - - -static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample) -{ - int delta = sample - c->prev_sample; - int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8; - c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8); - c->prev_sample = av_clip_int16(c->prev_sample); - c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88); - return nibble; -} - -static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample) -{ - int predictor, nibble, bias; - - predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64; - - nibble= sample - predictor; - if(nibble>=0) bias= c->idelta/2; - else bias=-c->idelta/2; - - nibble= (nibble + bias) / c->idelta; - nibble= av_clip(nibble, -8, 7)&0x0F; - - predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta; - - c->sample2 = c->sample1; - c->sample1 = av_clip_int16(predictor); - - c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8; - if (c->idelta < 16) c->idelta = 16; - - return nibble; -} - -static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample) -{ - int nibble, delta; - - if(!c->step) { - c->predictor = 0; - c->step = 127; - } - - delta = sample - c->predictor; - - nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8; - - c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8); - c->predictor = av_clip_int16(c->predictor); - c->step = (c->step * yamaha_indexscale[nibble]) >> 8; - c->step = av_clip(c->step, 127, 24567); - - return nibble; -} - -static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples, - uint8_t *dst, ADPCMChannelStatus *c, int n) -{ - //FIXME 6% faster if frontier is a compile-time constant - ADPCMContext *s = avctx->priv_data; - 
const int frontier = 1 << avctx->trellis; - const int stride = avctx->channels; - const int version = avctx->codec->id; - TrellisPath *paths = s->paths, *p; - TrellisNode *node_buf = s->node_buf; - TrellisNode **nodep_buf = s->nodep_buf; - TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd - TrellisNode **nodes_next = nodep_buf + frontier; - int pathn = 0, froze = -1, i, j, k, generation = 0; - uint8_t *hash = s->trellis_hash; - memset(hash, 0xff, 65536 * sizeof(*hash)); - - memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf)); - nodes[0] = node_buf + frontier; - nodes[0]->ssd = 0; - nodes[0]->path = 0; - nodes[0]->step = c->step_index; - nodes[0]->sample1 = c->sample1; - nodes[0]->sample2 = c->sample2; - if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF)) - nodes[0]->sample1 = c->prev_sample; - if(version == CODEC_ID_ADPCM_MS) - nodes[0]->step = c->idelta; - if(version == CODEC_ID_ADPCM_YAMAHA) { - if(c->step == 0) { - nodes[0]->step = 127; - nodes[0]->sample1 = 0; - } else { - nodes[0]->step = c->step; - nodes[0]->sample1 = c->predictor; - } - } - - for(i=0; istep; - int nidx; - if(version == CODEC_ID_ADPCM_MS) { - const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64; - const int div = (sample - predictor) / step; - const int nmin = av_clip(div-range, -8, 6); - const int nmax = av_clip(div+range, -7, 7); - for(nidx=nmin; nidx<=nmax; nidx++) { - const int nibble = nidx & 0xf; - int dec_sample = predictor + nidx * step; -#define STORE_NODE(NAME, STEP_INDEX)\ - int d;\ - uint32_t ssd;\ - int pos;\ - TrellisNode *u;\ - uint8_t *h;\ - dec_sample = av_clip_int16(dec_sample);\ - d = sample - dec_sample;\ - ssd = nodes[j]->ssd + d*d;\ - /* Check for wraparound, skip such samples completely. \ - * Note, changing ssd to a 64 bit variable would be \ - * simpler, avoiding this check, but it's slower on \ - * x86 32 bit at the moment. */\ - if (ssd < nodes[j]->ssd)\ - goto next_##NAME;\ - /* Collapse any two states with the same previous sample value. \ - * One could also distinguish states by step and by 2nd to last - * sample, but the effects of that are negligible. - * Since nodes in the previous generation are iterated - * through a heap, they're roughly ordered from better to - * worse, but not strictly ordered. Therefore, an earlier - * node with the same sample value is better in most cases - * (and thus the current is skipped), but not strictly - * in all cases. Only skipping samples where ssd >= - * ssd of the earlier node with the same sample gives - * slightly worse quality, though, for some reason. */ \ - h = &hash[(uint16_t) dec_sample];\ - if (*h == generation)\ - goto next_##NAME;\ - if (heap_pos < frontier) {\ - pos = heap_pos++;\ - } else {\ - /* Try to replace one of the leaf nodes with the new \ - * one, but try a different slot each time. */\ - pos = (frontier >> 1) + (heap_pos & ((frontier >> 1) - 1));\ - if (ssd > nodes_next[pos]->ssd)\ - goto next_##NAME;\ - heap_pos++;\ - }\ - *h = generation;\ - u = nodes_next[pos];\ - if(!u) {\ - assert(pathn < FREEZE_INTERVAL<trellis);\ - u = t++;\ - nodes_next[pos] = u;\ - u->path = pathn++;\ - }\ - u->ssd = ssd;\ - u->step = STEP_INDEX;\ - u->sample2 = nodes[j]->sample1;\ - u->sample1 = dec_sample;\ - paths[u->path].nibble = nibble;\ - paths[u->path].prev = nodes[j]->path;\ - /* Sift the newly inserted node up in the heap to \ - * restore the heap property. 
*/\ - while (pos > 0) {\ - int parent = (pos - 1) >> 1;\ - if (nodes_next[parent]->ssd <= ssd)\ - break;\ - FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\ - pos = parent;\ - }\ - next_##NAME:; - STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8)); - } - } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) { -#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\ - const int predictor = nodes[j]->sample1;\ - const int div = (sample - predictor) * 4 / STEP_TABLE;\ - int nmin = av_clip(div-range, -7, 6);\ - int nmax = av_clip(div+range, -6, 7);\ - if(nmin<=0) nmin--; /* distinguish -0 from +0 */\ - if(nmax<0) nmax--;\ - for(nidx=nmin; nidx<=nmax; nidx++) {\ - const int nibble = nidx<0 ? 7-nidx : nidx;\ - int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\ - STORE_NODE(NAME, STEP_INDEX);\ - } - LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88)); - } else { //CODEC_ID_ADPCM_YAMAHA - LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567)); -#undef LOOP_NODES -#undef STORE_NODE - } - } - - u = nodes; - nodes = nodes_next; - nodes_next = u; - - generation++; - if (generation == 255) { - memset(hash, 0xff, 65536 * sizeof(*hash)); - generation = 0; - } - - // prevent overflow - if(nodes[0]->ssd > (1<<28)) { - for(j=1; jssd -= nodes[0]->ssd; - nodes[0]->ssd = 0; - } - - // merge old paths to save memory - if(i == froze + FREEZE_INTERVAL) { - p = &paths[nodes[0]->path]; - for(k=i; k>froze; k--) { - dst[k] = p->nibble; - p = &paths[p->prev]; - } - froze = i; - pathn = 0; - // other nodes might use paths that don't coincide with the frozen one. - // checking which nodes do so is too slow, so just kill them all. - // this also slightly improves quality, but I don't know why. - memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*)); - } - } - - p = &paths[nodes[0]->path]; - for(i=n-1; i>froze; i--) { - dst[i] = p->nibble; - p = &paths[p->prev]; - } - - c->predictor = nodes[0]->sample1; - c->sample1 = nodes[0]->sample1; - c->sample2 = nodes[0]->sample2; - c->step_index = nodes[0]->step; - c->step = nodes[0]->step; - c->idelta = nodes[0]->step; -} - -static int adpcm_encode_frame(AVCodecContext *avctx, - unsigned char *frame, int buf_size, void *data) -{ - int n, i, st; - short *samples; - unsigned char *dst; - ADPCMContext *c = avctx->priv_data; - uint8_t *buf; - - dst = frame; - samples = (short *)data; - st= avctx->channels == 2; -/* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */ - - switch(avctx->codec->id) { - case CODEC_ID_ADPCM_IMA_WAV: - n = avctx->frame_size / 8; - c->status[0].prev_sample = (signed short)samples[0]; /* XXX */ -/* c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */ - bytestream_put_le16(&dst, c->status[0].prev_sample); - *dst++ = (unsigned char)c->status[0].step_index; - *dst++ = 0; /* unknown */ - samples++; - if (avctx->channels == 2) { - c->status[1].prev_sample = (signed short)samples[0]; -/* c->status[1].step_index = 0; */ - bytestream_put_le16(&dst, c->status[1].prev_sample); - *dst++ = (unsigned char)c->status[1].step_index; - *dst++ = 0; - samples++; - } - - /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... 
*/ - if(avctx->trellis > 0) { - FF_ALLOC_OR_GOTO(avctx, buf, 2*n*8, error); - adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n*8); - if(avctx->channels == 2) - adpcm_compress_trellis(avctx, samples+1, buf + n*8, &c->status[1], n*8); - for(i=0; ichannels == 2) { - uint8_t *buf1 = buf + n*8; - *dst++ = buf1[8*i+0] | (buf1[8*i+1] << 4); - *dst++ = buf1[8*i+2] | (buf1[8*i+3] << 4); - *dst++ = buf1[8*i+4] | (buf1[8*i+5] << 4); - *dst++ = buf1[8*i+6] | (buf1[8*i+7] << 4); - } - } - av_free(buf); - } else - for (; n>0; n--) { - *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]); - *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4; - dst++; - *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]); - *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4; - dst++; - *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]); - *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4; - dst++; - *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]); - *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4; - dst++; - /* right channel */ - if (avctx->channels == 2) { - *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]); - *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4; - dst++; - *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]); - *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4; - dst++; - *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]); - *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4; - dst++; - *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]); - *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4; - dst++; - } - samples += 8 * avctx->channels; - } - break; - case CODEC_ID_ADPCM_IMA_QT: - { - int ch, i; - PutBitContext pb; - init_put_bits(&pb, dst, buf_size*8); - - for(ch=0; chchannels; ch++){ - put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7); - put_bits(&pb, 7, c->status[ch].step_index); - if(avctx->trellis > 0) { - uint8_t buf[64]; - adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64); - for(i=0; i<64; i++) - put_bits(&pb, 4, buf[i^1]); - c->status[ch].prev_sample = c->status[ch].predictor & ~0x7F; - } else { - for (i=0; i<64; i+=2){ - int t1, t2; - t1 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]); - t2 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]); - put_bits(&pb, 4, t2); - put_bits(&pb, 4, t1); - } - c->status[ch].prev_sample &= ~0x7F; - } - } - - flush_put_bits(&pb); - dst += put_bits_count(&pb)>>3; - break; - } - case CODEC_ID_ADPCM_SWF: - { - int i; - PutBitContext pb; - init_put_bits(&pb, dst, buf_size*8); - - n = avctx->frame_size-1; - - //Store AdpcmCodeSize - put_bits(&pb, 2, 2); //Set 4bits flash adpcm format - - //Init the encoder state - for(i=0; ichannels; i++){ - c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits - put_sbits(&pb, 16, samples[i]); - put_bits(&pb, 6, c->status[i].step_index); - c->status[i].prev_sample = (signed short)samples[i]; - } - - if(avctx->trellis > 0) { - FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error); - adpcm_compress_trellis(avctx, samples+2, buf, &c->status[0], n); - if (avctx->channels == 2) - adpcm_compress_trellis(avctx, samples+3, buf+n, &c->status[1], n); - for(i=0; ichannels == 2) - put_bits(&pb, 4, 
buf[n+i]); - } - av_free(buf); - } else { - for (i=1; iframe_size; i++) { - put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i])); - if (avctx->channels == 2) - put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1])); - } - } - flush_put_bits(&pb); - dst += put_bits_count(&pb)>>3; - break; - } - case CODEC_ID_ADPCM_MS: - for(i=0; ichannels; i++){ - int predictor=0; - - *dst++ = predictor; - c->status[i].coeff1 = AdaptCoeff1[predictor]; - c->status[i].coeff2 = AdaptCoeff2[predictor]; - } - for(i=0; ichannels; i++){ - if (c->status[i].idelta < 16) - c->status[i].idelta = 16; - - bytestream_put_le16(&dst, c->status[i].idelta); - } - for(i=0; ichannels; i++){ - c->status[i].sample2= *samples++; - } - for(i=0; ichannels; i++){ - c->status[i].sample1= *samples++; - - bytestream_put_le16(&dst, c->status[i].sample1); - } - for(i=0; ichannels; i++) - bytestream_put_le16(&dst, c->status[i].sample2); - - if(avctx->trellis > 0) { - int n = avctx->block_align - 7*avctx->channels; - FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error); - if(avctx->channels == 1) { - adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); - for(i=0; istatus[0], n); - adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n); - for(i=0; ichannels; iblock_align; i++) { - int nibble; - nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4; - nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++); - *dst++ = nibble; - } - break; - case CODEC_ID_ADPCM_YAMAHA: - n = avctx->frame_size / 2; - if(avctx->trellis > 0) { - FF_ALLOC_OR_GOTO(avctx, buf, 2*n*2, error); - n *= 2; - if(avctx->channels == 1) { - adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); - for(i=0; istatus[0], n); - adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n); - for(i=0; ichannels; n>0; n--) { - int nibble; - nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++); - nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4; - *dst++ = nibble; - } - break; - default: - error: - return -1; - } - return dst - frame; -} -#endif //CONFIG_ENCODERS +} ADPCMDecodeContext; static av_cold int adpcm_decode_init(AVCodecContext * avctx) { - ADPCMContext *c = avctx->priv_data; + ADPCMDecodeContext *c = avctx->priv_data; unsigned int max_channels = 2; switch(avctx->codec->id) { @@ -786,8 +128,8 @@ static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int predictor; int sign, delta, diff, step; - step = step_table[c->step_index]; - step_index = c->step_index + index_table[(unsigned)nibble]; + step = ff_adpcm_step_table[c->step_index]; + step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble]; if (step_index < 0) step_index = 0; else if (step_index > 88) step_index = 88; @@ -816,7 +158,7 @@ static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble) c->sample2 = c->sample1; c->sample1 = av_clip_int16(predictor); - c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8; + c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8; if (c->idelta < 16) c->idelta = 16; return c->sample1; @@ -837,7 +179,7 @@ static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble) c->predictor = ((c->predictor * 254) >> 8) + (sign ? 
-diff : diff); c->predictor = av_clip_int16(c->predictor); /* calculate new step and clamp it to range 511..32767 */ - new_step = (AdaptationTable[nibble & 7] * c->step) >> 8; + new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8; c->step = av_clip(new_step, 511, 32767); return (short)c->predictor; @@ -870,9 +212,9 @@ static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned c c->step = 127; } - c->predictor += (c->step * yamaha_difflookup[nibble]) / 8; + c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8; c->predictor = av_clip_int16(c->predictor); - c->step = (c->step * yamaha_indexscale[nibble]) >> 8; + c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8; c->step = av_clip(c->step, 127, 24567); return c->predictor; } @@ -964,7 +306,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; - ADPCMContext *c = avctx->priv_data; + ADPCMDecodeContext *c = avctx->priv_data; ADPCMChannelStatus *cs; int n, m, channel, i; int block_predictor[2]; @@ -1030,7 +372,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, cs->step_index = 88; } - cs->step = step_table[cs->step_index]; + cs->step = ff_adpcm_step_table[cs->step_index]; samples = (short*)data + channel; @@ -1114,10 +456,10 @@ static int adpcm_decode_frame(AVCodecContext *avctx, if (st){ c->status[1].idelta = (int16_t)bytestream_get_le16(&src); } - c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]]; - c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]]; - c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]]; - c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]]; + c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor[0]]; + c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor[0]]; + c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor[1]]; + c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor[1]]; c->status[0].sample1 = bytestream_get_le16(&src); if (st) c->status[1].sample1 = bytestream_get_le16(&src); @@ -1586,7 +928,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, for (i = 0; i < avctx->channels; i++) { // similar to IMA adpcm int delta = get_bits(&gb, nb_bits); - int step = step_table[c->status[i].step_index]; + int step = ff_adpcm_step_table[c->status[i].step_index]; long vpdiff = 0; // vpdiff = (delta+0.5)*step/4 int k = k0; @@ -1705,44 +1047,18 @@ static int adpcm_decode_frame(AVCodecContext *avctx, } - -#if CONFIG_ENCODERS -#define ADPCM_ENCODER(id,name,long_name_) \ -AVCodec ff_ ## name ## _encoder = { \ - #name, \ - AVMEDIA_TYPE_AUDIO, \ - id, \ - sizeof(ADPCMContext), \ - adpcm_encode_init, \ - adpcm_encode_frame, \ - adpcm_encode_close, \ - NULL, \ - .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, \ - .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ -} -#else -#define ADPCM_ENCODER(id,name,long_name_) -#endif - -#if CONFIG_DECODERS #define ADPCM_DECODER(id,name,long_name_) \ AVCodec ff_ ## name ## _decoder = { \ #name, \ AVMEDIA_TYPE_AUDIO, \ id, \ - sizeof(ADPCMContext), \ + sizeof(ADPCMDecodeContext), \ adpcm_decode_init, \ NULL, \ NULL, \ adpcm_decode_frame, \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ } -#else -#define ADPCM_DECODER(id,name,long_name_) -#endif - -#define ADPCM_CODEC(id,name,long_name_) \ - ADPCM_ENCODER(id,name,long_name_); ADPCM_DECODER(id,name,long_name_) /* Note: Do not forget to add new entries to the Makefile as well. 
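The decoder hunks above only rename the tables to their shared ff_adpcm_* versions; the IMA expansion step itself is unchanged. For reference, a hedged, self-contained sketch of that step as given in the IMA reference sources; the exact rounding in libavcodec's adpcm_ima_expand_nibble differs slightly, and the function below is illustrative, not the library code:

#include "libavutil/common.h"   /* av_clip(), av_clip_int16() */
#include "adpcm_data.h"         /* ff_adpcm_step_table, ff_adpcm_index_table */

/* Illustrative IMA ADPCM decode of one 4-bit nibble.
 * Example: step_index 20 (step 50), nibble 0x5 -> diff = 6 + 50 + 12 = 68,
 * predictor += 68, step_index becomes 24. */
static int ima_expand_nibble(int *predictor, int *step_index, int nibble)
{
    int step = ff_adpcm_step_table[*step_index];
    int diff = step >> 3;

    if (nibble & 4) diff += step;
    if (nibble & 2) diff += step >> 1;
    if (nibble & 1) diff += step >> 2;
    if (nibble & 8) *predictor -= diff;
    else            *predictor += diff;

    *predictor  = av_clip_int16(*predictor);
    *step_index = av_clip(*step_index + ff_adpcm_index_table[nibble], 0, 88);
    return *predictor;
}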
*/ ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie"); @@ -1759,15 +1075,15 @@ ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS"); -ADPCM_CODEC (CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime"); +ADPCM_DECODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG"); -ADPCM_CODEC (CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV"); +ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV"); ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood"); -ADPCM_CODEC (CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft"); +ADPCM_DECODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft"); ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit"); ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit"); ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit"); -ADPCM_CODEC (CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash"); +ADPCM_DECODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash"); ADPCM_DECODER(CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP"); ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA"); -ADPCM_CODEC (CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha"); +ADPCM_DECODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha"); diff --git a/libavcodec/adpcm.h b/libavcodec/adpcm.h new file mode 100644 index 0000000000..aed5048d4a --- /dev/null +++ b/libavcodec/adpcm.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2001-2003 The ffmpeg Project + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * ADPCM encoder/decoder common header. + */ + +#ifndef AVCODEC_ADPCM_H +#define AVCODEC_ADPCM_H + +#define BLKSIZE 1024 + +typedef struct ADPCMChannelStatus { + int predictor; + short int step_index; + int step; + /* for encoding */ + int prev_sample; + + /* MS version */ + short sample1; + short sample2; + int coeff1; + int coeff2; + int idelta; +} ADPCMChannelStatus; + +#endif /* AVCODEC_ADPCM_H */ diff --git a/libavcodec/adpcm_data.c b/libavcodec/adpcm_data.c new file mode 100644 index 0000000000..9dc5670bfc --- /dev/null +++ b/libavcodec/adpcm_data.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2001-2003 The ffmpeg Project + * + * This file is part of Libav. 
+ * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * ADPCM tables + */ + +#include + +/* ff_adpcm_step_table[] and ff_adpcm_index_table[] are from the ADPCM + reference source */ +/* This is the index table: */ +const int8_t ff_adpcm_index_table[16] = { + -1, -1, -1, -1, 2, 4, 6, 8, + -1, -1, -1, -1, 2, 4, 6, 8, +}; + +/** + * This is the step table. Note that many programs use slight deviations from + * this table, but such deviations are negligible: + */ +const int16_t ff_adpcm_step_table[89] = { + 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, + 19, 21, 23, 25, 28, 31, 34, 37, 41, 45, + 50, 55, 60, 66, 73, 80, 88, 97, 107, 118, + 130, 143, 157, 173, 190, 209, 230, 253, 279, 307, + 337, 371, 408, 449, 494, 544, 598, 658, 724, 796, + 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066, + 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358, + 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899, + 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767 +}; + +/* These are for MS-ADPCM */ +/* ff_adpcm_AdaptationTable[], ff_adpcm_AdaptCoeff1[], and + ff_adpcm_AdaptCoeff2[] are from libsndfile */ +const int16_t ff_adpcm_AdaptationTable[] = { + 230, 230, 230, 230, 307, 409, 512, 614, + 768, 614, 512, 409, 307, 230, 230, 230 +}; + +/** Divided by 4 to fit in 8-bit integers */ +const uint8_t ff_adpcm_AdaptCoeff1[] = { + 64, 128, 0, 48, 60, 115, 98 +}; + +/** Divided by 4 to fit in 8-bit integers */ +const int8_t ff_adpcm_AdaptCoeff2[] = { + 0, -64, 0, 16, 0, -52, -58 +}; + +const int16_t ff_adpcm_yamaha_indexscale[] = { + 230, 230, 230, 230, 307, 409, 512, 614, + 230, 230, 230, 230, 307, 409, 512, 614 +}; + +const int8_t ff_adpcm_yamaha_difflookup[] = { + 1, 3, 5, 7, 9, 11, 13, 15, + -1, -3, -5, -7, -9, -11, -13, -15 +}; diff --git a/libavcodec/adpcm_data.h b/libavcodec/adpcm_data.h new file mode 100644 index 0000000000..baca426537 --- /dev/null +++ b/libavcodec/adpcm_data.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2001-2003 The ffmpeg Project + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * ADPCM tables + */ + +#ifndef AVCODEC_ADPCM_DATA_H +#define AVCODEC_ADPCM_DATA_H + +extern const int8_t ff_adpcm_index_table[16]; +extern const int16_t ff_adpcm_step_table[89]; +extern const int16_t ff_adpcm_AdaptationTable[]; +extern const uint8_t ff_adpcm_AdaptCoeff1[]; +extern const int8_t ff_adpcm_AdaptCoeff2[]; +extern const int16_t ff_adpcm_yamaha_indexscale[]; +extern const int8_t ff_adpcm_yamaha_difflookup[]; + +#endif /* AVCODEC_ADPCM_DATA_H */ diff --git a/libavcodec/adpcmenc.c b/libavcodec/adpcmenc.c new file mode 100644 index 0000000000..ec062849bd --- /dev/null +++ b/libavcodec/adpcmenc.c @@ -0,0 +1,655 @@ +/* + * Copyright (c) 2001-2003 The ffmpeg Project + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "avcodec.h" +#include "get_bits.h" +#include "put_bits.h" +#include "bytestream.h" +#include "adpcm.h" +#include "adpcm_data.h" + +/** + * @file + * ADPCM encoders + * First version by Francois Revol (revol@free.fr) + * Fringe ADPCM codecs (e.g., DK3, DK4, Westwood) + * by Mike Melanson (melanson@pcisys.net) + * + * Reference documents: + * http://www.pcisys.net/~melanson/codecs/simpleaudio.html + * http://www.geocities.com/SiliconValley/8682/aud3.txt + * http://openquicktime.sourceforge.net/plugins.htm + * XAnim sources (xa_codec.c) http://www.rasnaimaging.com/people/lapus/download.html + * http://www.cs.ucla.edu/~leec/mediabench/applications.html + * SoX source code http://home.sprynet.com/~cbagwell/sox.html + */ + +typedef struct TrellisPath { + int nibble; + int prev; +} TrellisPath; + +typedef struct TrellisNode { + uint32_t ssd; + int path; + int sample1; + int sample2; + int step; +} TrellisNode; + +typedef struct ADPCMEncodeContext { + ADPCMChannelStatus status[6]; + TrellisPath *paths; + TrellisNode *node_buf; + TrellisNode **nodep_buf; + uint8_t *trellis_hash; +} ADPCMEncodeContext; + +#define FREEZE_INTERVAL 128 + +static av_cold int adpcm_encode_init(AVCodecContext *avctx) +{ + ADPCMEncodeContext *s = avctx->priv_data; + uint8_t *extradata; + int i; + if (avctx->channels > 2) + return -1; /* only stereo or mono =) */ + + if(avctx->trellis && (unsigned)avctx->trellis > 16U){ + av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n"); + return -1; + } + + if (avctx->trellis) { + int frontier = 1 << avctx->trellis; + int max_paths = frontier * FREEZE_INTERVAL; + FF_ALLOC_OR_GOTO(avctx, s->paths, max_paths * sizeof(*s->paths), error); + FF_ALLOC_OR_GOTO(avctx, s->node_buf, 2 * frontier * sizeof(*s->node_buf), error); + FF_ALLOC_OR_GOTO(avctx, s->nodep_buf, 2 * frontier * sizeof(*s->nodep_buf), error); + 
FF_ALLOC_OR_GOTO(avctx, s->trellis_hash, 65536 * sizeof(*s->trellis_hash), error); + } + + switch(avctx->codec->id) { + case CODEC_ID_ADPCM_IMA_WAV: + avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16 bits sample gives one nibble */ + /* and we have 4 bytes per channel overhead */ + avctx->block_align = BLKSIZE; + /* seems frame_size isn't taken into account... have to buffer the samples :-( */ + break; + case CODEC_ID_ADPCM_IMA_QT: + avctx->frame_size = 64; + avctx->block_align = 34 * avctx->channels; + break; + case CODEC_ID_ADPCM_MS: + avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16 bits sample gives one nibble */ + /* and we have 7 bytes per channel overhead */ + avctx->block_align = BLKSIZE; + avctx->extradata_size = 32; + extradata = avctx->extradata = av_malloc(avctx->extradata_size); + if (!extradata) + return AVERROR(ENOMEM); + bytestream_put_le16(&extradata, avctx->frame_size); + bytestream_put_le16(&extradata, 7); /* wNumCoef */ + for (i = 0; i < 7; i++) { + bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4); + bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4); + } + break; + case CODEC_ID_ADPCM_YAMAHA: + avctx->frame_size = BLKSIZE * avctx->channels; + avctx->block_align = BLKSIZE; + break; + case CODEC_ID_ADPCM_SWF: + if (avctx->sample_rate != 11025 && + avctx->sample_rate != 22050 && + avctx->sample_rate != 44100) { + av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n"); + goto error; + } + avctx->frame_size = 512 * (avctx->sample_rate / 11025); + break; + default: + goto error; + } + + avctx->coded_frame= avcodec_alloc_frame(); + avctx->coded_frame->key_frame= 1; + + return 0; +error: + av_freep(&s->paths); + av_freep(&s->node_buf); + av_freep(&s->nodep_buf); + av_freep(&s->trellis_hash); + return -1; +} + +static av_cold int adpcm_encode_close(AVCodecContext *avctx) +{ + ADPCMEncodeContext *s = avctx->priv_data; + av_freep(&avctx->coded_frame); + av_freep(&s->paths); + av_freep(&s->node_buf); + av_freep(&s->nodep_buf); + av_freep(&s->trellis_hash); + + return 0; +} + + +static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample) +{ + int delta = sample - c->prev_sample; + int nibble = FFMIN(7, abs(delta)*4/ff_adpcm_step_table[c->step_index]) + (delta<0)*8; + c->prev_sample += ((ff_adpcm_step_table[c->step_index] * ff_adpcm_yamaha_difflookup[nibble]) / 8); + c->prev_sample = av_clip_int16(c->prev_sample); + c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88); + return nibble; +} + +static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample) +{ + int predictor, nibble, bias; + + predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64; + + nibble= sample - predictor; + if(nibble>=0) bias= c->idelta/2; + else bias=-c->idelta/2; + + nibble= (nibble + bias) / c->idelta; + nibble= av_clip(nibble, -8, 7)&0x0F; + + predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta; + + c->sample2 = c->sample1; + c->sample1 = av_clip_int16(predictor); + + c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8; + if (c->idelta < 16) c->idelta = 16; + + return nibble; +} + +static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample) +{ + int nibble, delta; + + if(!c->step) { + c->predictor = 0; + c->step = 127; + } + + delta = sample - c->predictor; + + nibble = FFMIN(7, abs(delta)*4/c->step) + 
(delta<0)*8;
+
+    c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
+    c->predictor = av_clip_int16(c->predictor);
+    c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
+    c->step = av_clip(c->step, 127, 24567);
+
+    return nibble;
+}
+
+static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
+                                   uint8_t *dst, ADPCMChannelStatus *c, int n)
+{
+    //FIXME 6% faster if frontier is a compile-time constant
+    ADPCMEncodeContext *s = avctx->priv_data;
+    const int frontier = 1 << avctx->trellis;
+    const int stride = avctx->channels;
+    const int version = avctx->codec->id;
+    TrellisPath *paths = s->paths, *p;
+    TrellisNode *node_buf = s->node_buf;
+    TrellisNode **nodep_buf = s->nodep_buf;
+    TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd
+    TrellisNode **nodes_next = nodep_buf + frontier;
+    int pathn = 0, froze = -1, i, j, k, generation = 0;
+    uint8_t *hash = s->trellis_hash;
+    memset(hash, 0xff, 65536 * sizeof(*hash));
+
+    memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
+    nodes[0] = node_buf + frontier;
+    nodes[0]->ssd = 0;
+    nodes[0]->path = 0;
+    nodes[0]->step = c->step_index;
+    nodes[0]->sample1 = c->sample1;
+    nodes[0]->sample2 = c->sample2;
+    if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF))
+        nodes[0]->sample1 = c->prev_sample;
+    if(version == CODEC_ID_ADPCM_MS)
+        nodes[0]->step = c->idelta;
+    if(version == CODEC_ID_ADPCM_YAMAHA) {
+        if(c->step == 0) {
+            nodes[0]->step = 127;
+            nodes[0]->sample1 = 0;
+        } else {
+            nodes[0]->step = c->step;
+            nodes[0]->sample1 = c->predictor;
+        }
+    }
+
+    for(i=0; i<n; i++) {
+        TrellisNode *t = node_buf + frontier*(i&1);
+        TrellisNode **u;
+        int sample = samples[i*stride];
+        int heap_pos = 0;
+        memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
+        for(j=0; j<frontier && nodes[j]; j++) {
+            // higher j have higher ssd(j), so they're likely to yield a suboptimal next sample too
+            const int range = (j < frontier/2) ? 1 : 0;
+            const int step = nodes[j]->step;
+            int nidx;
+            if(version == CODEC_ID_ADPCM_MS) {
+                const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64;
+                const int div = (sample - predictor) / step;
+                const int nmin = av_clip(div-range, -8, 6);
+                const int nmax = av_clip(div+range, -7, 7);
+                for(nidx=nmin; nidx<=nmax; nidx++) {
+                    const int nibble = nidx & 0xf;
+                    int dec_sample = predictor + nidx * step;
+#define STORE_NODE(NAME, STEP_INDEX)\
+                    int d;\
+                    uint32_t ssd;\
+                    int pos;\
+                    TrellisNode *u;\
+                    uint8_t *h;\
+                    dec_sample = av_clip_int16(dec_sample);\
+                    d = sample - dec_sample;\
+                    ssd = nodes[j]->ssd + d*d;\
+                    /* Check for wraparound, skip such samples completely. \
+                     * Note, changing ssd to a 64 bit variable would be \
+                     * simpler, avoiding this check, but it's slower on \
+                     * x86 32 bit at the moment. */\
+                    if (ssd < nodes[j]->ssd)\
+                        goto next_##NAME;\
+                    /* Collapse any two states with the same previous sample value. \
+                     * One could also distinguish states by step and by 2nd to last \
+                     * sample, but the effects of that are negligible. \
+                     * Since nodes in the previous generation are iterated \
+                     * through a heap, they're roughly ordered from better to \
+                     * worse, but not strictly ordered. Therefore, an earlier \
+                     * node with the same sample value is better in most cases \
+                     * (and thus the current is skipped), but not strictly \
+                     * in all cases. Only skipping samples where ssd >= \
+                     * ssd of the earlier node with the same sample gives \
+                     * slightly worse quality, though, for some reason. */ \
+                    h = &hash[(uint16_t) dec_sample];\
+                    if (*h == generation)\
+                        goto next_##NAME;\
+                    if (heap_pos < frontier) {\
+                        pos = heap_pos++;\
+                    } else {\
+                        /* Try to replace one of the leaf nodes with the new \
+                         * one, but try a different slot each time. */\
+                        pos = (frontier >> 1) + (heap_pos & ((frontier >> 1) - 1));\
+                        if (ssd > nodes_next[pos]->ssd)\
+                            goto next_##NAME;\
+                        heap_pos++;\
+                    }\
+                    *h = generation;\
+                    u = nodes_next[pos];\
+                    if(!u) {\
+                        assert(pathn < FREEZE_INTERVAL<<avctx->trellis);\
+                        u = t++;\
+                        nodes_next[pos] = u;\
+                        u->path = pathn++;\
+                    }\
+                    u->ssd = ssd;\
+                    u->step = STEP_INDEX;\
+                    u->sample2 = nodes[j]->sample1;\
+                    u->sample1 = dec_sample;\
+                    paths[u->path].nibble = nibble;\
+                    paths[u->path].prev = nodes[j]->path;\
+                    /* Sift the newly inserted node up in the heap to \
+                     * restore the heap property. */\
+                    while (pos > 0) {\
+                        int parent = (pos - 1) >> 1;\
+                        if (nodes_next[parent]->ssd <= ssd)\
+                            break;\
+                        FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
+                        pos = parent;\
+                    }\
+                    next_##NAME:;
+                    STORE_NODE(ms, FFMAX(16, (ff_adpcm_AdaptationTable[nibble] * step) >> 8));
+                }
+            } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) {
+#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
+                const int predictor = nodes[j]->sample1;\
+                const int div = (sample - predictor) * 4 / STEP_TABLE;\
+                int nmin = av_clip(div-range, -7, 6);\
+                int nmax = av_clip(div+range, -6, 7);\
+                if(nmin<=0) nmin--; /* distinguish -0 from +0 */\
+                if(nmax<0) nmax--;\
+                for(nidx=nmin; nidx<=nmax; nidx++) {\
+                    const int nibble = nidx<0 ? 7-nidx : nidx;\
+                    int dec_sample = predictor + (STEP_TABLE * ff_adpcm_yamaha_difflookup[nibble]) / 8;\
+                    STORE_NODE(NAME, STEP_INDEX);\
+                }
+                LOOP_NODES(ima, ff_adpcm_step_table[step], av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
+            } else { //CODEC_ID_ADPCM_YAMAHA
+                LOOP_NODES(yamaha, step, av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8, 127, 24567));
+#undef LOOP_NODES
+#undef STORE_NODE
+            }
+        }
+
+        u = nodes;
+        nodes = nodes_next;
+        nodes_next = u;
+
+        generation++;
+        if (generation == 255) {
+            memset(hash, 0xff, 65536 * sizeof(*hash));
+            generation = 0;
+        }
+
+        // prevent overflow
+        if(nodes[0]->ssd > (1<<28)) {
+            for(j=1; j<frontier && nodes[j]; j++)
+                nodes[j]->ssd -= nodes[0]->ssd;
+            nodes[0]->ssd = 0;
+        }
+
+        // merge old paths to save memory
+        if(i == froze + FREEZE_INTERVAL) {
+            p = &paths[nodes[0]->path];
+            for(k=i; k>froze; k--) {
+                dst[k] = p->nibble;
+                p = &paths[p->prev];
+            }
+            froze = i;
+            pathn = 0;
+            // other nodes might use paths that don't coincide with the frozen one.
+            // checking which nodes do so is too slow, so just kill them all.
+            // this also slightly improves quality, but I don't know why.
+            memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
+        }
+    }
+
+    p = &paths[nodes[0]->path];
+    for(i=n-1; i>froze; i--) {
+        dst[i] = p->nibble;
+        p = &paths[p->prev];
+    }
+
+    c->predictor = nodes[0]->sample1;
+    c->sample1 = nodes[0]->sample1;
+    c->sample2 = nodes[0]->sample2;
+    c->step_index = nodes[0]->step;
+    c->step = nodes[0]->step;
+    c->idelta = nodes[0]->step;
+}
+
+static int adpcm_encode_frame(AVCodecContext *avctx,
+                              unsigned char *frame, int buf_size, void *data)
+{
+    int n, i, st;
+    short *samples;
+    unsigned char *dst;
+    ADPCMEncodeContext *c = avctx->priv_data;
+    uint8_t *buf;
+
+    dst = frame;
+    samples = (short *)data;
+    st= avctx->channels == 2;
+/*    n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */
+
+    switch(avctx->codec->id) {
+    case CODEC_ID_ADPCM_IMA_WAV:
+        n = avctx->frame_size / 8;
+        c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
+/*        c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */
+        bytestream_put_le16(&dst, c->status[0].prev_sample);
+        *dst++ = (unsigned char)c->status[0].step_index;
+        *dst++ = 0; /* unknown */
+        samples++;
+        if (avctx->channels == 2) {
+            c->status[1].prev_sample = (signed short)samples[0];
+/*            c->status[1].step_index = 0; */
+            bytestream_put_le16(&dst, c->status[1].prev_sample);
+            *dst++ = (unsigned char)c->status[1].step_index;
+            *dst++ = 0;
+            samples++;
+        }
+
+        /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */
+        if(avctx->trellis > 0) {
+            FF_ALLOC_OR_GOTO(avctx, buf, 2*n*8, error);
+            adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n*8);
+            if(avctx->channels == 2)
+                adpcm_compress_trellis(avctx, samples+1, buf + n*8, &c->status[1], n*8);
+            for(i=0; i<n; i++) {
+                *dst++ = buf[8*i+0] | (buf[8*i+1] << 4);
+                *dst++ = buf[8*i+2] | (buf[8*i+3] << 4);
+                *dst++ = buf[8*i+4] | (buf[8*i+5] << 4);
+                *dst++ = buf[8*i+6] | (buf[8*i+7] << 4);
+                if (avctx->channels == 2) {
+                    uint8_t *buf1 = buf + n*8;
+                    *dst++ = buf1[8*i+0] | (buf1[8*i+1] << 4);
+                    *dst++ = buf1[8*i+2] | (buf1[8*i+3] << 4);
+                    *dst++ = buf1[8*i+4] | (buf1[8*i+5] << 4);
+                    *dst++ = buf1[8*i+6] | (buf1[8*i+7] << 4);
+                }
+            }
+            av_free(buf);
+        } else
+        for (; n>0; n--) {
+            *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
+            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4;
+            dst++;
+            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
+            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
+            dst++;
+            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
+            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
+            dst++;
+            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
+            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
+            dst++;
+            /* right channel */
+            if (avctx->channels == 2) {
+                *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
+                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
+                dst++;
+                *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
+                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
+                dst++;
+                *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
+                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
+                dst++;
+                *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
+                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
+                dst++;
+            }
+            samples += 8 * avctx->channels;
+        }
+        break;
+    case CODEC_ID_ADPCM_IMA_QT:
+    {
+        int ch, i;
+        PutBitContext pb;
+        init_put_bits(&pb, dst, buf_size*8);
+
+        for(ch=0; ch<avctx->channels; ch++){
+            put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
+            put_bits(&pb, 7, c->status[ch].step_index);
+            if(avctx->trellis > 0) {
+                uint8_t buf[64];
+                adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
+                for(i=0; i<64; i++)
+                    put_bits(&pb, 4, buf[i^1]);
+                c->status[ch].prev_sample = c->status[ch].predictor & ~0x7F;
+            } else {
+                for (i=0; i<64; i+=2){
+                    int t1, t2;
+                    t1 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
+                    t2 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
+                    put_bits(&pb, 4, t2);
+                    put_bits(&pb, 4, t1);
+                }
+                c->status[ch].prev_sample &= ~0x7F;
+            }
+        }
+
+        flush_put_bits(&pb);
+        dst += put_bits_count(&pb)>>3;
+        break;
+    }
+    case CODEC_ID_ADPCM_SWF:
+    {
+        int i;
+        PutBitContext pb;
+        init_put_bits(&pb, dst, buf_size*8);
+
+        n = avctx->frame_size-1;
+
+        //Store AdpcmCodeSize
+        put_bits(&pb, 2, 2);                //Set 4bits flash adpcm format
+
+        //Init the encoder state
+        for(i=0; i<avctx->channels; i++){
+            c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits
+            put_sbits(&pb, 16, samples[i]);
+            put_bits(&pb, 6, c->status[i].step_index);
+            c->status[i].prev_sample = (signed short)samples[i];
+        }
+
+        if(avctx->trellis > 0) {
+            FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error);
+            adpcm_compress_trellis(avctx, samples+2, buf, &c->status[0], n);
+            if (avctx->channels == 2)
+                adpcm_compress_trellis(avctx, samples+3, buf+n, &c->status[1], n);
+            for(i=0; i<n; i++) {
+                put_bits(&pb, 4, buf[i]);
+                if (avctx->channels == 2)
+                    put_bits(&pb, 4, buf[n+i]);
+            }
+            av_free(buf);
+        } else {
+            for (i=1; i<avctx->frame_size; i++) {
+                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i]));
+                if (avctx->channels == 2)
+                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1]));
+            }
+        }
+        flush_put_bits(&pb);
+        dst += put_bits_count(&pb)>>3;
+        break;
+    }
+    case CODEC_ID_ADPCM_MS:
+        for(i=0; i<avctx->channels; i++){
+            int predictor=0;
+
+            *dst++ = predictor;
+            c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
+            c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
+        }
+        for(i=0; i<avctx->channels; i++){
+            if (c->status[i].idelta < 16)
+                c->status[i].idelta = 16;
+
+            bytestream_put_le16(&dst, c->status[i].idelta);
+        }
+        for(i=0; i<avctx->channels; i++){
+            c->status[i].sample2= *samples++;
+        }
+        for(i=0; i<avctx->channels; i++){
+            c->status[i].sample1= *samples++;
+
+            bytestream_put_le16(&dst, c->status[i].sample1);
+        }
+        for(i=0; i<avctx->channels; i++)
+            bytestream_put_le16(&dst, c->status[i].sample2);
+
+        if(avctx->trellis > 0) {
+            int n = avctx->block_align - 7*avctx->channels;
+            FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error);
+            if(avctx->channels == 1) {
+                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
+                for(i=0; i<n; i+=2)
+                    *dst++ = (buf[i] << 4) | buf[i+1];
+            } else {
+                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
+                adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n);
+                for(i=0; i<n; i++)
+                    *dst++ = (buf[i] << 4) | buf[n+i];
+            }
+            av_free(buf);
+        } else
+        for(i=7*avctx->channels; i<avctx->block_align; i++) {
+            int nibble;
+            nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
+            nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++);
+            *dst++ = nibble;
+        }
+        break;
+    case CODEC_ID_ADPCM_YAMAHA:
+        n = avctx->frame_size / 2;
+        if(avctx->trellis > 0) {
+            FF_ALLOC_OR_GOTO(avctx, buf, 2*n*2, error);
+            n *= 2;
+            if(avctx->channels == 1) {
+                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
+                for(i=0; i<n; i+=2)
+                    *dst++ = buf[i] | (buf[i+1] << 4);
+            } else {
+                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
+                adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n);
+                for(i=0; i<n; i++)
+                    *dst++ = buf[i] | (buf[n+i] << 4);
+            }
+            av_free(buf);
+        } else
+            for (n *= avctx->channels; n>0; n--) {
+                int nibble;
+                nibble  = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
+                nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
+                *dst++ = nibble;
+            }
+        break;
+    default:
+    error:
return -1; + } + return dst - frame; +} + + +#define ADPCM_ENCODER(id,name,long_name_) \ +AVCodec ff_ ## name ## _encoder = { \ + #name, \ + AVMEDIA_TYPE_AUDIO, \ + id, \ + sizeof(ADPCMEncodeContext), \ + adpcm_encode_init, \ + adpcm_encode_frame, \ + adpcm_encode_close, \ + NULL, \ + .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, \ + .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ +} + +ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime"); +ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV"); +ADPCM_ENCODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft"); +ADPCM_ENCODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash"); +ADPCM_ENCODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");
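
For reference, a minimal standalone sketch (not part of the patch): it mirrors the per-sample arithmetic of adpcm_ima_compress_sample() above for a few made-up input samples, assuming only private copies of the step/index tables from adpcm_data.c. The sample values and helper names below are illustrative, not anything defined by libavcodec.

/* Illustrative only: round-trips samples through the same IMA quantization
 * rule the encoder above uses, printing the chosen nibble and the value the
 * decoder would reconstruct. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static const int8_t index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};
static const int16_t step_table[89] = {
    7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
    19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
    50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
    130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
    337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
    876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
    2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
    5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};

static int clip(int v, int lo, int hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    /* encoder state, like ADPCMChannelStatus.prev_sample / step_index */
    int predictor = 0, step_index = 0;
    static const int16_t input[] = { 100, 400, -200, 1500, -3000, 8000 };
    unsigned i;

    for (i = 0; i < sizeof(input) / sizeof(input[0]); i++) {
        int step   = step_table[step_index];
        int delta  = input[i] - predictor;
        /* 3-bit magnitude plus sign bit, as in adpcm_ima_compress_sample() */
        int nibble = abs(delta) * 4 / step;
        int diff;
        if (nibble > 7)
            nibble = 7;
        nibble += (delta < 0) * 8;
        /* reconstruction matches step * ff_adpcm_yamaha_difflookup[nibble] / 8,
         * since difflookup[nibble] is +/-(2 * magnitude + 1) */
        diff = (step * (2 * (nibble & 7) + 1)) / 8;
        predictor  = clip(predictor + ((nibble & 8) ? -diff : diff), -32768, 32767);
        step_index = clip(step_index + index_table[nibble], 0, 88);
        printf("in=%6d  nibble=0x%X  reconstructed=%6d\n",
               input[i], nibble, predictor);
    }
    return 0;
}

Because the encoder tracks the same predictor the decoder will rebuild, quantization error does not accumulate beyond one step; the trellis path in adpcm_compress_trellis() refines exactly this per-sample decision by searching several candidate nibbles per state.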