
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  avcodec: add support for planar signed 8-bit PCM.
  ra144enc: add sample_fmts list to ff_ra_144_encoder
  smackaud: use uint8_t* for 8-bit output buffer type
  smackaud: clip output samples
  smackaud: use sign_extend() for difference value instead of casting
  sipr: use a function pointer to select the decode_frame function
  sipr: set mode based on block_align instead of bit_rate
  sipr: do not needlessly set *data_size to 0 when returning an error
  ra288: fix formatting of LOCAL_ALIGNED_16
  udp: Allow specifying the local IP address
  VC1: Add bottom field offset to block_index[] to avoid rewriting (+10L)
  vc1dec: move an if() block.
  vc1dec: use correct hybrid prediction threshold.
  vc1dec: Partial rewrite of vc1_pred_mv()
  vc1dec: take ME precision into account while scaling MV predictors.
  lavf: don't leak corrupted packets

Conflicts:
	libavcodec/8svx.c
	libavcodec/ra288.c
	libavcodec/version.h
	libavformat/iff.c
	libavformat/udp.c
	libavformat/utils.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Committed by Michael Niedermayer on 2011-11-10 03:09:46 +01:00
commit afc0a24d7d
14 changed files with 175 additions and 177 deletions


@@ -446,6 +446,11 @@ set the UDP buffer size in bytes
 @item localport=@var{port}
 override the local UDP port to bind with
+@item localaddr=@var{addr}
+Choose the local IP address. This is useful e.g. if sending multicast
+and the host has multiple interfaces, where the user can choose
+which interface to send on by specifying the IP address of that interface.
 @item pkt_size=@var{size}
 set the size in bytes of UDP packets
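The new localaddr value travels as a URL query parameter, so any libavformat client can use it. A minimal sketch (not part of this patch; the multicast and interface addresses below are made up) of opening such a URL through the avio layer:

    #include <libavformat/avformat.h>
    #include <libavformat/avio.h>

    int main(void)
    {
        AVIOContext *out = NULL;

        av_register_all();
        avformat_network_init();

        /* Hypothetical addresses: send multicast through the interface
         * that owns 192.168.1.10. */
        if (avio_open(&out, "udp://239.255.0.1:1234?localaddr=192.168.1.10",
                      AVIO_FLAG_WRITE) < 0)
            return 1;

        avio_write(out, (const unsigned char *)"test", 4);
        avio_close(out);
        return 0;
    }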


@@ -110,7 +110,7 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_si
     if (!esc->samples && avpkt) {
         uint8_t *deinterleaved_samples;
-        esc->samples_size = avctx->codec->id == CODEC_ID_8SVX_RAW ?
+        esc->samples_size = avctx->codec->id == CODEC_ID_8SVX_RAW || avctx->codec->id ==CODEC_ID_PCM_S8_PLANAR?
             avpkt->size : avctx->channels + (avpkt->size-avctx->channels) * 2;
         if (!(esc->samples = av_malloc(esc->samples_size)))
             return AVERROR(ENOMEM);
@@ -168,7 +168,7 @@ static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
 {
     EightSvxContext *esc = avctx->priv_data;
-    if (avctx->channels > 2) {
+    if (avctx->channels < 1 || avctx->channels > 2) {
         av_log(avctx, AV_LOG_ERROR, "8SVX does not support more than 2 channels\n");
         return AVERROR_INVALIDDATA;
     }
@@ -176,6 +176,7 @@ static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
     switch (avctx->codec->id) {
     case CODEC_ID_8SVX_FIB: esc->table = fibonacci;   break;
     case CODEC_ID_8SVX_EXP: esc->table = exponential; break;
+    case CODEC_ID_PCM_S8_PLANAR:
     case CODEC_ID_8SVX_RAW: esc->table = NULL;        break;
     default:
         av_log(avctx, AV_LOG_ERROR, "Invalid codec id %d.\n", avctx->codec->id);
@@ -219,13 +220,13 @@ AVCodec ff_eightsvx_exp_decoder = {
     .long_name      = NULL_IF_CONFIG_SMALL("8SVX exponential"),
 };
-AVCodec ff_eightsvx_raw_decoder = {
-    .name           = "8svx_raw",
+AVCodec ff_pcm_s8_planar_decoder = {
+    .name           = "pcm_s8_planar",
     .type           = AVMEDIA_TYPE_AUDIO,
-    .id             = CODEC_ID_8SVX_RAW,
+    .id             = CODEC_ID_PCM_S8_PLANAR,
     .priv_data_size = sizeof(EightSvxContext),
     .init           = eightsvx_decode_init,
-    .decode         = eightsvx_decode_frame,
-    .close          = eightsvx_decode_close,
-    .long_name      = NULL_IF_CONFIG_SMALL("8SVX rawaudio"),
+    .close          = eightsvx_decode_close,
+    .decode         = eightsvx_decode_frame,
+    .long_name      = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"),
 };
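The pcm_s8_planar decoder reuses the 8SVX path, which stores each channel's samples contiguously and interleaves them on output. A minimal standalone sketch of that deinterleave-to-interleave step (my own illustration, not the code in 8svx.c):

    #include <stddef.h>
    #include <stdint.h>

    /* Interleave planar signed 8-bit PCM: src holds all of channel 0,
     * then all of channel 1, ...; dst receives frame-interleaved samples. */
    static void interleave_s8(uint8_t *dst, const uint8_t *src,
                              size_t samples_per_channel, int channels)
    {
        for (size_t i = 0; i < samples_per_channel; i++)
            for (int ch = 0; ch < channels; ch++)
                dst[i * channels + ch] = src[ch * samples_per_channel + i];
    }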


@@ -485,6 +485,7 @@ OBJS-$(CONFIG_PCM_MULAW_DECODER)       += pcm.o
 OBJS-$(CONFIG_PCM_MULAW_ENCODER)       += pcm.o
 OBJS-$(CONFIG_PCM_S8_DECODER)          += pcm.o
 OBJS-$(CONFIG_PCM_S8_ENCODER)          += pcm.o
+OBJS-$(CONFIG_PCM_S8_PLANAR_DECODER)   += 8svx.o
 OBJS-$(CONFIG_PCM_S16BE_DECODER)       += pcm.o
 OBJS-$(CONFIG_PCM_S16BE_ENCODER)       += pcm.o
 OBJS-$(CONFIG_PCM_S16LE_DECODER)       += pcm.o


@@ -107,7 +107,6 @@ void avcodec_register_all(void)
     REGISTER_DECODER (EIGHTBPS, eightbps);
     REGISTER_DECODER (EIGHTSVX_EXP, eightsvx_exp);
     REGISTER_DECODER (EIGHTSVX_FIB, eightsvx_fib);
-    REGISTER_DECODER (EIGHTSVX_RAW, eightsvx_raw);
     REGISTER_DECODER (ESCAPE124, escape124);
     REGISTER_ENCDEC (FFV1, ffv1);
     REGISTER_ENCDEC (FFVHUFF, ffvhuff);
@@ -318,6 +317,7 @@ void avcodec_register_all(void)
     REGISTER_DECODER (PCM_LXF, pcm_lxf);
     REGISTER_ENCDEC (PCM_MULAW, pcm_mulaw);
     REGISTER_ENCDEC (PCM_S8, pcm_s8);
+    REGISTER_DECODER (PCM_S8_PLANAR, pcm_s8_planar);
     REGISTER_ENCDEC (PCM_S16BE, pcm_s16be);
     REGISTER_ENCDEC (PCM_S16LE, pcm_s16le);
     REGISTER_DECODER (PCM_S16LE_PLANAR, pcm_s16le_planar);
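For context, REGISTER_DECODER is a helper macro defined at the top of allcodecs.c; roughly (from memory, exact formatting may differ) it declares the codec's AVCodec symbol and registers it only when the matching CONFIG_* flag is enabled:

    #define REGISTER_DECODER(X, x)                          \
        {                                                   \
            extern AVCodec ff_##x##_decoder;                \
            if (CONFIG_##X##_DECODER)                       \
                avcodec_register(&ff_##x##_decoder);        \
        }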


@@ -254,6 +254,7 @@ enum CodecID {
     CODEC_ID_PCM_BLURAY,
     CODEC_ID_PCM_LXF,
     CODEC_ID_S302M,
+    CODEC_ID_PCM_S8_PLANAR,
     /* various ADPCM codecs */
     CODEC_ID_ADPCM_IMA_QT = 0x11000,


@@ -516,5 +516,7 @@ AVCodec ff_ra_144_encoder = {
     .init           = ra144_encode_init,
     .encode         = ra144_encode_frame,
     .close          = ra144_encode_close,
+    .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+                                                     AV_SAMPLE_FMT_NONE },
     .long_name      = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K) encoder"),
 };
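The sample_fmts member is an AV_SAMPLE_FMT_NONE-terminated list that generic libavcodec code can use to validate the caller's requested sample format before the encoder runs. A minimal sketch of that kind of check (the helper name is mine, not the actual avcodec_open2() code):

    #include <libavutil/samplefmt.h>

    /* Return 1 if fmt appears in a codec's AV_SAMPLE_FMT_NONE-terminated list. */
    static int sample_fmt_supported(const enum AVSampleFormat *fmts,
                                    enum AVSampleFormat fmt)
    {
        for (; *fmts != AV_SAMPLE_FMT_NONE; fmts++)
            if (*fmts == fmt)
                return 1;
        return 0;
    }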


@@ -129,8 +129,8 @@ static void do_hybrid_window(RA288Context *ractx,
     float buffer1[MAX_BACKWARD_FILTER_ORDER + 1];
     float buffer2[MAX_BACKWARD_FILTER_ORDER + 1];
     LOCAL_ALIGNED_16(float, work, [FFALIGN(MAX_BACKWARD_FILTER_ORDER +
                                            MAX_BACKWARD_FILTER_LEN +
                                            MAX_BACKWARD_FILTER_NONREC, 8)]);
 
     ractx->dsp.vector_fmul(work, window, hist, FFALIGN(order + n + non_rec, 8));


@@ -480,15 +480,24 @@ static av_cold int sipr_decoder_init(AVCodecContext * avctx)
     SiprContext *ctx = avctx->priv_data;
     int i;
-    if (avctx->bit_rate > 12200)      ctx->mode = MODE_16k;
-    else if (avctx->bit_rate > 7500 ) ctx->mode = MODE_8k5;
-    else if (avctx->bit_rate > 5750 ) ctx->mode = MODE_6k5;
-    else                              ctx->mode = MODE_5k0;
+    switch (avctx->block_align) {
+    case 20: ctx->mode = MODE_16k; break;
+    case 19: ctx->mode = MODE_8k5; break;
+    case 29: ctx->mode = MODE_6k5; break;
+    case 37: ctx->mode = MODE_5k0; break;
+    default:
+        av_log(avctx, AV_LOG_ERROR, "Invalid block_align: %d\n", avctx->block_align);
+        return AVERROR(EINVAL);
+    }
     av_log(avctx, AV_LOG_DEBUG, "Mode: %s\n", modes[ctx->mode].mode_name);
-    if (ctx->mode == MODE_16k)
+    if (ctx->mode == MODE_16k) {
         ff_sipr_init_16k(ctx);
+        ctx->decode_frame = ff_sipr_decode_frame_16k;
+    } else {
+        ctx->decode_frame = decode_frame;
+    }
     for (i = 0; i < LP_FILTER_ORDER; i++)
         ctx->lsp_history[i] = cos((i+1) * M_PI / (LP_FILTER_ORDER + 1));
@@ -518,8 +527,6 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *datap,
         av_log(avctx, AV_LOG_ERROR,
                "Error processing packet: packet size (%d) too small\n",
                avpkt->size);
-        *data_size = 0;
         return -1;
     }
@@ -530,8 +537,6 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *datap,
         av_log(avctx, AV_LOG_ERROR,
                "Error processing packet: output buffer (%d) too small\n",
                *data_size);
-        *data_size = 0;
         return -1;
     }
@@ -540,10 +545,7 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *datap,
     for (i = 0; i < mode_par->frames_per_packet; i++) {
         decode_parameters(&parm, &gb, mode_par);
-        if (ctx->mode == MODE_16k)
-            ff_sipr_decode_frame_16k(ctx, &parm, data);
-        else
-            decode_frame(ctx, &parm, data);
+        ctx->decode_frame(ctx, &parm, data);
         data += subframe_size * mode_par->subframe_count;
     }
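The init change keys the sipr mode off the container's block_align (20, 19, 29 and 37 bytes, as listed in the switch above) and stores a per-mode decode_frame function pointer so the per-packet loop no longer branches. A small standalone sketch of the same table idea (names are mine, not the decoder's):

    #include <stdio.h>

    typedef enum { MODE_16k, MODE_8k5, MODE_6k5, MODE_5k0, MODE_INVALID } Mode;

    /* Map a RealAudio sipr block_align to a decoder mode, as in the hunk above. */
    static Mode mode_from_block_align(int block_align)
    {
        switch (block_align) {
        case 20: return MODE_16k;
        case 19: return MODE_8k5;
        case 29: return MODE_6k5;
        case 37: return MODE_5k0;
        default: return MODE_INVALID;
        }
    }

    int main(void)
    {
        printf("%d\n", mode_from_block_align(20)); /* prints 0 (MODE_16k) */
        return 0;
    }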


@@ -53,8 +53,18 @@ typedef enum {
     MODE_COUNT
 } SiprMode;
-typedef struct {
+typedef struct SiprParameters {
+    int ma_pred_switch;        ///< switched moving average predictor
+    int vq_indexes[5];
+    int pitch_delay[5];        ///< pitch delay
+    int gp_index[5];           ///< adaptive-codebook gain indexes
+    int16_t fc_indexes[5][10]; ///< fixed-codebook indexes
+    int gc_index[5];           ///< fixed-codebook gain indexes
+} SiprParameters;
+
+typedef struct SiprContext {
     AVCodecContext *avctx;
     AVFrame frame;
     SiprMode mode;
@@ -85,16 +95,10 @@ typedef struct {
     float mem_preemph[LP_FILTER_ORDER_16k];
     float synth[LP_FILTER_ORDER_16k];
     double lsp_history_16k[16];
-} SiprContext;
-typedef struct {
-    int ma_pred_switch;        ///< switched moving average predictor
-    int vq_indexes[5];
-    int pitch_delay[5];        ///< pitch delay
-    int gp_index[5];           ///< adaptive-codebook gain indexes
-    int16_t fc_indexes[5][10]; ///< fixed-codebook indexes
-    int gc_index[5];           ///< fixed-codebook gain indexes
-} SiprParameters;
+
+    void (*decode_frame)(struct SiprContext *ctx, SiprParameters *params,
+                         float *out_data);
+} SiprContext;
 
 extern const float ff_pow_0_5[16];


@@ -33,6 +33,7 @@
 #include "avcodec.h"
 #include "libavutil/audioconvert.h"
+#include "mathops.h"
 #define ALT_BITSTREAM_READER_LE
 #include "get_bits.h"
@@ -580,7 +581,7 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     HuffContext h[4];
     VLC vlc[4];
     int16_t *samples = data;
-    int8_t *samples8 = data;
+    uint8_t *samples8 = data;
     int val;
     int i, res;
     int unp_size;
@@ -656,8 +657,8 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                 else
                     res = 0;
                 val |= h[3].values[res] << 8;
-                pred[1] += (int16_t)val;
-                *samples++ = pred[1];
+                pred[1] += sign_extend(val, 16);
+                *samples++ = av_clip_int16(pred[1]);
             } else {
                 if(vlc[0].table)
                     res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3);
@@ -669,8 +670,8 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                 else
                     res = 0;
                 val |= h[1].values[res] << 8;
-                pred[0] += val;
-                *samples++ = pred[0];
+                pred[0] += sign_extend(val, 16);
+                *samples++ = av_clip_int16(pred[0]);
             }
         }
     } else { //8-bit data
@@ -684,15 +685,15 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                     res = get_vlc2(&gb, vlc[1].table, SMKTREE_BITS, 3);
                 else
                     res = 0;
-                pred[1] += (int8_t)h[1].values[res];
-                *samples8++ = pred[1];
+                pred[1] += sign_extend(h[1].values[res], 8);
+                *samples8++ = av_clip_uint8(pred[1]);
             } else {
                 if(vlc[0].table)
                     res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3);
                 else
                     res = 0;
-                pred[0] += (int8_t)h[0].values[res];
-                *samples8++ = pred[0];
+                pred[0] += sign_extend(h[0].values[res], 8);
+                *samples8++ = av_clip_uint8(pred[0]);
             }
         }
     }
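sign_extend() and av_clip_int16()/av_clip_uint8() are libavutil helpers; the point of the change is that the Huffman-decoded difference is an unsigned bit pattern that must be reinterpreted as signed before being added to the predictor, and the running predictor must be clipped to the sample range. A minimal standalone re-implementation of the helpers used here (illustrative only, not the libavutil source):

    #include <stdint.h>

    /* Interpret the low 'bits' bits of val as a signed two's-complement number. */
    static int sign_extend(int val, unsigned bits)
    {
        unsigned shift = 8 * sizeof(int) - bits;
        return (int)((unsigned)val << shift) >> shift;
    }

    static int clip_int16(int a)
    {
        if (a < -32768) return -32768;
        if (a >  32767) return  32767;
        return a;
    }

    static int clip_uint8(int a)
    {
        if (a < 0)   return 0;
        if (a > 255) return 255;
        return a;
    }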


@@ -930,8 +930,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
     if (!v->field_mode || (v->field_mode && !v->numref)) {
         valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
         if (!valid_count) {
-            s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
-            s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
+            s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
+            s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
             v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
             return; //no need to do MC for intra blocks
         }
@@ -943,8 +943,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
         if (dominant)
             chroma_ref_type = !v->cur_field_type;
     }
-    s->current_picture.f.motion_val[1][s->block_index[0]][0] = tx;
-    s->current_picture.f.motion_val[1][s->block_index[0]][1] = ty;
+    s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
+    s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
     uvmx = (tx + ((tx & 3) == 3)) >> 1;
     uvmy = (ty + ((ty & 3) == 3)) >> 1;
@@ -1422,29 +1422,36 @@ static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
                                          int dim, int dir)
 {
     int brfd, scalesame;
+    int hpel = 1 - v->s.quarter_sample;
+    n >>= hpel;
     if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
         if (dim)
-            return scaleforsame_y(v, i, n, dir);
+            n = scaleforsame_y(v, i, n, dir) << hpel;
         else
-            return scaleforsame_x(v, n, dir);
+            n = scaleforsame_x(v, n, dir) << hpel;
+        return n;
     }
     brfd = FFMIN(v->brfd, 3);
     scalesame = vc1_b_field_mvpred_scales[0][brfd];
-    return n * scalesame >> 8;
+    n = (n * scalesame >> 8) << hpel;
+    return n;
 }
 
 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
                                         int dim, int dir)
 {
     int refdist, scaleopp;
+    int hpel = 1 - v->s.quarter_sample;
+    n >>= hpel;
     if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
         if (dim)
-            return scaleforopp_y(v, n, dir);
+            n = scaleforopp_y(v, n, dir) << hpel;
         else
-            return scaleforopp_x(v, n);
+            n = scaleforopp_x(v, n) << hpel;
+        return n;
     }
     if (v->s.pict_type != AV_PICTURE_TYPE_B)
         refdist = FFMIN(v->refdist, 3);
@@ -1452,7 +1459,8 @@ static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
         refdist = dir ? v->brfd : v->frfd;
     scaleopp = vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
-    return n * scaleopp >> 8;
+    n = (n * scaleopp >> 8) << hpel;
+    return n;
 }
 
 /** Predict and set motion vector
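The scaleforsame()/scaleforopp() change ("take ME precision into account while scaling MV predictors") normalizes the predictor before applying the scale table and restores the stored precision afterwards: with half-pel motion vectors hpel is 1, so the value is halved, scaled, then doubled. A tiny standalone illustration of that pattern (function name and scale value are made up):

    #include <stdio.h>

    /* Scale an MV predictor whose stored precision depends on quarter_sample,
     * mirroring the n >>= hpel ... << hpel pattern in the hunks above. */
    static int scale_mv_pred(int n, int scale, int quarter_sample)
    {
        int hpel = 1 - quarter_sample;   /* 1 for half-pel MVs, 0 for quarter-pel */

        n >>= hpel;                      /* bring the predictor to its native units */
        n   = (n * scale >> 8) << hpel;  /* scale, then restore the stored precision */
        return n;
    }

    int main(void)
    {
        /* e.g. a half-pel predictor of 12 with a table entry of 192 (0.75 * 256) */
        printf("%d\n", scale_mv_pred(12, 192, 0)); /* prints 8 */
        return 0;
    }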
@@ -1467,12 +1475,10 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
     int px, py;
     int sum;
     int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
-    int opposit, f;
-    int16_t samefield_pred[2], oppfield_pred[2];
-    int16_t samefield_predA[2], oppfield_predA[2];
-    int16_t samefield_predB[2], oppfield_predB[2];
-    int16_t samefield_predC[2], oppfield_predC[2];
-    int16_t *predA, *predC;
+    int opposit, a_f, b_f, c_f;
+    int16_t field_predA[2];
+    int16_t field_predB[2];
+    int16_t field_predC[2];
     int a_valid, b_valid, c_valid;
     int hybridmv_thresh, y_bias = 0;
@@ -1546,96 +1552,34 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
     }
     if (a_valid) {
-        f = v->mv_f[dir][xy - wrap + v->blocks_off];
-        num_oppfield += f;
-        num_samefield += 1 - f;
-        if (f) {
-            oppfield_predA[0] = A[0];
-            oppfield_predA[1] = A[1];
-            samefield_predA[0] = scaleforsame(v, 0, A[0], 0, dir);
-            samefield_predA[1] = scaleforsame(v, n, A[1], 1, dir);
-        } else {
-            samefield_predA[0] = A[0];
-            samefield_predA[1] = A[1];
-            if (v->numref)
-                oppfield_predA[0] = scaleforopp(v, A[0], 0, dir);
-            if (v->numref)
-                oppfield_predA[1] = scaleforopp(v, A[1], 1, dir);
-        }
+        a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
+        num_oppfield += a_f;
+        num_samefield += 1 - a_f;
+        field_predA[0] = A[0];
+        field_predA[1] = A[1];
     } else {
-        samefield_predA[0] = samefield_predA[1] = 0;
-        oppfield_predA[0] = oppfield_predA[1] = 0;
-    }
-    if (c_valid) {
-        f = v->mv_f[dir][xy - 1 + v->blocks_off];
-        num_oppfield += f;
-        num_samefield += 1 - f;
-        if (f) {
-            oppfield_predC[0] = C[0];
-            oppfield_predC[1] = C[1];
-            samefield_predC[0] = scaleforsame(v, 0, C[0], 0, dir);
-            samefield_predC[1] = scaleforsame(v, n, C[1], 1, dir);
-        } else {
-            samefield_predC[0] = C[0];
-            samefield_predC[1] = C[1];
-            if (v->numref)
-                oppfield_predC[0] = scaleforopp(v, C[0], 0, dir);
-            if (v->numref)
-                oppfield_predC[1] = scaleforopp(v, C[1], 1, dir);
-        }
-    } else {
-        samefield_predC[0] = samefield_predC[1] = 0;
-        oppfield_predC[0] = oppfield_predC[1] = 0;
+        field_predA[0] = field_predA[1] = 0;
+        a_f = 0;
     }
     if (b_valid) {
-        f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
-        num_oppfield += f;
-        num_samefield += 1 - f;
-        if (f) {
-            oppfield_predB[0] = B[0];
-            oppfield_predB[1] = B[1];
-            samefield_predB[0] = scaleforsame(v, 0, B[0], 0, dir);
-            samefield_predB[1] = scaleforsame(v, n, B[1], 1, dir);
-        } else {
-            samefield_predB[0] = B[0];
-            samefield_predB[1] = B[1];
-            if (v->numref)
-                oppfield_predB[0] = scaleforopp(v, B[0], 0, dir);
-            if (v->numref)
-                oppfield_predB[1] = scaleforopp(v, B[1], 1, dir);
-        }
+        b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
+        num_oppfield += b_f;
+        num_samefield += 1 - b_f;
+        field_predB[0] = B[0];
+        field_predB[1] = B[1];
     } else {
-        samefield_predB[0] = samefield_predB[1] = 0;
-        oppfield_predB[0] = oppfield_predB[1] = 0;
+        field_predB[0] = field_predB[1] = 0;
+        b_f = 0;
     }
-    if (a_valid) {
-        samefield_pred[0] = samefield_predA[0];
-        samefield_pred[1] = samefield_predA[1];
-        oppfield_pred[0] = oppfield_predA[0];
-        oppfield_pred[1] = oppfield_predA[1];
-    } else if (c_valid) {
-        samefield_pred[0] = samefield_predC[0];
-        samefield_pred[1] = samefield_predC[1];
-        oppfield_pred[0] = oppfield_predC[0];
-        oppfield_pred[1] = oppfield_predC[1];
-    } else if (b_valid) {
-        samefield_pred[0] = samefield_predB[0];
-        samefield_pred[1] = samefield_predB[1];
-        oppfield_pred[0] = oppfield_predB[0];
-        oppfield_pred[1] = oppfield_predB[1];
+    if (c_valid) {
+        c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
+        num_oppfield += c_f;
+        num_samefield += 1 - c_f;
+        field_predC[0] = C[0];
+        field_predC[1] = C[1];
     } else {
-        samefield_pred[0] = samefield_pred[1] = 0;
-        oppfield_pred[0] = oppfield_pred[1] = 0;
-    }
-    if (num_samefield + num_oppfield > 1) {
-        samefield_pred[0] = mid_pred(samefield_predA[0], samefield_predB[0], samefield_predC[0]);
-        samefield_pred[1] = mid_pred(samefield_predA[1], samefield_predB[1], samefield_predC[1]);
-        if (v->numref)
-            oppfield_pred[0] = mid_pred(oppfield_predA[0], oppfield_predB[0], oppfield_predC[0]);
-        if (v->numref)
-            oppfield_pred[1] = mid_pred(oppfield_predA[1], oppfield_predB[1], oppfield_predC[1]);
+        field_predC[0] = field_predC[1] = 0;
+        c_f = 0;
     }
 
     if (v->field_mode) {
@@ -1646,21 +1590,56 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
     } else
         opposit = 0;
     if (opposit) {
-        px = oppfield_pred[0];
-        py = oppfield_pred[1];
-        predA = oppfield_predA;
-        predC = oppfield_predC;
-        v->mv_f[dir][xy + v->blocks_off] = f = 1;
+        if (a_valid && !a_f) {
+            field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
+            field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
+        }
+        if (b_valid && !b_f) {
+            field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
+            field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
+        }
+        if (c_valid && !c_f) {
+            field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
+            field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
+        }
+        v->mv_f[dir][xy + v->blocks_off] = 1;
         v->ref_field_type[dir] = !v->cur_field_type;
     } else {
-        px = samefield_pred[0];
-        py = samefield_pred[1];
-        predA = samefield_predA;
-        predC = samefield_predC;
-        v->mv_f[dir][xy + v->blocks_off] = f = 0;
+        if (a_valid && a_f) {
+            field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
+            field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
+        }
+        if (b_valid && b_f) {
+            field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
+            field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
+        }
+        if (c_valid && c_f) {
+            field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
+            field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
+        }
+        v->mv_f[dir][xy + v->blocks_off] = 0;
         v->ref_field_type[dir] = v->cur_field_type;
     }
+    if (a_valid) {
+        px = field_predA[0];
+        py = field_predA[1];
+    } else if (c_valid) {
+        px = field_predC[0];
+        py = field_predC[1];
+    } else if (b_valid) {
+        px = field_predB[0];
+        py = field_predB[1];
+    } else {
+        px = 0;
+        py = 0;
+    }
+    if (num_samefield + num_oppfield > 1) {
+        px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
+        py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
+    }
     /* Pullback MV as specified in 8.3.5.3.4 */
     if (!v->field_mode) {
         int qx, qy, X, Y;
@@ -1681,35 +1660,32 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
     if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
         /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
-        if (v->field_mode && !s->quarter_sample)
-            hybridmv_thresh = 16;
-        else
-            hybridmv_thresh = 32;
+        hybridmv_thresh = 32;
         if (a_valid && c_valid) {
             if (is_intra[xy - wrap])
                 sum = FFABS(px) + FFABS(py);
             else
-                sum = FFABS(px - predA[0]) + FFABS(py - predA[1]);
+                sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
             if (sum > hybridmv_thresh) {
                 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
-                    px = predA[0];
-                    py = predA[1];
+                    px = field_predA[0];
+                    py = field_predA[1];
                 } else {
-                    px = predC[0];
-                    py = predC[1];
+                    px = field_predC[0];
+                    py = field_predC[1];
                 }
             } else {
                 if (is_intra[xy - 1])
                     sum = FFABS(px) + FFABS(py);
                 else
-                    sum = FFABS(px - predC[0]) + FFABS(py - predC[1]);
+                    sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
                 if (sum > hybridmv_thresh) {
                     if (get_bits1(&s->gb)) {
-                        px = predA[0];
-                        py = predA[1];
+                        px = field_predA[0];
+                        py = field_predA[1];
                     } else {
-                        px = predC[0];
-                        py = predC[1];
+                        px = field_predC[0];
+                        py = field_predC[1];
                     }
                 }
             }


@@ -21,7 +21,7 @@
 #define AVCODEC_VERSION_H
 #define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR 29
+#define LIBAVCODEC_VERSION_MINOR 30
 #define LIBAVCODEC_VERSION_MICRO 0
 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \


@@ -239,7 +239,7 @@ static int iff_read_header(AVFormatContext *s,
     switch (iff->svx8_compression) {
     case COMP_NONE:
-        st->codec->codec_id = CODEC_ID_8SVX_RAW;
+        st->codec->codec_id = CODEC_ID_PCM_S8_PLANAR;
         break;
     case COMP_FIB:
         st->codec->codec_id = CODEC_ID_8SVX_FIB;


@@ -31,6 +31,7 @@
 #include "libavutil/parseutils.h"
 #include "libavutil/fifo.h"
 #include "libavutil/intreadwrite.h"
+#include "libavutil/avstring.h"
 #include <unistd.h>
 #include "internal.h"
 #include "network.h"
@@ -195,8 +196,8 @@ static int udp_set_url(struct sockaddr_storage *addr,
     return addr_len;
 }
-static int udp_socket_create(UDPContext *s,
-                             struct sockaddr_storage *addr, int *addr_len)
+static int udp_socket_create(UDPContext *s, struct sockaddr_storage *addr,
+                             int *addr_len, const char *localaddr)
 {
     int udp_fd = -1;
     struct addrinfo *res0 = NULL, *res = NULL;
@@ -204,7 +205,8 @@ static int udp_socket_create(UDPContext *s, struct sockaddr_storage *addr,
     if (((struct sockaddr *) &s->dest_addr)->sa_family)
         family = ((struct sockaddr *) &s->dest_addr)->sa_family;
-    res0 = udp_resolve_host(0, s->local_port, SOCK_DGRAM, family, AI_PASSIVE);
+    res0 = udp_resolve_host(localaddr[0] ? localaddr : NULL, s->local_port,
+                            SOCK_DGRAM, family, AI_PASSIVE);
     if (res0 == 0)
         goto fail;
     for (res = res0; res; res=res->ai_next) {
@@ -377,7 +379,7 @@ static void *circular_buffer_task( void *_URLContext)
 /* return non zero if error */
 static int udp_open(URLContext *h, const char *uri, int flags)
 {
-    char hostname[1024];
+    char hostname[1024], localaddr[1024] = "";
     int port, udp_fd = -1, tmp, bind_ret = -1;
     UDPContext *s = NULL;
     int is_output;
@@ -430,6 +432,9 @@ static int udp_open(URLContext *h, const char *uri, int flags)
         if (av_find_info_tag(buf, sizeof(buf), "fifo_size", p)) {
             s->circular_buffer_size = strtol(buf, NULL, 10)*188;
         }
+        if (av_find_info_tag(buf, sizeof(buf), "localaddr", p)) {
+            av_strlcpy(localaddr, buf, sizeof(localaddr));
+        }
     }
     /* fill the dest addr */
@@ -447,7 +452,7 @@ static int udp_open(URLContext *h, const char *uri, int flags)
     if ((s->is_multicast || !s->local_port) && (h->flags & AVIO_FLAG_READ))
         s->local_port = port;
-    udp_fd = udp_socket_create(s, &my_addr, &len);
+    udp_fd = udp_socket_create(s, &my_addr, &len, localaddr);
     if (udp_fd < 0)
         goto fail;
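Under the hood, a non-empty localaddr simply makes udp_resolve_host() resolve that address and bind() the socket to it instead of the wildcard. A minimal standalone sketch of the same idea using the plain sockets API (not FFmpeg code; error handling trimmed):

    #include <string.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netdb.h>
    #include <unistd.h>

    /* Create a UDP socket bound to 'localaddr' (or the wildcard if NULL). */
    static int udp_socket_bound_to(const char *localaddr, const char *port)
    {
        struct addrinfo hints, *res;
        int fd;

        memset(&hints, 0, sizeof(hints));
        hints.ai_family   = AF_UNSPEC;
        hints.ai_socktype = SOCK_DGRAM;
        hints.ai_flags    = AI_PASSIVE;   /* wildcard when localaddr == NULL */

        if (getaddrinfo(localaddr, port, &hints, &res) != 0)
            return -1;

        fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
        if (fd >= 0 && bind(fd, res->ai_addr, res->ai_addrlen) < 0) {
            close(fd);
            fd = -1;
        }
        freeaddrinfo(res);
        return fd;
    }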