Fix various unused variable warnings
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
parent fd38a15adf
commit adba9c6352
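The changes below all follow one of two idioms: a variable that is assigned but never read is either deleted outright (together with the assignment that fed it), or, where keeping the read documents the stream layout, it is tagged with av_unused, FFmpeg's wrapper around GCC's `unused` attribute from libavutil/attributes.h, which keeps -Wall builds quiet about unused and set-but-unused variables. A minimal sketch of the annotation idiom follows, assuming the usual av_unused definition; the parse_header() function is hypothetical and only illustrates the pattern:

/* Assumed definition of av_unused, as in libavutil/attributes.h
 * (the real header also handles non-GCC compilers). */
#ifndef av_unused
#ifdef __GNUC__
#    define av_unused __attribute__((unused))
#else
#    define av_unused
#endif
#endif

/* Hypothetical example: 'version' is parsed so the field layout stays
 * visible in the code, but its value is never consumed afterwards, so it
 * is annotated instead of being deleted. */
int parse_header(const unsigned char *buf)
{
    int version av_unused;
    int payload_size;

    version      = buf[0];                /* set but intentionally not used */
    payload_size = buf[1] | (buf[2] << 8);

    return payload_size;
}

Note the two attribute placements used in the diff: written after a single declarator (int len1 av_unused, got_subtitle;) it applies to that variable only, while written before the type (av_unused int v1, v2;) it covers every declarator on the line.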
ffmpeg.c
@@ -3720,7 +3720,7 @@ static void new_audio_stream(AVFormatContext *oc, int file_idx)
 static void new_data_stream(AVFormatContext *oc, int file_idx)
 {
     AVStream *st;
-    AVOutputStream *ost;
+    AVOutputStream *ost av_unused;
     AVCodec *codec=NULL;
     AVCodecContext *data_enc;

ffplay.c
@@ -1438,7 +1438,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_

 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
 {
-    int len1, got_picture, i;
+    int len1 av_unused, got_picture, i;

     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
         return -1;
@@ -1813,7 +1813,7 @@ static int subtitle_thread(void *arg)
     VideoState *is = arg;
     SubPicture *sp;
     AVPacket pkt1, *pkt = &pkt1;
-    int len1, got_subtitle;
+    int len1 av_unused, got_subtitle;
     double pts;
     int i, j;
     int r, g, b, y, u, v, a;
@@ -252,7 +252,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
     int b_width;

     int req_size;
-    int num_frames = c->mc_lifetime;

     int *charmap = c->mc_charmap;
     uint8_t *colram = c->mc_colram;
@@ -280,7 +279,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
         if (!c->mc_lifetime) return 0;
         /* no more frames in queue, prepare to flush remaining frames */
         if (!c->mc_frame_counter) {
-            num_frames = c->mc_lifetime;
             c->mc_lifetime = 0;
         }
         /* still frames in queue so limit lifetime to remaining frames */
@@ -109,7 +109,7 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
     old_out2 = out[-2];
     old_out3 = out[-1];
     for (n = 0; n <= buffer_length - 4; n+=4) {
-        float tmp0,tmp1,tmp2,tmp3;
+        float tmp0,tmp1,tmp2;
         float val;

         out0 = in[0];
@@ -160,7 +160,6 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
         tmp0 = out0;
         tmp1 = out1;
         tmp2 = out2;
-        tmp3 = out3;

         out3 -= a * tmp2;
         out2 -= a * tmp1;
@@ -1535,8 +1535,8 @@ static void dca_exss_parse_header(DCAContext *s)
 {
     int ss_index;
     int blownup;
-    int header_size;
-    int hd_size;
+    int header_size av_unused;
+    int hd_size av_unused;
     int num_audiop = 1;
     int num_assets = 1;
     int active_ss_mask[8];
@@ -1622,7 +1622,6 @@ static int dca_decode_frame(AVCodecContext * avctx,
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    int data_size_tmp;

     int lfe_samples;
     int num_core_channels = 0;
@@ -245,7 +245,7 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
 int ff_dirac_parse_sequence_header(AVCodecContext *avctx, GetBitContext *gb,
                                    dirac_source_params *source)
 {
-    unsigned version_major, version_minor;
+    unsigned version_major, version_minor av_unused;
     unsigned video_format, picture_coding_mode;

     version_major = svq3_get_ue_golomb(gb);
@@ -235,7 +235,7 @@ int rv_decode_dc(MpegEncContext *s, int n)
 /* read RV 1.0 compatible frame header */
 static int rv10_decode_picture_header(MpegEncContext *s)
 {
-    int mb_count, pb_frame, marker, unk, mb_xy;
+    int mb_count, pb_frame, marker, unk av_unused, mb_xy;

     marker = get_bits1(&s->gb);

@@ -29,7 +29,7 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
                                     int buf_size)
 {
     uint32_t h;
-    int frame_size, channels, id, bits;
+    int frame_size, channels, id av_unused, bits;

     if (buf_size <= AES3_HEADER_LEN) {
         av_log(avctx, AV_LOG_ERROR, "frame is too short\n");
@@ -196,7 +196,7 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
 {
     GetBitContext hb;
     int len;
-    int chunk_size;
+    int chunk_size av_unused;
     short wave_format;

     init_get_bits(&hb, header, header_size*8);
@@ -38,15 +38,12 @@ static int sp5x_decode_frame(AVCodecContext *avctx,
     int buf_size = avpkt->size;
     AVPacket avpkt_recoded;
     const int qscale = 5;
-    const uint8_t *buf_ptr;
     uint8_t *recoded;
     int i = 0, j = 0;

     if (!avctx->width || !avctx->height)
         return -1;

-    buf_ptr = buf;
-
     recoded = av_mallocz(buf_size + 1024);
     if (!recoded)
         return -1;
@@ -201,7 +201,6 @@ static inline int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
 {
     uint32_t magic;
     const uint8_t *obuf;
-    int length;

     obuf = buf;

@@ -212,7 +211,7 @@ static inline int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
         /* av_log (ctx->avctx, AV_LOG_ERROR, "TM2 old header: not implemented (yet)\n"); */
         return 40;
     } else if(magic == 0x00000101) { /* new header */
-        int w, h, size, flags, xr, yr;
+        av_unused int w, h, size, flags, xr, yr, length;

         length = AV_RL32(buf);
         buf += 4;
@@ -109,7 +109,7 @@ static int vaapi_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer
     MpegEncContext * const s = avctx->priv_data;
     VASliceParameterBufferMPEG2 *slice_param;
     GetBitContext gb;
-    uint32_t start_code, quantiser_scale_code, intra_slice_flag, macroblock_offset;
+    uint32_t start_code av_unused, quantiser_scale_code, intra_slice_flag, macroblock_offset;

     av_dlog(avctx, "vaapi_mpeg2_decode_slice(): buffer %p, size %d\n", buffer, size);

@@ -199,7 +199,6 @@ static void vmd_decode(VmdVideoContext *s)

     int frame_x, frame_y;
     int frame_width, frame_height;
-    int dp_size;

     frame_x = AV_RL16(&s->buf[6]);
     frame_y = AV_RL16(&s->buf[8]);
@@ -247,7 +246,6 @@ static void vmd_decode(VmdVideoContext *s)
     }

     dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
-    dp_size = s->frame.linesize[0] * s->avctx->height;
     pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
     switch (meth) {
     case 1:
@@ -181,7 +181,7 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void
     int x, y, bh2, bw2, xored;
     uint8_t *tsrc, *tprev;
     uint8_t *mv;
-    int mx, my, bv;
+    int mx, my;

     bw = (avctx->width + ZMBV_BLOCK - 1) / ZMBV_BLOCK;
     bh = (avctx->height + ZMBV_BLOCK - 1) / ZMBV_BLOCK;
@@ -197,7 +197,7 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void
             tsrc = src + x;
             tprev = prev + x;

-            bv = zmbv_me(c, tsrc, p->linesize[0], tprev, c->pstride, x, y, &mx, &my, &xored);
+            zmbv_me(c, tsrc, p->linesize[0], tprev, c->pstride, x, y, &mx, &my, &xored);
             mv[0] = (mx << 1) | !!xored;
             mv[1] = my << 1;
             tprev += mx + my * c->pstride;
@@ -153,7 +153,7 @@ static void end_frame(AVFilterLink *inlink)
     AVFilterBufferRef *inpicref = inlink->cur_buf;
     AVFilterBufferRef *outpicref = outlink->out_buf;

-    int h, w, plane, line_step, line_size, line;
+    int h, plane, line_step, line_size, line;
     uint8_t *cpy_src, *cpy_dst;

     if ( inpicref->video->interlaced
@@ -162,7 +162,6 @@ static void end_frame(AVFilterLink *inlink)
                "picture will move %s one line\n",
                fieldorder->dst_tff ? "up" : "down");
         h = inpicref->video->h;
-        w = inpicref->video->w;
         for (plane = 0; plane < 4 && inpicref->data[plane]; plane++) {
             line_step = inpicref->linesize[plane];
             line_size = fieldorder->line_size[plane];
@@ -246,7 +246,7 @@ static int fourxm_read_packet(AVFormatContext *s,
     FourxmDemuxContext *fourxm = s->priv_data;
     AVIOContext *pb = s->pb;
     unsigned int fourcc_tag;
-    unsigned int size, out_size;
+    unsigned int size, out_size av_unused;
     int ret = 0;
     unsigned int track_number;
     int packet_read = 0;
@@ -35,7 +35,7 @@ static int ape_tag_read_field(AVFormatContext *s)
 {
     AVIOContext *pb = s->pb;
     uint8_t key[1024], *value;
-    uint32_t size, flags;
+    uint32_t size, flags av_unused;
     int i, c;

     size = avio_rl32(pb); /* field size */
@@ -209,7 +209,7 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
     ff_asf_guid g;
     enum AVMediaType type;
     int type_specific_size, sizeX;
-    uint64_t total_size;
+    uint64_t total_size av_unused;
     unsigned int tag1;
     int64_t pos1, pos2, start_time;
     int test_for_ext_stream_audio, is_dvr_ms_audio=0;
@@ -393,7 +393,7 @@ static int asf_read_ext_stream_properties(AVFormatContext *s, int64_t size)
     AVIOContext *pb = s->pb;
     ff_asf_guid g;
     int ext_len, payload_ext_ct, stream_ct, i;
-    uint32_t ext_d, leak_rate, stream_num;
+    uint32_t ext_d av_unused, leak_rate, stream_num;
     unsigned int stream_languageid_index;

     avio_rl64(pb); // starttime
@@ -511,7 +511,7 @@ static int asf_read_metadata(AVFormatContext *s, int64_t size)
 {
     AVIOContext *pb = s->pb;
     ASFContext *asf = s->priv_data;
-    int n, stream_num, name_len, value_len, value_type, value_num;
+    int n, stream_num, name_len, value_len, value_type av_unused, value_num;
     int ret, i;
     n = avio_rl16(pb);

@@ -626,7 +626,7 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
             // if so the next iteration will pick it up
             continue;
         } else if (!ff_guidcmp(&g, &ff_asf_head1_guid)) {
-            int v1, v2;
+            av_unused int v1, v2;
             ff_get_guid(pb, &g);
             v1 = avio_rl32(pb);
             v2 = avio_rl16(pb);
@@ -799,7 +799,7 @@ static int asf_read_frame_header(AVFormatContext *s, AVIOContext *pb){
     ASFContext *asf = s->priv_data;
     int rsize = 1;
     int num = avio_r8(pb);
-    int64_t ts0, ts1;
+    int64_t ts0, ts1 av_unused;

     asf->packet_segments--;
     asf->packet_key_frame = num >> 7;
@@ -186,7 +186,7 @@ static int mmf_read_header(AVFormatContext *s,
     unsigned int tag;
     AVIOContext *pb = s->pb;
     AVStream *st;
-    int64_t file_size, size;
+    int64_t file_size av_unused, size;
     int rate, params;

     tag = avio_rl32(pb);
@@ -263,12 +263,10 @@ static int mmf_read_packet(AVFormatContext *s,
                            AVPacket *pkt)
 {
     MMFContext *mmf = s->priv_data;
-    AVStream *st;
     int ret, size;

     if (url_feof(s->pb))
         return AVERROR(EIO);
-    st = s->streams[0];

     size = MAX_SIZE;
     if(size > mmf->data_size)
@@ -469,21 +469,21 @@ static int mov_read_hdlr(MOVContext *c, AVIOContext *pb, MOVAtom atom)
 int ff_mov_read_esds(AVFormatContext *fc, AVIOContext *pb, MOVAtom atom)
 {
     AVStream *st;
-    int tag, len;
+    int tag;

     if (fc->nb_streams < 1)
         return 0;
     st = fc->streams[fc->nb_streams-1];

     avio_rb32(pb); /* version + flags */
-    len = ff_mp4_read_descr(fc, pb, &tag);
+    ff_mp4_read_descr(fc, pb, &tag);
     if (tag == MP4ESDescrTag) {
         avio_rb16(pb); /* ID */
         avio_r8(pb); /* priority */
     } else
         avio_rb16(pb); /* ID */

-    len = ff_mp4_read_descr(fc, pb, &tag);
+    ff_mp4_read_descr(fc, pb, &tag);
     if (tag == MP4DecConfigDescrTag)
         ff_mp4_read_dec_config_descr(fc, st, pb);
     return 0;
@@ -197,8 +197,8 @@ static int ogg_read_page(AVFormatContext *s, int *str)
     int flags, nsegs;
     uint64_t gp;
     uint32_t serial;
-    uint32_t seq;
-    uint32_t crc;
+    uint32_t seq av_unused;
+    uint32_t crc av_unused;
     int size, idx;
     uint8_t sync[4];
     int sp = 0;
@@ -41,8 +41,8 @@ static int celt_header(AVFormatContext *s, int idx)

         /* Main header */

-        uint32_t version, header_size, sample_rate, nb_channels, frame_size;
-        uint32_t overlap, bytes_per_packet, extra_headers;
+        uint32_t version, header_size av_unused, sample_rate, nb_channels, frame_size;
+        uint32_t overlap, bytes_per_packet av_unused, extra_headers;
         uint8_t *extradata;

         extradata = av_malloc(2 * sizeof(uint32_t) +
@@ -39,7 +39,7 @@ ogm_header(AVFormatContext *s, int idx)
     const uint8_t *p = os->buf + os->pstart;
     uint64_t time_unit;
     uint64_t spu;
-    uint32_t default_len;
+    uint32_t default_len av_unused;

     if(!(*p & 1))
         return 0;
@@ -80,8 +80,8 @@ static av_cold int rl2_read_header(AVFormatContext *s,
     unsigned int audio_frame_counter = 0;
     unsigned int video_frame_counter = 0;
     unsigned int back_size;
-    int data_size;
-    unsigned short encoding_method;
+    int data_size av_unused;
+    unsigned short encoding_method av_unused;
     unsigned short sound_rate;
     unsigned short rate;
     unsigned short channels;
@@ -280,7 +280,7 @@ ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
         if (rm_read_audio_stream_info(s, pb, st, rst, 0))
             return -1;
     } else {
-        int fps, fps2;
+        int fps, fps2 av_unused;
         if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) {
         fail1:
             av_log(st->codec, AV_LOG_ERROR, "Unsupported video codec\n");
@@ -299,7 +299,7 @@ static int rpl_read_packet(AVFormatContext *s, AVPacket *pkt)
         stream->codec->codec_tag == 124) {
         // We have to split Escape 124 frames because there are
         // multiple frames per chunk in Escape 124 samples.
-        uint32_t frame_size, frame_flags;
+        uint32_t frame_size, frame_flags av_unused;

         frame_flags = avio_rl32(pb);
         frame_size = avio_rl32(pb);
@@ -108,7 +108,7 @@ static int parse_fmtp_config(AVStream *st, char *value)
     int len = ff_hex_to_data(NULL, value), i, ret = 0;
     GetBitContext gb;
     uint8_t *config;
-    int audio_mux_version, same_time_framing, num_sub_frames,
+    int audio_mux_version, same_time_framing, num_sub_frames av_unused,
         num_programs, num_layers;

     /* Pad this buffer, too, to avoid out of bounds reads with get_bits below */
@@ -32,7 +32,7 @@ int ff_sauce_read(AVFormatContext *avctx, uint64_t *fsize, int *got_width, int g
 {
     AVIOContext *pb = avctx->pb;
     char buf[36];
-    int datatype, filetype, t1, t2, nb_comments, flags;
+    int datatype, filetype, t1, t2, nb_comments, flags av_unused;
     uint64_t start_pos = avio_size(pb) - 128;

     avio_seek(pb, start_pos, SEEK_SET);
@@ -233,7 +233,6 @@ static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
     int i;
     int frame_size = 0;
     int palchange = 0;
-    int pos;

     if (url_feof(s->pb) || smk->cur_frame >= smk->frames)
         return AVERROR_EOF;
@@ -244,7 +243,6 @@ static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
     frame_size = smk->frm_size[smk->cur_frame] & (~3);
     flags = smk->frm_flags[smk->cur_frame];
     /* handle palette change event */
-    pos = avio_tell(s->pb);
     if(flags & SMACKER_PAL){
         int size, sz, t, off, j, pos;
         uint8_t *pal = smk->pal;
|
||||
static int sol_read_header(AVFormatContext *s,
|
||||
AVFormatParameters *ap)
|
||||
{
|
||||
int size;
|
||||
int size av_unused;
|
||||
unsigned int magic,tag;
|
||||
AVIOContext *pb = s->pb;
|
||||
unsigned int id, channels, rate, type;
|
||||
|
@@ -94,7 +94,7 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
     AVPicture *picture;
     int* first_pkt = s->priv_data;
     int width, height, h_chroma_shift, v_chroma_shift;
-    int i, m;
+    int i;
     char buf2[Y4M_LINE_MAX+1];
     char buf1[20];
     uint8_t *ptr, *ptr1, *ptr2;
@@ -114,7 +114,7 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)

     /* construct frame header */

-    m = snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
+    snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
     avio_write(pb, buf1, strlen(buf1));

     width = st->codec->width;