Merge remote-tracking branch 'qatar/master'
* qatar/master:
vsrc_buffer: fix check from 7ae7c41.
libxvid: Reorder functions to avoid forward declarations; make functions static.
libxvid: drop some pointless dead code
wmal: vertical alignment cosmetics
wmal: Warn about missing bitstream splicing feature and ask for sample.
wmal: Skip seekable_frame_in_packet.
wmal: Drop unused variable num_possible_block_size.
avfiltergraph: make the AVFilterInOut alloc/free API public
graphparser: allow specifying sws flags in the graph description.
graphparser: fix the order of connecting unlabeled links.
graphparser: add avfilter_graph_parse2().
vsrc_buffer: allow using a NULL buffer to signal EOF.
swscale: handle last pixel if lines have an odd width.
qdm2: fix a dubious pointer cast
WMAL: Do not try to read rawpcm coefficients if bits is invalid
mov: Fix detecting there is no sync sample.
tiffdec: K&R cosmetics
avf: has_duration does not check the global one
dsputil: fix optimized emu_edge function on Win64.
Conflicts:
doc/APIchanges
libavcodec/libxvid_rc.c
libavcodec/libxvidff.c
libavcodec/tiff.c
libavcodec/wmalosslessdec.c
libavfilter/avfiltergraph.h
libavfilter/graphparser.c
libavfilter/version.h
libavfilter/vsrc_buffer.c
libswscale/output.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 7432bcfe5a
@ -19,6 +19,9 @@ API changes, most recent first:
2012-03-26 - a67d9cf - lavfi 2.66.100
  Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.

2012-xx-xx - xxxxxxx - lavfi 2.16.0 - avfiltergraph.h
  Add avfilter_graph_parse2()

2012-xx-xx - xxxxxxx - lavu 51.27.0 - samplefmt.h
  Add av_get_packed_sample_fmt() and av_get_planar_sample_fmt()

@ -76,6 +76,12 @@ In a complete filterchain all the unlabelled filter input and output
pads must be connected. A filtergraph is considered valid if all the
filter input and output pads of all the filterchains are connected.

Libavfilter will automatically insert scale filters where format
conversion is required. It is possible to specify swscale flags
for those automatically inserted scalers by prepending
@code{sws_flags=@var{flags};}
to the filtergraph description.

Follows a BNF description for the filtergraph syntax:
@example
@var{NAME}             ::= sequence of alphanumeric characters and '_'
@ -84,7 +90,7 @@ Follows a BNF description for the filtergraph syntax:
@var{FILTER_ARGUMENTS} ::= sequence of chars (eventually quoted)
@var{FILTER}           ::= [@var{LINKNAMES}] @var{NAME} ["=" @var{ARGUMENTS}] [@var{LINKNAMES}]
@var{FILTERCHAIN}      ::= @var{FILTER} [,@var{FILTERCHAIN}]
@var{FILTERGRAPH}      ::= @var{FILTERCHAIN} [;@var{FILTERGRAPH}]
@var{FILTERGRAPH}      ::= [sws_flags=@var{flags};] @var{FILTERCHAIN} [;@var{FILTERGRAPH}]
@end example

@c man end FILTERGRAPH DESCRIPTION

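As a concrete illustration of the syntax above (a hedged sketch, not part of the patch; the graph string, filter arguments and error handling are illustrative only), a description carrying the new sws_flags prefix can be handed to the avfilter_graph_parse2() function introduced by this merge:

#include "libavfilter/avfiltergraph.h"

/* Sketch: parse a graph description whose automatically inserted scalers
 * should use bicubic scaling. The scale filter arguments are arbitrary. */
static int build_graph(AVFilterGraph *graph)
{
    AVFilterInOut *inputs = NULL, *outputs = NULL;
    const char *desc = "sws_flags=bicubic; [in] scale=640:480 [out]";
    int ret = avfilter_graph_parse2(graph, desc, &inputs, &outputs);

    if (ret < 0)
        return ret;

    /* inputs/outputs now describe the unlinked [in]/[out] pads; a real
     * caller would link them to source/sink filters before freeing. */
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return 0;
}
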
@ -31,16 +31,12 @@
#undef NDEBUG
#include <assert.h>

extern unsigned int xvid_debug;

int ff_xvid_rate_control_init(MpegEncContext *s){
    char *tmp_name;
    int fd, i;
    xvid_plg_create_t xvid_plg_create = { 0 };
    xvid_plugin_2pass2_t xvid_2pass2 = { 0 };

//xvid_debug=-1;

    fd=av_tempfile("xvidrc.", &tmp_name, 0, s->avctx);
    if (fd == -1) {
        av_log(NULL, AV_LOG_ERROR, "Can't create temporary pass2 file.\n");
@ -58,7 +54,6 @@ int ff_xvid_rate_control_init(MpegEncContext *s){
                frame_types[rce->pict_type], (int)lrintf(rce->qscale / FF_QP2LAMBDA), rce->i_count, s->mb_num - rce->i_count - rce->skip_count,
                rce->skip_count, (rce->i_tex_bits + rce->p_tex_bits + rce->misc_bits+7)/8, (rce->header_bits+rce->mv_bits+7)/8);

//av_log(NULL, AV_LOG_ERROR, "%s\n", tmp);
        if (write(fd, tmp, strlen(tmp)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error %s writing 2pass logfile\n", strerror(errno));
            return AVERROR(errno);

@ -72,12 +72,277 @@ struct xvid_ff_pass1 {
|
||||
struct xvid_context *context; /**< Pointer to private context */
|
||||
};
|
||||
|
||||
/* Prototypes - See function implementation for details */
|
||||
int xvid_strip_vol_header(AVCodecContext *avctx, AVPacket *pkt, unsigned int header_len, unsigned int frame_len);
|
||||
int xvid_ff_2pass(void *ref, int opt, void *p1, void *p2);
|
||||
void xvid_correct_framerate(AVCodecContext *avctx);
|
||||
/*
|
||||
* Xvid 2-Pass Kludge Section
|
||||
*
|
||||
* Xvid's default 2-pass doesn't allow us to create data as we need to, so
|
||||
* this section spends time replacing the first pass plugin so we can write
|
||||
* statistic information as libavcodec requests in. We have another kludge
|
||||
* that allows us to pass data to the second pass in Xvid without a custom
|
||||
* rate-control plugin.
|
||||
*/
|
||||
|
||||
|
||||
/**
|
||||
* Initialize the two-pass plugin and context.
|
||||
*
|
||||
* @param param Input construction parameter structure
|
||||
* @param handle Private context handle
|
||||
* @return Returns XVID_ERR_xxxx on failure, or 0 on success.
|
||||
*/
|
||||
static int xvid_ff_2pass_create(xvid_plg_create_t * param,
|
||||
void ** handle) {
|
||||
struct xvid_ff_pass1 *x = (struct xvid_ff_pass1 *)param->param;
|
||||
char *log = x->context->twopassbuffer;
|
||||
|
||||
/* Do a quick bounds check */
|
||||
if( log == NULL )
|
||||
return XVID_ERR_FAIL;
|
||||
|
||||
/* We use snprintf() */
|
||||
/* This is because we can safely prevent a buffer overflow */
|
||||
log[0] = 0;
|
||||
snprintf(log, BUFFER_REMAINING(log),
|
||||
"# ffmpeg 2-pass log file, using xvid codec\n");
|
||||
snprintf(BUFFER_CAT(log), BUFFER_REMAINING(log),
|
||||
"# Do not modify. libxvidcore version: %d.%d.%d\n\n",
|
||||
XVID_VERSION_MAJOR(XVID_VERSION),
|
||||
XVID_VERSION_MINOR(XVID_VERSION),
|
||||
XVID_VERSION_PATCH(XVID_VERSION));
|
||||
|
||||
*handle = x->context;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Destroy the two-pass plugin context.
|
||||
*
|
||||
* @param ref Context pointer for the plugin
|
||||
* @param param Destroy context
|
||||
* @return Returns 0, success guaranteed
|
||||
*/
|
||||
static int xvid_ff_2pass_destroy(struct xvid_context *ref,
|
||||
xvid_plg_destroy_t *param) {
|
||||
/* Currently cannot think of anything to do on destruction */
|
||||
/* Still, the framework should be here for reference/use */
|
||||
if( ref->twopassbuffer != NULL )
|
||||
ref->twopassbuffer[0] = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Enable fast encode mode during the first pass.
|
||||
*
|
||||
* @param ref Context pointer for the plugin
|
||||
* @param param Frame data
|
||||
* @return Returns 0, success guaranteed
|
||||
*/
|
||||
static int xvid_ff_2pass_before(struct xvid_context *ref,
|
||||
xvid_plg_data_t *param) {
|
||||
int motion_remove;
|
||||
int motion_replacements;
|
||||
int vop_remove;
|
||||
|
||||
/* Nothing to do here, result is changed too much */
|
||||
if( param->zone && param->zone->mode == XVID_ZONE_QUANT )
|
||||
return 0;
|
||||
|
||||
/* We can implement a 'turbo' first pass mode here */
|
||||
param->quant = 2;
|
||||
|
||||
/* Init values */
|
||||
motion_remove = ~XVID_ME_CHROMA_PVOP &
|
||||
~XVID_ME_CHROMA_BVOP &
|
||||
~XVID_ME_EXTSEARCH16 &
|
||||
~XVID_ME_ADVANCEDDIAMOND16;
|
||||
motion_replacements = XVID_ME_FAST_MODEINTERPOLATE |
|
||||
XVID_ME_SKIP_DELTASEARCH |
|
||||
XVID_ME_FASTREFINE16 |
|
||||
XVID_ME_BFRAME_EARLYSTOP;
|
||||
vop_remove = ~XVID_VOP_MODEDECISION_RD &
|
||||
~XVID_VOP_FAST_MODEDECISION_RD &
|
||||
~XVID_VOP_TRELLISQUANT &
|
||||
~XVID_VOP_INTER4V &
|
||||
~XVID_VOP_HQACPRED;
|
||||
|
||||
param->vol_flags &= ~XVID_VOL_GMC;
|
||||
param->vop_flags &= vop_remove;
|
||||
param->motion_flags &= motion_remove;
|
||||
param->motion_flags |= motion_replacements;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Capture statistic data and write it during first pass.
|
||||
*
|
||||
* @param ref Context pointer for the plugin
|
||||
* @param param Statistic data
|
||||
* @return Returns XVID_ERR_xxxx on failure, or 0 on success
|
||||
*/
|
||||
static int xvid_ff_2pass_after(struct xvid_context *ref,
|
||||
xvid_plg_data_t *param) {
|
||||
char *log = ref->twopassbuffer;
|
||||
const char *frame_types = " ipbs";
|
||||
char frame_type;
|
||||
|
||||
/* Quick bounds check */
|
||||
if( log == NULL )
|
||||
return XVID_ERR_FAIL;
|
||||
|
||||
/* Convert the type given to us into a character */
|
||||
if( param->type < 5 && param->type > 0 ) {
|
||||
frame_type = frame_types[param->type];
|
||||
} else {
|
||||
return XVID_ERR_FAIL;
|
||||
}
|
||||
|
||||
snprintf(BUFFER_CAT(log), BUFFER_REMAINING(log),
|
||||
"%c %d %d %d %d %d %d\n",
|
||||
frame_type, param->stats.quant, param->stats.kblks, param->stats.mblks,
|
||||
param->stats.ublks, param->stats.length, param->stats.hlength);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Dispatch function for our custom plugin.
|
||||
* This handles the dispatch for the Xvid plugin. It passes data
|
||||
* on to other functions for actual processing.
|
||||
*
|
||||
* @param ref Context pointer for the plugin
|
||||
* @param cmd The task given for us to complete
|
||||
* @param p1 First parameter (varies)
|
||||
* @param p2 Second parameter (varies)
|
||||
* @return Returns XVID_ERR_xxxx on failure, or 0 on success
|
||||
*/
|
||||
static int xvid_ff_2pass(void *ref, int cmd, void *p1, void *p2)
|
||||
{
|
||||
switch( cmd ) {
|
||||
case XVID_PLG_INFO:
|
||||
case XVID_PLG_FRAME:
|
||||
return 0;
|
||||
|
||||
case XVID_PLG_BEFORE:
|
||||
return xvid_ff_2pass_before(ref, p1);
|
||||
|
||||
case XVID_PLG_CREATE:
|
||||
return xvid_ff_2pass_create(p1, p2);
|
||||
|
||||
case XVID_PLG_AFTER:
|
||||
return xvid_ff_2pass_after(ref, p1);
|
||||
|
||||
case XVID_PLG_DESTROY:
|
||||
return xvid_ff_2pass_destroy(ref, p1);
|
||||
|
||||
default:
|
||||
return XVID_ERR_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Routine to create a global VO/VOL header for MP4 container.
|
||||
* What we do here is extract the header from the Xvid bitstream
|
||||
* as it is encoded. We also strip the repeated headers from the
|
||||
* bitstream when a global header is requested for MPEG-4 ISO
|
||||
* compliance.
|
||||
*
|
||||
* @param avctx AVCodecContext pointer to context
|
||||
* @param frame Pointer to encoded frame data
|
||||
* @param header_len Length of header to search
|
||||
* @param frame_len Length of encoded frame data
|
||||
* @return Returns new length of frame data
|
||||
*/
|
||||
static int xvid_strip_vol_header(AVCodecContext *avctx,
|
||||
AVPacket *pkt,
|
||||
unsigned int header_len,
|
||||
unsigned int frame_len) {
|
||||
int vo_len = 0, i;
|
||||
|
||||
for( i = 0; i < header_len - 3; i++ ) {
|
||||
if( pkt->data[i] == 0x00 &&
|
||||
pkt->data[i+1] == 0x00 &&
|
||||
pkt->data[i+2] == 0x01 &&
|
||||
pkt->data[i+3] == 0xB6 ) {
|
||||
vo_len = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if( vo_len > 0 ) {
|
||||
/* We need to store the header, so extract it */
|
||||
if( avctx->extradata == NULL ) {
|
||||
avctx->extradata = av_malloc(vo_len);
|
||||
memcpy(avctx->extradata, pkt->data, vo_len);
|
||||
avctx->extradata_size = vo_len;
|
||||
}
|
||||
/* Less dangerous now, memmove properly copies the two
|
||||
chunks of overlapping data */
|
||||
memmove(pkt->data, &pkt->data[vo_len], frame_len - vo_len);
|
||||
pkt->size = frame_len - vo_len;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Routine to correct a possibly erroneous framerate being fed to us.
|
||||
* Xvid currently chokes on framerates where the ticks per frame is
|
||||
* extremely large. This function works to correct problems in this area
|
||||
* by estimating a new framerate and taking the simpler fraction of
|
||||
* the two presented.
|
||||
*
|
||||
* @param avctx Context that contains the framerate to correct.
|
||||
*/
|
||||
static void xvid_correct_framerate(AVCodecContext *avctx)
|
||||
{
|
||||
int frate, fbase;
|
||||
int est_frate, est_fbase;
|
||||
int gcd;
|
||||
float est_fps, fps;
|
||||
|
||||
frate = avctx->time_base.den;
|
||||
fbase = avctx->time_base.num;
|
||||
|
||||
gcd = av_gcd(frate, fbase);
|
||||
if( gcd > 1 ) {
|
||||
frate /= gcd;
|
||||
fbase /= gcd;
|
||||
}
|
||||
|
||||
if( frate <= 65000 && fbase <= 65000 ) {
|
||||
avctx->time_base.den = frate;
|
||||
avctx->time_base.num = fbase;
|
||||
return;
|
||||
}
|
||||
|
||||
fps = (float)frate / (float)fbase;
|
||||
est_fps = roundf(fps * 1000.0) / 1000.0;
|
||||
|
||||
est_frate = (int)est_fps;
|
||||
if( est_fps > (int)est_fps ) {
|
||||
est_frate = (est_frate + 1) * 1000;
|
||||
est_fbase = (int)roundf((float)est_frate / est_fps);
|
||||
} else
|
||||
est_fbase = 1;
|
||||
|
||||
gcd = av_gcd(est_frate, est_fbase);
|
||||
if( gcd > 1 ) {
|
||||
est_frate /= gcd;
|
||||
est_fbase /= gcd;
|
||||
}
|
||||
|
||||
if( fbase > est_fbase ) {
|
||||
avctx->time_base.den = est_frate;
|
||||
avctx->time_base.num = est_fbase;
|
||||
av_log(avctx, AV_LOG_DEBUG,
|
||||
"Xvid: framerate re-estimated: %.2f, %.3f%% correction\n",
|
||||
est_fps, (((est_fps - fps)/fps) * 100.0));
|
||||
} else {
|
||||
avctx->time_base.den = frate;
|
||||
avctx->time_base.num = fbase;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create the private context for the encoder.
|
||||
* All buffers are allocated, settings are loaded from the user,
|
||||
@ -508,274 +773,6 @@ static av_cold int xvid_encode_close(AVCodecContext *avctx) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Routine to create a global VO/VOL header for MP4 container.
|
||||
* What we do here is extract the header from the Xvid bitstream
|
||||
* as it is encoded. We also strip the repeated headers from the
|
||||
* bitstream when a global header is requested for MPEG-4 ISO
|
||||
* compliance.
|
||||
*
|
||||
* @param avctx AVCodecContext pointer to context
|
||||
* @param frame Pointer to encoded frame data
|
||||
* @param header_len Length of header to search
|
||||
* @param frame_len Length of encoded frame data
|
||||
* @return Returns new length of frame data
|
||||
*/
|
||||
int xvid_strip_vol_header(AVCodecContext *avctx,
|
||||
AVPacket *pkt,
|
||||
unsigned int header_len,
|
||||
unsigned int frame_len) {
|
||||
int vo_len = 0, i;
|
||||
|
||||
for( i = 0; i < header_len - 3; i++ ) {
|
||||
if( pkt->data[i] == 0x00 &&
|
||||
pkt->data[i+1] == 0x00 &&
|
||||
pkt->data[i+2] == 0x01 &&
|
||||
pkt->data[i+3] == 0xB6 ) {
|
||||
vo_len = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if( vo_len > 0 ) {
|
||||
/* We need to store the header, so extract it */
|
||||
if( avctx->extradata == NULL ) {
|
||||
avctx->extradata = av_malloc(vo_len);
|
||||
memcpy(avctx->extradata, pkt->data, vo_len);
|
||||
avctx->extradata_size = vo_len;
|
||||
}
|
||||
/* Less dangerous now, memmove properly copies the two
|
||||
chunks of overlapping data */
|
||||
memmove(pkt->data, &pkt->data[vo_len], frame_len - vo_len);
|
||||
pkt->size = frame_len - vo_len;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Routine to correct a possibly erroneous framerate being fed to us.
|
||||
* Xvid currently chokes on framerates where the ticks per frame is
|
||||
* extremely large. This function works to correct problems in this area
|
||||
* by estimating a new framerate and taking the simpler fraction of
|
||||
* the two presented.
|
||||
*
|
||||
* @param avctx Context that contains the framerate to correct.
|
||||
*/
|
||||
void xvid_correct_framerate(AVCodecContext *avctx) {
|
||||
int frate, fbase;
|
||||
int est_frate, est_fbase;
|
||||
int gcd;
|
||||
float est_fps, fps;
|
||||
|
||||
frate = avctx->time_base.den;
|
||||
fbase = avctx->time_base.num;
|
||||
|
||||
gcd = av_gcd(frate, fbase);
|
||||
if( gcd > 1 ) {
|
||||
frate /= gcd;
|
||||
fbase /= gcd;
|
||||
}
|
||||
|
||||
if( frate <= 65000 && fbase <= 65000 ) {
|
||||
avctx->time_base.den = frate;
|
||||
avctx->time_base.num = fbase;
|
||||
return;
|
||||
}
|
||||
|
||||
fps = (float)frate / (float)fbase;
|
||||
est_fps = roundf(fps * 1000.0) / 1000.0;
|
||||
|
||||
est_frate = (int)est_fps;
|
||||
if( est_fps > (int)est_fps ) {
|
||||
est_frate = (est_frate + 1) * 1000;
|
||||
est_fbase = (int)roundf((float)est_frate / est_fps);
|
||||
} else
|
||||
est_fbase = 1;
|
||||
|
||||
gcd = av_gcd(est_frate, est_fbase);
|
||||
if( gcd > 1 ) {
|
||||
est_frate /= gcd;
|
||||
est_fbase /= gcd;
|
||||
}
|
||||
|
||||
if( fbase > est_fbase ) {
|
||||
avctx->time_base.den = est_frate;
|
||||
avctx->time_base.num = est_fbase;
|
||||
av_log(avctx, AV_LOG_DEBUG,
|
||||
"Xvid: framerate re-estimated: %.2f, %.3f%% correction\n",
|
||||
est_fps, (((est_fps - fps)/fps) * 100.0));
|
||||
} else {
|
||||
avctx->time_base.den = frate;
|
||||
avctx->time_base.num = fbase;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Xvid 2-Pass Kludge Section
|
||||
*
|
||||
* Xvid's default 2-pass doesn't allow us to create data as we need to, so
|
||||
* this section spends time replacing the first pass plugin so we can write
|
||||
* statistic information as libavcodec requests in. We have another kludge
|
||||
* that allows us to pass data to the second pass in Xvid without a custom
|
||||
* rate-control plugin.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Initialize the two-pass plugin and context.
|
||||
*
|
||||
* @param param Input construction parameter structure
|
||||
* @param handle Private context handle
|
||||
* @return Returns XVID_ERR_xxxx on failure, or 0 on success.
|
||||
*/
|
||||
static int xvid_ff_2pass_create(xvid_plg_create_t * param,
|
||||
void ** handle) {
|
||||
struct xvid_ff_pass1 *x = (struct xvid_ff_pass1 *)param->param;
|
||||
char *log = x->context->twopassbuffer;
|
||||
|
||||
/* Do a quick bounds check */
|
||||
if( log == NULL )
|
||||
return XVID_ERR_FAIL;
|
||||
|
||||
/* We use snprintf() */
|
||||
/* This is because we can safely prevent a buffer overflow */
|
||||
log[0] = 0;
|
||||
snprintf(log, BUFFER_REMAINING(log),
|
||||
"# ffmpeg 2-pass log file, using xvid codec\n");
|
||||
snprintf(BUFFER_CAT(log), BUFFER_REMAINING(log),
|
||||
"# Do not modify. libxvidcore version: %d.%d.%d\n\n",
|
||||
XVID_VERSION_MAJOR(XVID_VERSION),
|
||||
XVID_VERSION_MINOR(XVID_VERSION),
|
||||
XVID_VERSION_PATCH(XVID_VERSION));
|
||||
|
||||
*handle = x->context;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Destroy the two-pass plugin context.
|
||||
*
|
||||
* @param ref Context pointer for the plugin
|
||||
* @param param Destroy context
|
||||
* @return Returns 0, success guaranteed
|
||||
*/
|
||||
static int xvid_ff_2pass_destroy(struct xvid_context *ref,
|
||||
xvid_plg_destroy_t *param) {
|
||||
/* Currently cannot think of anything to do on destruction */
|
||||
/* Still, the framework should be here for reference/use */
|
||||
if( ref->twopassbuffer != NULL )
|
||||
ref->twopassbuffer[0] = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Enable fast encode mode during the first pass.
|
||||
*
|
||||
* @param ref Context pointer for the plugin
|
||||
* @param param Frame data
|
||||
* @return Returns 0, success guaranteed
|
||||
*/
|
||||
static int xvid_ff_2pass_before(struct xvid_context *ref,
|
||||
xvid_plg_data_t *param) {
|
||||
int motion_remove;
|
||||
int motion_replacements;
|
||||
int vop_remove;
|
||||
|
||||
/* Nothing to do here, result is changed too much */
|
||||
if( param->zone && param->zone->mode == XVID_ZONE_QUANT )
|
||||
return 0;
|
||||
|
||||
/* We can implement a 'turbo' first pass mode here */
|
||||
param->quant = 2;
|
||||
|
||||
/* Init values */
|
||||
motion_remove = ~XVID_ME_CHROMA_PVOP &
|
||||
~XVID_ME_CHROMA_BVOP &
|
||||
~XVID_ME_EXTSEARCH16 &
|
||||
~XVID_ME_ADVANCEDDIAMOND16;
|
||||
motion_replacements = XVID_ME_FAST_MODEINTERPOLATE |
|
||||
XVID_ME_SKIP_DELTASEARCH |
|
||||
XVID_ME_FASTREFINE16 |
|
||||
XVID_ME_BFRAME_EARLYSTOP;
|
||||
vop_remove = ~XVID_VOP_MODEDECISION_RD &
|
||||
~XVID_VOP_FAST_MODEDECISION_RD &
|
||||
~XVID_VOP_TRELLISQUANT &
|
||||
~XVID_VOP_INTER4V &
|
||||
~XVID_VOP_HQACPRED;
|
||||
|
||||
param->vol_flags &= ~XVID_VOL_GMC;
|
||||
param->vop_flags &= vop_remove;
|
||||
param->motion_flags &= motion_remove;
|
||||
param->motion_flags |= motion_replacements;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Capture statistic data and write it during first pass.
|
||||
*
|
||||
* @param ref Context pointer for the plugin
|
||||
* @param param Statistic data
|
||||
* @return Returns XVID_ERR_xxxx on failure, or 0 on success
|
||||
*/
|
||||
static int xvid_ff_2pass_after(struct xvid_context *ref,
|
||||
xvid_plg_data_t *param) {
|
||||
char *log = ref->twopassbuffer;
|
||||
const char *frame_types = " ipbs";
|
||||
char frame_type;
|
||||
|
||||
/* Quick bounds check */
|
||||
if( log == NULL )
|
||||
return XVID_ERR_FAIL;
|
||||
|
||||
/* Convert the type given to us into a character */
|
||||
if( param->type < 5 && param->type > 0 ) {
|
||||
frame_type = frame_types[param->type];
|
||||
} else {
|
||||
return XVID_ERR_FAIL;
|
||||
}
|
||||
|
||||
snprintf(BUFFER_CAT(log), BUFFER_REMAINING(log),
|
||||
"%c %d %d %d %d %d %d\n",
|
||||
frame_type, param->stats.quant, param->stats.kblks, param->stats.mblks,
|
||||
param->stats.ublks, param->stats.length, param->stats.hlength);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Dispatch function for our custom plugin.
|
||||
* This handles the dispatch for the Xvid plugin. It passes data
|
||||
* on to other functions for actual processing.
|
||||
*
|
||||
* @param ref Context pointer for the plugin
|
||||
* @param cmd The task given for us to complete
|
||||
* @param p1 First parameter (varies)
|
||||
* @param p2 Second parameter (varies)
|
||||
* @return Returns XVID_ERR_xxxx on failure, or 0 on success
|
||||
*/
|
||||
int xvid_ff_2pass(void *ref, int cmd, void *p1, void *p2) {
|
||||
switch( cmd ) {
|
||||
case XVID_PLG_INFO:
|
||||
case XVID_PLG_FRAME:
|
||||
return 0;
|
||||
|
||||
case XVID_PLG_BEFORE:
|
||||
return xvid_ff_2pass_before(ref, p1);
|
||||
|
||||
case XVID_PLG_CREATE:
|
||||
return xvid_ff_2pass_create(p1, p2);
|
||||
|
||||
case XVID_PLG_AFTER:
|
||||
return xvid_ff_2pass_after(ref, p1);
|
||||
|
||||
case XVID_PLG_DESTROY:
|
||||
return xvid_ff_2pass_destroy(ref, p1);
|
||||
|
||||
default:
|
||||
return XVID_ERR_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Xvid codec definition for libavcodec.
|
||||
*/
|
||||
|
@ -140,7 +140,6 @@ typedef struct {
|
||||
/// Parameters built from header parameters, do not change during playback
|
||||
int group_order; ///< order of frame group
|
||||
int fft_order; ///< order of FFT (actually fftorder+1)
|
||||
int fft_frame_size; ///< size of fft frame, in components (1 complex = re + im)
|
||||
int frame_size; ///< size of data frame
|
||||
int frequency_range;
|
||||
int sub_sampling; ///< subsampling: 0=25%, 1=50%, 2=100% */
|
||||
@ -1607,13 +1606,17 @@ static void qdm2_fft_tone_synthesizer (QDM2Context *q, int sub_packet)
|
||||
static void qdm2_calculate_fft (QDM2Context *q, int channel, int sub_packet)
|
||||
{
|
||||
const float gain = (q->channels == 1 && q->nb_channels == 2) ? 0.5f : 1.0f;
|
||||
float *out = q->output_buffer + channel;
|
||||
int i;
|
||||
q->fft.complex[channel][0].re *= 2.0f;
|
||||
q->fft.complex[channel][0].im = 0.0f;
|
||||
q->rdft_ctx.rdft_calc(&q->rdft_ctx, (FFTSample *)q->fft.complex[channel]);
|
||||
/* add samples to output buffer */
|
||||
for (i = 0; i < ((q->fft_frame_size + 15) & ~15); i++)
|
||||
q->output_buffer[q->channels * i + channel] += ((float *) q->fft.complex[channel])[i] * gain;
|
||||
for (i = 0; i < FFALIGN(q->fft_size, 8); i++) {
|
||||
out[0] += q->fft.complex[channel][i].re * gain;
|
||||
out[q->channels] += q->fft.complex[channel][i].im * gain;
|
||||
out += 2 * q->channels;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -1688,7 +1691,6 @@ static void dump_context(QDM2Context *q)
|
||||
PRINT("checksum_size",q->checksum_size);
|
||||
PRINT("channels",q->channels);
|
||||
PRINT("nb_channels",q->nb_channels);
|
||||
PRINT("fft_frame_size",q->fft_frame_size);
|
||||
PRINT("fft_size",q->fft_size);
|
||||
PRINT("sub_sampling",q->sub_sampling);
|
||||
PRINT("fft_order",q->fft_order);
|
||||
@ -1843,7 +1845,6 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
|
||||
}
|
||||
|
||||
s->fft_order = av_log2(s->fft_size) + 1;
|
||||
s->fft_frame_size = 2 * s->fft_size; // complex has two floats
|
||||
|
||||
// something like max decodable tones
|
||||
s->group_order = av_log2(s->group_size) + 1;
|
||||
|
@ -52,35 +52,39 @@ typedef struct TiffContext {
|
||||
|
||||
int strips, rps, sstype;
|
||||
int sot;
|
||||
const uint8_t* stripdata;
|
||||
const uint8_t* stripsizes;
|
||||
const uint8_t *stripdata;
|
||||
const uint8_t *stripsizes;
|
||||
int stripsize, stripoff;
|
||||
LZWState *lzw;
|
||||
} TiffContext;
|
||||
|
||||
static unsigned tget_short(const uint8_t **p, int le) {
|
||||
static unsigned tget_short(const uint8_t **p, int le)
|
||||
{
|
||||
unsigned v = le ? AV_RL16(*p) : AV_RB16(*p);
|
||||
*p += 2;
|
||||
return v;
|
||||
}
|
||||
|
||||
static unsigned tget_long(const uint8_t **p, int le) {
|
||||
static unsigned tget_long(const uint8_t **p, int le)
|
||||
{
|
||||
unsigned v = le ? AV_RL32(*p) : AV_RB32(*p);
|
||||
*p += 4;
|
||||
return v;
|
||||
}
|
||||
|
||||
static unsigned tget(const uint8_t **p, int type, int le) {
|
||||
switch(type){
|
||||
static unsigned tget(const uint8_t **p, int type, int le)
|
||||
{
|
||||
switch (type) {
|
||||
case TIFF_BYTE : return *(*p)++;
|
||||
case TIFF_SHORT: return tget_short(p, le);
|
||||
case TIFF_LONG : return tget_long (p, le);
|
||||
case TIFF_LONG : return tget_long(p, le);
|
||||
default : return UINT_MAX;
|
||||
}
|
||||
}
|
||||
|
||||
#if CONFIG_ZLIB
|
||||
static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src, int size)
|
||||
static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
|
||||
int size)
|
||||
{
|
||||
z_stream zstream = { 0 };
|
||||
int zret;
|
||||
@ -141,7 +145,9 @@ static void av_always_inline horizontal_fill(unsigned int bpp, uint8_t* dst,
|
||||
}
|
||||
}
|
||||
|
||||
static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uint8_t *src, int size, int lines){
|
||||
static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
|
||||
const uint8_t *src, int size, int lines)
|
||||
{
|
||||
int c, line, pixels, code;
|
||||
const uint8_t *ssrc = src;
|
||||
int width = ((s->width * s->bpp) + 7) >> 3;
|
||||
@ -150,21 +156,24 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uin
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
#if CONFIG_ZLIB
|
||||
if(s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE){
|
||||
uint8_t *zbuf; unsigned long outlen;
|
||||
if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
|
||||
uint8_t *zbuf;
|
||||
unsigned long outlen;
|
||||
int ret;
|
||||
outlen = width * lines;
|
||||
zbuf = av_malloc(outlen);
|
||||
if (!zbuf)
|
||||
return AVERROR(ENOMEM);
|
||||
ret = tiff_uncompress(zbuf, &outlen, src, size);
|
||||
if(ret != Z_OK){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Uncompressing failed (%lu of %lu) with error %d\n", outlen, (unsigned long)width * lines, ret);
|
||||
if (ret != Z_OK) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Uncompressing failed (%lu of %lu) with error %d\n", outlen,
|
||||
(unsigned long)width * lines, ret);
|
||||
av_free(zbuf);
|
||||
return -1;
|
||||
}
|
||||
src = zbuf;
|
||||
for(line = 0; line < lines; line++){
|
||||
for (line = 0; line < lines; line++) {
|
||||
if(s->bpp < 8 && s->avctx->pix_fmt == PIX_FMT_PAL8){
|
||||
horizontal_fill(s->bpp, dst, 1, src, 0, width, 0);
|
||||
}else{
|
||||
@ -177,37 +186,42 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uin
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
if(s->compr == TIFF_LZW){
|
||||
if(ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF) < 0){
|
||||
if (s->compr == TIFF_LZW) {
|
||||
if (ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF) < 0) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
if(s->compr == TIFF_CCITT_RLE || s->compr == TIFF_G3 || s->compr == TIFF_G4){
|
||||
if (s->compr == TIFF_CCITT_RLE || s->compr == TIFF_G3
|
||||
|| s->compr == TIFF_G4) {
|
||||
int i, ret = 0;
|
||||
uint8_t *src2 = av_malloc((unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
uint8_t *src2 = av_malloc((unsigned)size +
|
||||
FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
if (!src2) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Error allocating temporary buffer\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
if(s->fax_opts & 2){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Uncompressed fax mode is not supported (yet)\n");
|
||||
if (s->fax_opts & 2) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Uncompressed fax mode is not supported (yet)\n");
|
||||
av_free(src2);
|
||||
return -1;
|
||||
}
|
||||
if(!s->fill_order){
|
||||
if (!s->fill_order) {
|
||||
memcpy(src2, src, size);
|
||||
}else{
|
||||
for(i = 0; i < size; i++)
|
||||
} else {
|
||||
for (i = 0; i < size; i++)
|
||||
src2[i] = av_reverse[src[i]];
|
||||
}
|
||||
memset(src2+size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
switch(s->compr){
|
||||
memset(src2 + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
switch (s->compr) {
|
||||
case TIFF_CCITT_RLE:
|
||||
case TIFF_G3:
|
||||
case TIFF_G4:
|
||||
ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride, s->compr, s->fax_opts);
|
||||
ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride,
|
||||
s->compr, s->fax_opts);
|
||||
break;
|
||||
}
|
||||
if (s->bpp < 8 && s->avctx->pix_fmt == PIX_FMT_PAL8)
|
||||
@ -218,12 +232,12 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uin
|
||||
av_free(src2);
|
||||
return ret;
|
||||
}
|
||||
for(line = 0; line < lines; line++){
|
||||
if(src - ssrc > size){
|
||||
for (line = 0; line < lines; line++) {
|
||||
if (src - ssrc > size) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
|
||||
return -1;
|
||||
}
|
||||
switch(s->compr){
|
||||
switch (s->compr) {
|
||||
case TIFF_RAW:
|
||||
if (ssrc + size - src < width)
|
||||
return AVERROR_INVALIDDATA;
|
||||
@ -238,22 +252,24 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uin
|
||||
src += width;
|
||||
break;
|
||||
case TIFF_PACKBITS:
|
||||
for(pixels = 0; pixels < width;){
|
||||
code = (int8_t)*src++;
|
||||
if(code >= 0){
|
||||
for (pixels = 0; pixels < width;) {
|
||||
code = (int8_t) * src++;
|
||||
if (code >= 0) {
|
||||
code++;
|
||||
if(pixels + code > width){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Copy went out of bounds\n");
|
||||
if (pixels + code > width) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Copy went out of bounds\n");
|
||||
return -1;
|
||||
}
|
||||
horizontal_fill(s->bpp * (s->avctx->pix_fmt == PIX_FMT_PAL8),
|
||||
dst, 1, src, 0, code, pixels);
|
||||
src += code;
|
||||
pixels += code;
|
||||
}else if(code != -128){ // -127..-1
|
||||
} else if (code != -128) { // -127..-1
|
||||
code = (-code) + 1;
|
||||
if(pixels + code > width){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Run went out of bounds\n");
|
||||
if (pixels + code > width) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Run went out of bounds\n");
|
||||
return -1;
|
||||
}
|
||||
c = *src++;
|
||||
@ -265,8 +281,9 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uin
|
||||
break;
|
||||
case TIFF_LZW:
|
||||
pixels = ff_lzw_decode(s->lzw, dst, width);
|
||||
if(pixels < width){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n", pixels, width);
|
||||
if (pixels < width) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
|
||||
pixels, width);
|
||||
return -1;
|
||||
}
|
||||
if (s->bpp < 8 && s->avctx->pix_fmt == PIX_FMT_PAL8)
|
||||
@ -342,7 +359,8 @@ static int init_image(TiffContext *s)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *buf, const uint8_t *end_buf)
|
||||
static int tiff_decode_tag(TiffContext *s, const uint8_t *start,
|
||||
const uint8_t *buf, const uint8_t *end_buf)
|
||||
{
|
||||
unsigned tag, type, count, off, value = 0;
|
||||
int i, j;
|
||||
@ -357,12 +375,13 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
off = tget_long(&buf, s->le);
|
||||
|
||||
if (type == 0 || type >= FF_ARRAY_ELEMS(type_sizes)) {
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "Unknown tiff type (%u) encountered\n", type);
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "Unknown tiff type (%u) encountered\n",
|
||||
type);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if(count == 1){
|
||||
switch(type){
|
||||
if (count == 1) {
|
||||
switch (type) {
|
||||
case TIFF_BYTE:
|
||||
case TIFF_SHORT:
|
||||
buf -= 4;
|
||||
@ -374,7 +393,7 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
buf = NULL;
|
||||
break;
|
||||
case TIFF_STRING:
|
||||
if(count <= 4){
|
||||
if (count <= 4) {
|
||||
buf -= 4;
|
||||
break;
|
||||
}
|
||||
@ -390,12 +409,13 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
}
|
||||
}
|
||||
|
||||
if(buf && (buf < start || buf > end_buf)){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
|
||||
if (buf && (buf < start || buf > end_buf)) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Tag referencing position outside the image\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
switch(tag){
|
||||
switch (tag) {
|
||||
case TIFF_WIDTH:
|
||||
s->width = value;
|
||||
break;
|
||||
@ -404,20 +424,25 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
break;
|
||||
case TIFF_BPP:
|
||||
s->bppcount = count;
|
||||
if(count > 4){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", s->bpp, count);
|
||||
if (count > 4) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"This format is not supported (bpp=%d, %d components)\n",
|
||||
s->bpp, count);
|
||||
return -1;
|
||||
}
|
||||
if(count == 1) s->bpp = value;
|
||||
else{
|
||||
switch(type){
|
||||
if (count == 1)
|
||||
s->bpp = value;
|
||||
else {
|
||||
switch (type) {
|
||||
case TIFF_BYTE:
|
||||
s->bpp = (off & 0xFF) + ((off >> 8) & 0xFF) + ((off >> 16) & 0xFF) + ((off >> 24) & 0xFF);
|
||||
s->bpp = (off & 0xFF) + ((off >> 8) & 0xFF) +
|
||||
((off >> 16) & 0xFF) + ((off >> 24) & 0xFF);
|
||||
break;
|
||||
case TIFF_SHORT:
|
||||
case TIFF_LONG:
|
||||
s->bpp = 0;
|
||||
for(i = 0; i < count && buf < end_buf; i++) s->bpp += tget(&buf, type, s->le);
|
||||
for (i = 0; i < count && buf < end_buf; i++)
|
||||
s->bpp += tget(&buf, type, s->le);
|
||||
break;
|
||||
default:
|
||||
s->bpp = -1;
|
||||
@ -437,7 +462,7 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
case TIFF_COMPR:
|
||||
s->compr = value;
|
||||
s->predictor = 0;
|
||||
switch(s->compr){
|
||||
switch (s->compr) {
|
||||
case TIFF_RAW:
|
||||
case TIFF_PACKBITS:
|
||||
case TIFF_LZW:
|
||||
@ -457,48 +482,54 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
#endif
|
||||
case TIFF_JPEG:
|
||||
case TIFF_NEWJPEG:
|
||||
av_log(s->avctx, AV_LOG_ERROR, "JPEG compression is not supported\n");
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"JPEG compression is not supported\n");
|
||||
return -1;
|
||||
default:
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n", s->compr);
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
|
||||
s->compr);
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
case TIFF_ROWSPERSTRIP:
|
||||
if (type == TIFF_LONG && value == UINT_MAX)
|
||||
value = s->avctx->height;
|
||||
if(value < 1){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Incorrect value of rows per strip\n");
|
||||
if (value < 1) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Incorrect value of rows per strip\n");
|
||||
return -1;
|
||||
}
|
||||
s->rps = value;
|
||||
break;
|
||||
case TIFF_STRIP_OFFS:
|
||||
if(count == 1){
|
||||
if (count == 1) {
|
||||
s->stripdata = NULL;
|
||||
s->stripoff = value;
|
||||
}else
|
||||
} else
|
||||
s->stripdata = start + off;
|
||||
s->strips = count;
|
||||
if(s->strips == 1) s->rps = s->height;
|
||||
if (s->strips == 1)
|
||||
s->rps = s->height;
|
||||
s->sot = type;
|
||||
if(s->stripdata > end_buf){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
|
||||
if (s->stripdata > end_buf) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Tag referencing position outside the image\n");
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
case TIFF_STRIP_SIZE:
|
||||
if(count == 1){
|
||||
if (count == 1) {
|
||||
s->stripsizes = NULL;
|
||||
s->stripsize = value;
|
||||
s->strips = 1;
|
||||
}else{
|
||||
} else {
|
||||
s->stripsizes = start + off;
|
||||
}
|
||||
s->strips = count;
|
||||
s->sstype = type;
|
||||
if(s->stripsizes > end_buf){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
|
||||
if (s->stripsizes > end_buf) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Tag referencing position outside the image\n");
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
@ -513,7 +544,7 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
s->predictor = value;
|
||||
break;
|
||||
case TIFF_INVERT:
|
||||
switch(value){
|
||||
switch (value) {
|
||||
case 0:
|
||||
s->invert = 1;
|
||||
break;
|
||||
@ -524,13 +555,15 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
case 3:
|
||||
break;
|
||||
default:
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Color mode %d is not supported\n", value);
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Color mode %d is not supported\n",
|
||||
value);
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
case TIFF_FILL_ORDER:
|
||||
if(value < 1 || value > 2){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Unknown FillOrder value %d, trying default one\n", value);
|
||||
if (value < 1 || value > 2) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Unknown FillOrder value %d, trying default one\n", value);
|
||||
value = 1;
|
||||
}
|
||||
s->fill_order = value - 1;
|
||||
@ -544,44 +577,44 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
gp = buf + count / 3 * off;
|
||||
bp = buf + count / 3 * off * 2;
|
||||
off = (type_sizes[type] - 1) << 3;
|
||||
for(i = 0; i < count / 3; i++){
|
||||
for (i = 0; i < count / 3; i++) {
|
||||
j = 0xff << 24;
|
||||
j |= (tget(&rp, type, s->le) >> off) << 16;
|
||||
j |= (tget(&gp, type, s->le) >> off) << 8;
|
||||
j |= tget(&bp, type, s->le) >> off;
|
||||
j |= tget(&bp, type, s->le) >> off;
|
||||
pal[i] = j;
|
||||
}
|
||||
s->palette_is_set = 1;
|
||||
break;
|
||||
case TIFF_PLANAR:
|
||||
if(value == 2){
|
||||
if (value == 2) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Planar format is not supported\n");
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
case TIFF_T4OPTIONS:
|
||||
if(s->compr == TIFF_G3)
|
||||
if (s->compr == TIFF_G3)
|
||||
s->fax_opts = value;
|
||||
break;
|
||||
case TIFF_T6OPTIONS:
|
||||
if(s->compr == TIFF_G4)
|
||||
if (s->compr == TIFF_G4)
|
||||
s->fax_opts = value;
|
||||
break;
|
||||
default:
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "Unknown or unsupported tag %d/0X%0X\n", tag, tag);
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "Unknown or unsupported tag %d/0X%0X\n",
|
||||
tag, tag);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_frame(AVCodecContext *avctx,
|
||||
void *data, int *data_size,
|
||||
AVPacket *avpkt)
|
||||
void *data, int *data_size, AVPacket *avpkt)
|
||||
{
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
TiffContext * const s = avctx->priv_data;
|
||||
TiffContext *const s = avctx->priv_data;
|
||||
AVFrame *picture = data;
|
||||
AVFrame * const p = &s->picture;
|
||||
AVFrame *const p = &s->picture;
|
||||
const uint8_t *orig_buf = buf, *end_buf = buf + buf_size;
|
||||
unsigned off;
|
||||
int id, le, ret;
|
||||
@ -593,10 +626,13 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
//parse image header
|
||||
if (end_buf - buf < 8)
|
||||
return AVERROR_INVALIDDATA;
|
||||
id = AV_RL16(buf); buf += 2;
|
||||
if(id == 0x4949) le = 1;
|
||||
else if(id == 0x4D4D) le = 0;
|
||||
else{
|
||||
id = AV_RL16(buf);
|
||||
buf += 2;
|
||||
if (id == 0x4949)
|
||||
le = 1;
|
||||
else if (id == 0x4D4D)
|
||||
le = 0;
|
||||
else {
|
||||
av_log(avctx, AV_LOG_ERROR, "TIFF header not found\n");
|
||||
return -1;
|
||||
}
|
||||
@ -606,8 +642,9 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
s->fill_order = 0;
|
||||
// As TIFF 6.0 specification puts it "An arbitrary but carefully chosen number
|
||||
// that further identifies the file as a TIFF file"
|
||||
if(tget_short(&buf, le) != 42){
|
||||
av_log(avctx, AV_LOG_ERROR, "The answer to life, universe and everything is not correct!\n");
|
||||
if (tget_short(&buf, le) != 42) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"The answer to life, universe and everything is not correct!\n");
|
||||
return -1;
|
||||
}
|
||||
// Reset these pointers so we can tell if they were set this frame
|
||||
@ -620,12 +657,12 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
buf = orig_buf + off;
|
||||
entries = tget_short(&buf, le);
|
||||
for(i = 0; i < entries; i++){
|
||||
if(tiff_decode_tag(s, orig_buf, buf, end_buf) < 0)
|
||||
for (i = 0; i < entries; i++) {
|
||||
if (tiff_decode_tag(s, orig_buf, buf, end_buf) < 0)
|
||||
return -1;
|
||||
buf += 12;
|
||||
}
|
||||
if(!s->stripdata && !s->stripoff){
|
||||
if (!s->stripdata && !s->stripoff) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
|
||||
return -1;
|
||||
}
|
||||
@ -633,36 +670,37 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
if ((ret = init_image(s)) < 0)
|
||||
return ret;
|
||||
|
||||
if(s->strips == 1 && !s->stripsize){
|
||||
if (s->strips == 1 && !s->stripsize) {
|
||||
av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
|
||||
s->stripsize = buf_size - s->stripoff;
|
||||
}
|
||||
stride = p->linesize[0];
|
||||
dst = p->data[0];
|
||||
for(i = 0; i < s->height; i += s->rps){
|
||||
if(s->stripsizes) {
|
||||
for (i = 0; i < s->height; i += s->rps) {
|
||||
if (s->stripsizes) {
|
||||
if (s->stripsizes >= end_buf)
|
||||
return AVERROR_INVALIDDATA;
|
||||
ssize = tget(&s->stripsizes, s->sstype, s->le);
|
||||
} else
|
||||
ssize = s->stripsize;
|
||||
|
||||
if(s->stripdata){
|
||||
if (s->stripdata) {
|
||||
if (s->stripdata >= end_buf)
|
||||
return AVERROR_INVALIDDATA;
|
||||
soff = tget(&s->stripdata, s->sot, s->le);
|
||||
}else
|
||||
} else
|
||||
soff = s->stripoff;
|
||||
|
||||
if (soff > buf_size || ssize > buf_size - soff) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
|
||||
return -1;
|
||||
}
|
||||
if(tiff_unpack_strip(s, dst, stride, orig_buf + soff, ssize, FFMIN(s->rps, s->height - i)) < 0)
|
||||
if (tiff_unpack_strip(s, dst, stride, orig_buf + soff, ssize,
|
||||
FFMIN(s->rps, s->height - i)) < 0)
|
||||
break;
|
||||
dst += s->rps * stride;
|
||||
}
|
||||
if(s->predictor == 2){
|
||||
if (s->predictor == 2) {
|
||||
dst = p->data[0];
|
||||
soff = s->bpp >> 3;
|
||||
ssize = s->width * soff;
|
||||
@ -681,18 +719,18 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
dst += stride;
|
||||
}
|
||||
} else {
|
||||
for(i = 0; i < s->height; i++) {
|
||||
for(j = soff; j < ssize; j++)
|
||||
for (i = 0; i < s->height; i++) {
|
||||
for (j = soff; j < ssize; j++)
|
||||
dst[j] += dst[j - soff];
|
||||
dst += stride;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if(s->invert){
|
||||
if (s->invert) {
|
||||
dst = s->picture.data[0];
|
||||
for(i = 0; i < s->height; i++){
|
||||
for(j = 0; j < s->picture.linesize[0]; j++)
|
||||
for (i = 0; i < s->height; i++) {
|
||||
for (j = 0; j < s->picture.linesize[0]; j++)
|
||||
dst[j] = (s->avctx->pix_fmt == PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255) - dst[j];
|
||||
dst += s->picture.linesize[0];
|
||||
}
|
||||
@ -703,7 +741,8 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
static av_cold int tiff_init(AVCodecContext *avctx){
|
||||
static av_cold int tiff_init(AVCodecContext *avctx)
|
||||
{
|
||||
TiffContext *s = avctx->priv_data;
|
||||
|
||||
s->width = 0;
|
||||
@ -719,10 +758,10 @@ static av_cold int tiff_init(AVCodecContext *avctx){
|
||||
|
||||
static av_cold int tiff_end(AVCodecContext *avctx)
|
||||
{
|
||||
TiffContext * const s = avctx->priv_data;
|
||||
TiffContext *const s = avctx->priv_data;
|
||||
|
||||
ff_lzw_decode_close(&s->lzw);
|
||||
if(s->picture.data[0])
|
||||
if (s->picture.data[0])
|
||||
avctx->release_buffer(avctx, &s->picture);
|
||||
return 0;
|
||||
}
|
||||
|
@ -229,7 +229,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
s->max_subframe_len_bit = 0;
|
||||
s->subframe_len_bits = av_log2(log2_max_num_subframes) + 1;
|
||||
|
||||
num_possible_block_sizes = log2_max_num_subframes + 1;
|
||||
s->min_samples_per_subframe = s->samples_per_frame / s->max_num_subframes;
|
||||
s->dynamic_range_compression = s->decode_flags & 0x80;
|
||||
s->bV3RTM = s->decode_flags & 0x100;
|
||||
@ -940,9 +939,10 @@ static int decode_subframe(WmallDecodeCtx *s)
|
||||
|
||||
if (rawpcm_tile) {
|
||||
int bits = s->bits_per_sample - padding_zeroes;
|
||||
if (bits <= 0 ) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "rawpcm_tile bits invalid\n");
|
||||
return -1;
|
||||
if (bits <= 0) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Invalid number of padding bits in raw PCM tile\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
av_dlog(s->avctx, "RAWPCM %d bits per sample. "
|
||||
"total %d bits, remain=%d\n", bits,
|
||||
@ -1169,8 +1169,7 @@ static int decode_packet(AVCodecContext *avctx, void *data, int *got_frame_ptr,
|
||||
GetBitContext* gb = &s->pgb;
|
||||
const uint8_t* buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
int num_bits_prev_frame, packet_sequence_number,
|
||||
seekable_frame_in_packet, spliced_packet;
|
||||
int num_bits_prev_frame, packet_sequence_number, spliced_packet;
|
||||
|
||||
if (s->packet_done || s->packet_loss) {
|
||||
s->packet_done = 0;
|
||||
@ -1185,9 +1184,11 @@ static int decode_packet(AVCodecContext *avctx, void *data, int *got_frame_ptr,
|
||||
|
||||
/* parse packet header */
|
||||
init_get_bits(gb, buf, s->buf_bit_size);
|
||||
packet_sequence_number = get_bits(gb, 4);
|
||||
seekable_frame_in_packet = get_bits1(gb);
|
||||
spliced_packet = get_bits1(gb);
|
||||
packet_sequence_number = get_bits(gb, 4);
|
||||
skip_bits(gb, 1); // Skip seekable_frame_in_packet, currently unused
|
||||
spliced_packet = get_bits1(gb);
|
||||
if (spliced_packet)
|
||||
av_log_missing_feature(avctx, "Bitstream splicing", 1);
|
||||
|
||||
/* get number of bits that need to be added to the previous frame */
|
||||
num_bits_prev_frame = get_bits(gb, s->log2_frame_size);
|
||||
|
@ -594,7 +594,7 @@ cglobal emu_edge_core, 2, 7, 0
%define valw2 r7w
%define valw3 r3w
%if WIN64
%define valw4 r4w
%define valw4 r7w
%else ; unix64
%define valw4 r3w
%endif

@ -91,11 +91,11 @@ void avfilter_graph_free(AVFilterGraph **graph);
/**
 * A linked-list of the inputs/outputs of the filter chain.
 *
 * This is mainly useful for avfilter_graph_parse(), since this
 * function may accept a description of a graph with not connected
 * input/output pads. This struct specifies, per each not connected
 * pad contained in the graph, the filter context and the pad index
 * required for establishing a link.
 * This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(),
 * where it is used to communicate open (unlinked) inputs and outputs from and
 * to the caller.
 * This struct specifies, per each not connected pad contained in the graph, the
 * filter context and the pad index required for establishing a link.
 */
typedef struct AVFilterInOut {
    /** unique name for this input/output in the list */
@ -112,13 +112,14 @@ typedef struct AVFilterInOut {
} AVFilterInOut;

/**
 * Create an AVFilterInOut.
 * Must be free with avfilter_inout_free().
 * Allocate a single AVFilterInOut entry.
 * Must be freed with avfilter_inout_free().
 * @return allocated AVFilterInOut on success, NULL on failure.
 */
AVFilterInOut *avfilter_inout_alloc(void);

/**
 * Free the AVFilterInOut in *inout, and set its pointer to NULL.
 * Free the supplied list of AVFilterInOut and set *inout to NULL.
 * If *inout is NULL, do nothing.
 */
void avfilter_inout_free(AVFilterInOut **inout);
@ -140,6 +141,41 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
                         AVFilterInOut **inputs, AVFilterInOut **outputs,
                         void *log_ctx);

/**
 * Add a graph described by a string to a graph.
 *
 * @param[in]  graph   the filter graph where to link the parsed graph context
 * @param[in]  filters string to be parsed
 * @param[out] inputs  a linked list of all free (unlinked) inputs of the
 *                     parsed graph will be returned here. It is to be freed
 *                     by the caller using avfilter_inout_free().
 * @param[out] outputs a linked list of all free (unlinked) outputs of the
 *                     parsed graph will be returned here. It is to be freed by the
 *                     caller using avfilter_inout_free().
 * @return zero on success, a negative AVERROR code on error
 *
 * @note the difference between avfilter_graph_parse2() and
 * avfilter_graph_parse() is that in avfilter_graph_parse(), the caller provides
 * the lists of inputs and outputs, which therefore must be known before calling
 * the function. On the other hand, avfilter_graph_parse2() \em returns the
 * inputs and outputs that are left unlinked after parsing the graph and the
 * caller then deals with them. Another difference is that in
 * avfilter_graph_parse(), the inputs parameter describes inputs of the
 * <em>already existing</em> part of the graph; i.e. from the point of view of
 * the newly created part, they are outputs. Similarly the outputs parameter
 * describes outputs of the already existing filters, which are provided as
 * inputs to the parsed filters.
 * avfilter_graph_parse2() takes the opposite approach -- it makes no reference
 * whatsoever to already existing parts of the graph and the inputs parameter
 * will on return contain inputs of the newly parsed part of the graph.
 * Analogously the outputs parameter will contain outputs of the newly created
 * filters.
 */
int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
                          AVFilterInOut **inputs,
                          AVFilterInOut **outputs);


/**
 * Send a command to one or more filter instances.
 *

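A rough usage sketch of the calling convention the @note above describes (not part of the patch; the function and variable names are purely illustrative): avfilter_graph_parse2() hands the open pads back to the caller, which can then walk the two lists.

#include <stdio.h>
#include "libavfilter/avfiltergraph.h"

/* Sketch: parse a description and report every pad left unlinked,
 * which the caller is responsible for connecting afterwards. */
static void list_open_pads(AVFilterGraph *graph, const char *desc)
{
    AVFilterInOut *inputs = NULL, *outputs = NULL, *cur;

    if (avfilter_graph_parse2(graph, desc, &inputs, &outputs) < 0)
        return;

    for (cur = inputs; cur; cur = cur->next)   /* unlinked input pads  */
        printf("open input:  filter %s, pad %d\n",
               cur->filter_ctx->filter->name, cur->pad_idx);
    for (cur = outputs; cur; cur = cur->next)  /* unlinked output pads */
        printf("open output: filter %s, pad %d\n",
               cur->filter_ctx->filter->name, cur->pad_idx);

    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
}
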
@ -32,6 +32,7 @@
 *
 * @param buf buffer containing frame data to be passed down the filtergraph.
 * This function will take ownership of buf, the user must not free it.
 * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter.
 */
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf);

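A minimal sketch of the NULL-buffer convention documented above (the header name and identifiers below are assumptions for illustration, not taken from the patch):

#include "libavfilter/buffersrc.h"   /* header name assumed */

/* Sketch: push one frame into a "buffer" source filter instance and then
 * signal end of stream by passing a NULL buffer reference. */
static int feed_and_finish(AVFilterContext *src, AVFilterBufferRef *ref)
{
    int ret = av_buffersrc_buffer(src, ref);   /* filter takes ownership of ref */
    if (ret < 0)
        return ret;
    return av_buffersrc_buffer(src, NULL);     /* NULL == no more frames (EOF) */
}
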
@ -189,13 +189,15 @@ static AVFilterInOut *extract_inout(const char *label, AVFilterInOut **links)
|
||||
{
|
||||
AVFilterInOut *ret;
|
||||
|
||||
while (*links && strcmp((*links)->name, label))
|
||||
while (*links && (!(*links)->name || strcmp((*links)->name, label)))
|
||||
links = &((*links)->next);
|
||||
|
||||
ret = *links;
|
||||
|
||||
if (ret)
|
||||
if (ret) {
|
||||
*links = ret->next;
|
||||
ret->next = NULL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -206,22 +208,31 @@ static void insert_inout(AVFilterInOut **inouts, AVFilterInOut *element)
|
||||
*inouts = element;
|
||||
}
|
||||
|
||||
static void append_inout(AVFilterInOut **inouts, AVFilterInOut **element)
|
||||
{
|
||||
while (*inouts && (*inouts)->next)
|
||||
inouts = &((*inouts)->next);
|
||||
|
||||
if (!*inouts)
|
||||
*inouts = *element;
|
||||
else
|
||||
(*inouts)->next = *element;
|
||||
*element = NULL;
|
||||
}
|
||||
|
||||
static int link_filter_inouts(AVFilterContext *filt_ctx,
|
||||
AVFilterInOut **curr_inputs,
|
||||
AVFilterInOut **open_inputs, void *log_ctx)
|
||||
{
|
||||
int pad = filt_ctx->input_count, ret;
|
||||
int pad, ret;
|
||||
|
||||
while (pad--) {
|
||||
for (pad = 0; pad < filt_ctx->input_count; pad++) {
|
||||
AVFilterInOut *p = *curr_inputs;
|
||||
if (!p) {
|
||||
av_log(log_ctx, AV_LOG_ERROR,
|
||||
"Not enough inputs specified for the \"%s\" filter.\n",
|
||||
filt_ctx->filter->name);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
*curr_inputs = (*curr_inputs)->next;
|
||||
if (p)
|
||||
*curr_inputs = (*curr_inputs)->next;
|
||||
else if (!(p = av_mallocz(sizeof(*p))))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if (p->filter_ctx) {
|
||||
if ((ret = link_filter(p->filter_ctx, p->pad_idx, filt_ctx, pad, log_ctx)) < 0)
|
||||
@ -258,6 +269,7 @@ static int link_filter_inouts(AVFilterContext *filt_ctx,
|
||||
static int parse_inputs(const char **buf, AVFilterInOut **curr_inputs,
|
||||
AVFilterInOut **open_outputs, void *log_ctx)
|
||||
{
|
||||
AVFilterInOut *parsed_inputs = NULL;
|
||||
int pad = 0;
|
||||
|
||||
while (**buf == '[') {
|
||||
@ -280,12 +292,15 @@ static int parse_inputs(const char **buf, AVFilterInOut **curr_inputs,
|
||||
match->pad_idx = pad;
|
||||
}
|
||||
|
||||
insert_inout(curr_inputs, match);
|
||||
append_inout(&parsed_inputs, &match);
|
||||
|
||||
*buf += strspn(*buf, WHITESPACES);
|
||||
pad++;
|
||||
}
|
||||
|
||||
append_inout(&parsed_inputs, curr_inputs);
|
||||
*curr_inputs = parsed_inputs;
|
||||
|
||||
return pad;
|
||||
}
|
||||
|
||||
@ -334,10 +349,173 @@ static int parse_outputs(const char **buf, AVFilterInOut **curr_inputs,
|
||||
return pad;
|
||||
}
|
||||
|
||||
#if FF_API_GRAPH_AVCLASS
|
||||
#define log_ctx graph
|
||||
#else
|
||||
#define log_ctx NULL
|
||||
#endif
|
||||
|
||||
static int parse_sws_flags(const char **buf, AVFilterGraph *graph)
|
||||
{
|
||||
char *p = strchr(*buf, ';');
|
||||
|
||||
if (strncmp(*buf, "sws_flags=", 10))
|
||||
return 0;
|
||||
|
||||
if (!p) {
|
||||
av_log(log_ctx, AV_LOG_ERROR, "sws_flags not terminated with ';'.\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
*buf += 4; // keep the 'flags=' part
|
||||
|
||||
av_freep(&graph->scale_sws_opts);
|
||||
if (!(graph->scale_sws_opts = av_mallocz(p - *buf + 1)))
|
||||
return AVERROR(ENOMEM);
|
||||
av_strlcpy(graph->scale_sws_opts, *buf, p - *buf + 1);
|
||||
|
||||
*buf = p + 1;
|
||||
return 0;
|
||||
}

int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
AVFilterInOut **inputs,
AVFilterInOut **outputs)
{
int index = 0, ret = 0;
char chr = 0;

AVFilterInOut *curr_inputs = NULL, *open_inputs = NULL, *open_outputs = NULL;

filters += strspn(filters, WHITESPACES);

if ((ret = parse_sws_flags(&filters, graph)) < 0)
goto fail;

do {
AVFilterContext *filter;
filters += strspn(filters, WHITESPACES);

if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, log_ctx)) < 0)
goto end;

if ((ret = parse_filter(&filter, &filters, graph, index, log_ctx)) < 0)
goto end;

if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, log_ctx)) < 0)
goto end;

if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs,
log_ctx)) < 0)
goto end;

filters += strspn(filters, WHITESPACES);
chr = *filters++;

if (chr == ';' && curr_inputs)
append_inout(&open_outputs, &curr_inputs);
index++;
} while (chr == ',' || chr == ';');

if (chr) {
av_log(log_ctx, AV_LOG_ERROR,
"Unable to parse graph description substring: \"%s\"\n",
filters - 1);
ret = AVERROR(EINVAL);
goto end;
}

append_inout(&open_outputs, &curr_inputs);


*inputs = open_inputs;
*outputs = open_outputs;
return 0;

fail:end:
for (; graph->filter_count > 0; graph->filter_count--)
avfilter_free(graph->filters[graph->filter_count - 1]);
av_freep(&graph->filters);
avfilter_inout_free(&open_inputs);
avfilter_inout_free(&open_outputs);
avfilter_inout_free(&curr_inputs);

*inputs = NULL;
*outputs = NULL;

return ret;
}
#undef log_ctx
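A hedged, caller-side sketch of the avfilter_graph_parse2() entry point defined above: it parses a description and hands back AVFilterInOut lists describing the pads that are still unconnected. The graph string and the amount of error handling are illustrative only; a real caller still has to create source/sink filters, link them to the returned pads and configure the graph.

#include <stdio.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/avfiltergraph.h>

int main(void)
{
    AVFilterGraph *graph;
    AVFilterInOut *inputs = NULL, *outputs = NULL, *cur;
    int ret;

    avfilter_register_all();

    if (!(graph = avfilter_graph_alloc()))
        return 1;

    ret = avfilter_graph_parse2(graph, "sws_flags=bicubic;scale=320:240,vflip",
                                &inputs, &outputs);
    if (ret < 0) {
        avfilter_graph_free(&graph);
        return 1;
    }

    for (cur = inputs; cur; cur = cur->next)    /* still-unconnected input pads */
        printf("open input : filter %s pad %d\n",
               cur->filter_ctx->filter->name, cur->pad_idx);
    for (cur = outputs; cur; cur = cur->next)   /* still-unconnected output pads */
        printf("open output: filter %s pad %d\n",
               cur->filter_ctx->filter->name, cur->pad_idx);

    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    avfilter_graph_free(&graph);
    return 0;
}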

int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
AVFilterInOut **open_inputs_ptr, AVFilterInOut **open_outputs_ptr,
void *log_ctx)
{
#if 0
int ret;
AVFilterInOut *open_inputs = open_inputs_ptr ? *open_inputs_ptr : NULL;
AVFilterInOut *open_outputs = open_outputs_ptr ? *open_outputs_ptr : NULL;
AVFilterInOut *cur, *match, *inputs = NULL, *outputs = NULL;

if ((ret = avfilter_graph_parse2(graph, filters, &inputs, &outputs)) < 0)
goto fail;

/* First input can be omitted if it is "[in]" */
if (inputs && !inputs->name)
inputs->name = av_strdup("in");
for (cur = inputs; cur; cur = cur->next) {
if (!cur->name) {
av_log(log_ctx, AV_LOG_ERROR,
"Not enough inputs specified for the \"%s\" filter.\n",
cur->filter_ctx->filter->name);
ret = AVERROR(EINVAL);
goto fail;
}
if (!(match = extract_inout(cur->name, &open_outputs)))
continue;
ret = avfilter_link(match->filter_ctx, match->pad_idx,
cur->filter_ctx, cur->pad_idx);
avfilter_inout_free(&match);
if (ret < 0)
goto fail;
}

/* Last output can be omitted if it is "[out]" */
if (outputs && !outputs->name)
outputs->name = av_strdup("out");
for (cur = outputs; cur; cur = cur->next) {
if (!cur->name) {
av_log(log_ctx, AV_LOG_ERROR,
"Invalid filterchain containing an unlabelled output pad: \"%s\"\n",
filters);
ret = AVERROR(EINVAL);
goto fail;
}
if (!(match = extract_inout(cur->name, &open_inputs)))
continue;
ret = avfilter_link(cur->filter_ctx, cur->pad_idx,
match->filter_ctx, match->pad_idx);
avfilter_inout_free(&match);
if (ret < 0)
goto fail;
}

fail:
if (ret < 0) {
for (; graph->filter_count > 0; graph->filter_count--)
avfilter_free(graph->filters[graph->filter_count - 1]);
av_freep(&graph->filters);
}
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
/* clear open_in/outputs only if not passed as parameters */
if (open_inputs_ptr) *open_inputs_ptr = open_inputs;
else avfilter_inout_free(&open_inputs);
if (open_outputs_ptr) *open_outputs_ptr = open_outputs;
else avfilter_inout_free(&open_outputs);
return ret;
}
#else
int index = 0, ret = 0;
char chr = 0;

@ -414,3 +592,5 @@ end:
}
return ret;
}

#endif

@ -29,8 +29,8 @@
#include "libavutil/avutil.h"

#define LIBAVFILTER_VERSION_MAJOR 2
#define LIBAVFILTER_VERSION_MINOR 69
#define LIBAVFILTER_VERSION_MICRO 101
#define LIBAVFILTER_VERSION_MINOR 70
#define LIBAVFILTER_VERSION_MICRO 100

#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \

@ -39,6 +39,7 @@ typedef struct {
AVRational time_base; ///< time_base to set in the output link
AVRational sample_aspect_ratio;
char sws_param[256];
int eof;
} BufferSourceContext;

#define CHECK_PARAM_CHANGE(s, c, width, height, format)\
@ -55,6 +56,12 @@ int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
AVFilterBufferRef *buf;
int ret;

if (!picref) {
c->eof = 1;
return 0;
} else if (c->eof)
return AVERROR(EINVAL);

if (!av_fifo_space(c->fifo) &&
(ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
sizeof(buf))) < 0)
@ -125,6 +132,12 @@ int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
BufferSourceContext *c = s->priv;
int ret;

if (!buf) {
c->eof = 1;
return 0;
} else if (c->eof)
return AVERROR(EINVAL);

if (!av_fifo_space(c->fifo) &&
(ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
sizeof(buf))) < 0)
@ -144,9 +157,17 @@ int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
int av_vsrc_buffer_add_frame(AVFilterContext *buffer_src,
const AVFrame *frame, int flags)
{
BufferSourceContext *c = buffer_src->priv;
AVFilterBufferRef *picref;
int ret;
AVFilterBufferRef *picref =
avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);

if (!frame) {
c->eof = 1;
return 0;
} else if (c->eof)
return AVERROR(EINVAL);

picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
if (!picref)
return AVERROR(ENOMEM);
ret = av_vsrc_buffer_add_video_buffer_ref(buffer_src, picref, flags);
@ -226,6 +247,8 @@ static int request_frame(AVFilterLink *link)
AVFilterBufferRef *buf;

if (!av_fifo_size(c->fifo)) {
if (c->eof)
return AVERROR_EOF;
av_log(link->src, AV_LOG_WARNING,
"request_frame() called with no available frame!\n");
return AVERROR(EINVAL);
@ -243,7 +266,10 @@ static int request_frame(AVFilterLink *link)
static int poll_frame(AVFilterLink *link)
{
BufferSourceContext *c = link->src->priv;
return !!av_fifo_size(c->fifo);
int size = av_fifo_size(c->fifo);
if (!size && c->eof)
return AVERROR_EOF;
return size/sizeof(AVFilterBufferRef*);
}

AVFilter avfilter_vsrc_buffer = {
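The eof field and the NULL checks added in the hunks above define the end-of-stream convention for the buffer source: pushing a NULL frame (or NULL buffer ref) marks EOF, any later push is rejected with AVERROR(EINVAL), and request_frame()/poll_frame() report AVERROR_EOF once the FIFO has drained. A hedged sketch of the feeding side; buffersrc_ctx is a hypothetical, already configured "buffer" source, and the header holding av_vsrc_buffer_add_frame() (assumed to be libavfilter/vsrc_buffer.h at this point in the tree) is part of the assumption:

/* Hedged sketch of the caller side of the EOF convention added above.
 * Creating the source filter and decoding the frames is not shown. */
#include <libavcodec/avcodec.h>          /* AVFrame */
#include <libavfilter/avfilter.h>
#include <libavfilter/vsrc_buffer.h>     /* assumed to declare av_vsrc_buffer_add_frame() */

static int feed_frames(AVFilterContext *buffersrc_ctx, AVFrame **frames, int n)
{
    int i, ret;

    for (i = 0; i < n; i++)
        if ((ret = av_vsrc_buffer_add_frame(buffersrc_ctx, frames[i], 0)) < 0)
            return ret;

    /* NULL marks end of stream: the source sets its eof flag, rejects any
     * further push with AVERROR(EINVAL), and request_frame()/poll_frame()
     * return AVERROR_EOF once the FIFO is empty. */
    return av_vsrc_buffer_add_frame(buffersrc_ctx, NULL, 0);
}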
@ -107,6 +107,7 @@ typedef struct MOVStreamContext {
unsigned int alt_sample_size; ///< always contains sample size from stsz atom
unsigned int sample_count;
int *sample_sizes;
int keyframe_absent;
unsigned int keyframe_count;
int *keyframes;
int time_scale;

@ -1618,7 +1618,10 @@ static int mov_read_stss(MOVContext *c, AVIOContext *pb, MOVAtom atom)
av_dlog(c->fc, "keyframe_count = %d\n", entries);

if (!entries)
{
sc->keyframe_absent = 1;
return 0;
}
if (entries >= UINT_MAX / sizeof(int))
return AVERROR_INVALIDDATA;
sc->keyframes = av_malloc(entries * sizeof(int));
@ -1873,7 +1876,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
return;
}

if (!sc->keyframe_count || current_sample+key_off == sc->keyframes[stss_index]) {
if (!sc->keyframe_absent && (!sc->keyframe_count || current_sample+key_off == sc->keyframes[stss_index])) {
keyframe = 1;
if (stss_index + 1 < sc->keyframe_count)
stss_index++;
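The keyframe_absent flag threaded through the hunks above distinguishes a stream with no stss atom at all (every sample is a sync sample) from one whose stss atom is present but empty (no sample is known to be a sync sample). A small stand-alone sketch of that decision, simplified from the index-walking code above:

/* Stand-alone sketch of the keyframe decision after the change above;
 * keyframe_absent models an stss atom that is present but empty. */
#include <stdio.h>

static int is_keyframe(int keyframe_absent, unsigned keyframe_count,
                       const int *keyframes, int sample)
{
    if (keyframe_absent)
        return 0;                 /* empty stss: no known sync samples      */
    if (!keyframe_count)
        return 1;                 /* no stss at all: every sample is sync   */
    for (unsigned i = 0; i < keyframe_count; i++)
        if (keyframes[i] == sample)
            return 1;
    return 0;
}

int main(void)
{
    const int stss[] = { 1, 25 };
    printf("%d %d %d\n",
           is_keyframe(0, 2, stss, 25),   /* 1: listed in stss              */
           is_keyframe(0, 0, NULL, 7),    /* 1: stream carries no stss atom */
           is_keyframe(1, 0, NULL, 7));   /* 0: stss present but empty      */
    return 0;
}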

@ -1975,6 +1975,8 @@ static int has_duration(AVFormatContext *ic)
if (st->duration != AV_NOPTS_VALUE)
return 1;
}
if (ic->duration)
return 1;
return 0;
}

@ -469,7 +469,7 @@ yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
{
int i;

for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int j;
int Y1 = 1 << 18;
int Y2 = 1 << 18;
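This hunk, and the analogous ones below, widen the per-pair loop bound from dstW >> 1 to (dstW + 1) >> 1 so the final pixel of an odd-width line is processed instead of silently dropped. A tiny stand-alone check of that arithmetic (not the swscale code itself):

#include <stdio.h>

int main(void)
{
    int dstW = 5;                       /* odd-width example line            */
    int old_pairs = dstW >> 1;          /* 2 iterations -> pixels 0..3 only  */
    int new_pairs = (dstW + 1) >> 1;    /* 3 iterations -> last pixel covered */

    printf("old bound covers %d of %d pixels\n", old_pairs * 2, dstW);
    printf("new bound runs %d iterations, reaching pixel %d\n",
           new_pairs, dstW - 1);
    return 0;
}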
@ -512,7 +512,7 @@ yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
int uvalpha1 = 4095 - uvalpha;
int i;

for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
@ -539,7 +539,7 @@ yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
int i;

if (uvalpha < 2048) {
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2 ]+64) >> 7;
int Y2 = (buf0[i * 2 + 1]+64) >> 7;
int U = (ubuf0[i] +64) >> 7;
@ -561,7 +561,7 @@ yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
}
} else {
const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2 ] + 64) >> 7;
int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
int U = (ubuf0[i] + ubuf1[i]+128) >> 8;
@ -608,7 +608,7 @@ yuv2rgb48_X_c_template(SwsContext *c, const int16_t *lumFilter,
{
int i;

for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int j;
int Y1 = -0x40000000;
int Y2 = -0x40000000;
@ -671,7 +671,7 @@ yuv2rgb48_2_c_template(SwsContext *c, const int32_t *buf[2],
int uvalpha1 = 4095 - uvalpha;
int i;

for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14;
@ -709,7 +709,7 @@ yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0,
int i;

if (uvalpha < 2048) {
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2] ) >> 2;
int Y2 = (buf0[i * 2 + 1]) >> 2;
int U = (ubuf0[i] + (-128 << 11)) >> 2;
@ -737,7 +737,7 @@ yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0,
}
} else {
const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2] ) >> 2;
int Y2 = (buf0[i * 2 + 1]) >> 2;
int U = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3;
@ -952,7 +952,7 @@ yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
{
int i;

for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int j, A1, A2;
int Y1 = 1 << 18;
int Y2 = 1 << 18;
@ -1012,7 +1012,7 @@ yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
int uvalpha1 = 4095 - uvalpha;
int i;

for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
@ -1050,7 +1050,7 @@ yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
int i;

if (uvalpha < 2048) {
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2 ] + 64) >> 7;
int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
int U = (ubuf0[i] + 64) >> 7;
@ -1077,7 +1077,7 @@ yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
}
} else {
const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
for (i = 0; i < (dstW >> 1); i++) {
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2 ] + 64) >> 7;
int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
int U = (ubuf0[i] + ubuf1[i] + 128) >> 8;

@ -40,7 +40,7 @@ do_lavfi "crop_vflip" "crop=iw-100:ih-100:100:100,vflip"
do_lavfi "drawbox" "drawbox=224:24:88:72:#FF8010@0.5"
do_lavfi "fade" "fade=in:5:15,fade=out:30:15"
do_lavfi "null" "null"
do_lavfi "overlay" "split[m],scale=88:72,pad=96:80:4:4[o2];[m]fifo,[o2]overlay=240:16"
do_lavfi "overlay" "split[m],scale=88:72,pad=96:80:4:4[o2];[m]fifo[o1],[o1][o2]overlay=240:16"
do_lavfi "pad" "pad=iw*1.5:ih*1.5:iw*0.3:ih*0.2"
do_lavfi "pp" "mp=pp=be/de/tn/l5/al"
do_lavfi "pp2" "mp=pp=be/fq:16/fa/lb"