mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2025-01-29 22:00:58 +02:00)
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  rtp: factorize dynamic payload type fallback
  flvdec: Ignore the index if it's from a creator known to be different
  cmdutils: move grow_array out of #if CONFIG_AVFILTER
  avconv: actually set InputFile.rate_emu
  ratecontrol: update last_qscale_for sooner
  Fix unnecessary shift with 9/10bit vertical scaling
  prores: mark prores as intra-only in libavformat/utils.c:is_intra_only()
  prores: return more meaningful error values
  prores: improve error message wording
  prores: cosmetics: prettyprinting, drop useless parentheses
  prores: lowercase AVCodec name entry

Conflicts:
	cmdutils.c
	libavcodec/proresdec_lgpl.c
	tests/ref/lavfi/pixfmts_scale

Merged-by: Michael Niedermayer <michaelni@gmx.at>
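
A note on the first item above ("rtp: factorize dynamic payload type fallback"), since it is spread over three files below (libavformat/rtp.c, rtpenc.c and sdp.c): the RTP_PT_PRIVATE fallback moves from the callers into ff_rtp_get_payload_type() itself. A minimal before/after sketch of the caller pattern, reconstructed from those hunks rather than quoted verbatim from the tree:

    /* Before: each caller applied the dynamic-payload-type fallback itself
     * (the lines removed from the rtpenc.c and sdp.c hunks below). */
    s->payload_type = ff_rtp_get_payload_type(st->codec);
    if (s->payload_type < 0)
        s->payload_type = RTP_PT_PRIVATE +
                          (st->codec->codec_type == AVMEDIA_TYPE_AUDIO);

    /* After: ff_rtp_get_payload_type() applies the fallback internally
     * (the lines added in the rtp.c hunk below), so callers only keep this. */
    s->payload_type = ff_rtp_get_payload_type(st->codec);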

commit a7758884db

avconv.c

@@ -3021,6 +3021,7 @@ static int opt_input_file(OptionsContext *o, const char *opt, const char *filena
     input_files[nb_input_files - 1].ist_index = nb_input_streams - ic->nb_streams;
     input_files[nb_input_files - 1].ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
     input_files[nb_input_files - 1].nb_streams = ic->nb_streams;
+    input_files[nb_input_files - 1].rate_emu = o->rate_emu;
 
     for (i = 0; i < orig_nb_streams; i++)
         av_dict_free(&opts[i]);

libavcodec/proresdec_lgpl.c

@@ -129,31 +129,31 @@ static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
 
     hdr_size = AV_RB16(buf);
     if (hdr_size > data_size) {
-        av_log(avctx, AV_LOG_ERROR, "frame data too short!\n");
-        return -1;
+        av_log(avctx, AV_LOG_ERROR, "frame data too small\n");
+        return AVERROR_INVALIDDATA;
     }
 
     version = AV_RB16(buf + 2);
     if (version >= 2) {
         av_log(avctx, AV_LOG_ERROR,
                "unsupported header version: %d\n", version);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     width  = AV_RB16(buf + 8);
     height = AV_RB16(buf + 10);
     if (width != avctx->width || height != avctx->height) {
         av_log(avctx, AV_LOG_ERROR,
-               "picture dimension changed! Old: %d x %d, new: %d x %d\n",
+               "picture dimension changed: old: %d x %d, new: %d x %d\n",
                avctx->width, avctx->height, width, height);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     ctx->frame_type = (buf[12] >> 2) & 3;
     if (ctx->frame_type > 2) {
         av_log(avctx, AV_LOG_ERROR,
-               "unsupported frame type: %d!\n", ctx->frame_type);
-        return -1;
+               "unsupported frame type: %d\n", ctx->frame_type);
+        return AVERROR_INVALIDDATA;
     }
 
     ctx->chroma_factor = (buf[12] >> 6) & 3;
@@ -168,8 +168,8 @@ static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
         break;
     default:
         av_log(avctx, AV_LOG_ERROR,
-               "unsupported picture format: %d!\n", ctx->pic_format);
-        return -1;
+               "unsupported picture format: %d\n", ctx->pic_format);
+        return AVERROR_INVALIDDATA;
     }
 
     if (ctx->scantable_type != ctx->frame_type) {
@@ -192,8 +192,8 @@ static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
     flags = buf[19];
     if (flags & 2) {
         if (ptr - buf > hdr_size - 64) {
-            av_log(avctx, AV_LOG_ERROR, "Too short header data\n");
-            return -1;
+            av_log(avctx, AV_LOG_ERROR, "header data too small\n");
+            return AVERROR_INVALIDDATA;
         }
         if (memcmp(ctx->qmat_luma, ptr, 64)) {
             memcpy(ctx->qmat_luma, ptr, 64);
@@ -207,7 +207,7 @@ static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
 
     if (flags & 1) {
         if (ptr - buf > hdr_size - 64) {
-            av_log(avctx, AV_LOG_ERROR, "Too short header data\n");
+            av_log(avctx, AV_LOG_ERROR, "header data too small\n");
             return -1;
         }
         if (memcmp(ctx->qmat_chroma, ptr, 64)) {
@@ -233,32 +233,32 @@ static int decode_picture_header(ProresContext *ctx, const uint8_t *buf,
 
     hdr_size = data_size > 0 ? buf[0] >> 3 : 0;
     if (hdr_size < 8 || hdr_size > data_size) {
-        av_log(avctx, AV_LOG_ERROR, "picture header too short!\n");
-        return -1;
+        av_log(avctx, AV_LOG_ERROR, "picture header too small\n");
+        return AVERROR_INVALIDDATA;
     }
 
     pic_data_size = AV_RB32(buf + 1);
     if (pic_data_size > data_size) {
-        av_log(avctx, AV_LOG_ERROR, "picture data too short!\n");
-        return -1;
+        av_log(avctx, AV_LOG_ERROR, "picture data too small\n");
+        return AVERROR_INVALIDDATA;
     }
 
     slice_width_factor = buf[7] >> 4;
     slice_height_factor = buf[7] & 0xF;
     if (slice_width_factor > 3 || slice_height_factor) {
         av_log(avctx, AV_LOG_ERROR,
-               "unsupported slice dimension: %d x %d!\n",
+               "unsupported slice dimension: %d x %d\n",
                1 << slice_width_factor, 1 << slice_height_factor);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     ctx->slice_width_factor = slice_width_factor;
     ctx->slice_height_factor = slice_height_factor;
 
     ctx->num_x_mbs = (avctx->width + 15) >> 4;
-    ctx->num_y_mbs =
-        (avctx->height + (1 << (4 + ctx->picture.interlaced_frame)) - 1) >>
-        (4 + ctx->picture.interlaced_frame);
+    ctx->num_y_mbs = (avctx->height +
+                      (1 << (4 + ctx->picture.interlaced_frame)) - 1) >>
+                     (4 + ctx->picture.interlaced_frame);
 
     remainder = ctx->num_x_mbs & ((1 << slice_width_factor) - 1);
     num_x_slices = (ctx->num_x_mbs >> slice_width_factor) + (remainder & 1) +
@@ -266,22 +266,21 @@ static int decode_picture_header(ProresContext *ctx, const uint8_t *buf,
 
     num_slices = num_x_slices * ctx->num_y_mbs;
     if (num_slices != AV_RB16(buf + 5)) {
-        av_log(avctx, AV_LOG_ERROR, "invalid number of slices!\n");
-        return -1;
+        av_log(avctx, AV_LOG_ERROR, "invalid number of slices\n");
+        return AVERROR_INVALIDDATA;
     }
 
     if (ctx->total_slices != num_slices) {
         av_freep(&ctx->slice_data_index);
-        ctx->slice_data_index =
-            av_malloc((num_slices + 1) * sizeof(uint8_t*));
+        ctx->slice_data_index = av_malloc((num_slices + 1) * sizeof(uint8_t*));
         if (!ctx->slice_data_index)
            return AVERROR(ENOMEM);
        ctx->total_slices = num_slices;
     }
 
     if (hdr_size + num_slices * 2 > data_size) {
-        av_log(avctx, AV_LOG_ERROR, "slice table too short!\n");
-        return -1;
+        av_log(avctx, AV_LOG_ERROR, "slice table too small\n");
+        return AVERROR_INVALIDDATA;
     }
 
     /* parse slice table allowing quick access to the slice data */
@@ -295,7 +294,7 @@ static int decode_picture_header(ProresContext *ctx, const uint8_t *buf,
         ctx->slice_data_index[i] = data_ptr;
 
         if (data_ptr > buf + data_size) {
-            av_log(avctx, AV_LOG_ERROR, "out of slice data!\n");
+            av_log(avctx, AV_LOG_ERROR, "out of slice data\n");
             return -1;
         }
 
@@ -330,7 +329,7 @@ static inline int decode_vlc_codeword(GetBitContext *gb, uint8_t codebook)
             LAST_SKIP_BITS(re, gb, log + 1);
         } else {
             prefix_len = log + 1;
-            code = (log << rice_order) + NEG_USR32((buf << prefix_len), rice_order);
+            code = (log << rice_order) + NEG_USR32(buf << prefix_len, rice_order);
             LAST_SKIP_BITS(re, gb, prefix_len + rice_order);
         }
     } else { /* otherwise we got a exp golomb code */
@@ -519,8 +518,7 @@ static void decode_slice_plane(ProresContext *ctx, const uint8_t *buf,
     /* inverse quantization, inverse transform and output */
     block_ptr = ctx->blocks;
 
-    for (blk_num = 0; blk_num < blocks_per_slice;
-         blk_num++, block_ptr += 64) {
+    for (blk_num = 0; blk_num < blocks_per_slice; blk_num++, block_ptr += 64) {
         /* TODO: the correct solution shoud be (block_ptr[i] * qmat[i]) >> 1
          * and the input of the inverse transform should be scaled by 2
          * in order to avoid rounding errors.
@@ -572,8 +570,8 @@ static int decode_slice(ProresContext *ctx, int pic_num, int slice_num,
     }
 
     if (slice_data_size < 6) {
-        av_log(avctx, AV_LOG_ERROR, "slice data too short!\n");
-        return -1;
+        av_log(avctx, AV_LOG_ERROR, "slice data too small\n");
+        return AVERROR_INVALIDDATA;
     }
 
     /* parse slice header */
@@ -583,8 +581,8 @@ static int decode_slice(ProresContext *ctx, int pic_num, int slice_num,
     v_data_size = slice_data_size - y_data_size - u_data_size - hdr_size;
 
     if (v_data_size < 0 || hdr_size < 6) {
-        av_log(avctx, AV_LOG_ERROR, "invalid data sizes!\n");
-        return -1;
+        av_log(avctx, AV_LOG_ERROR, "invalid data size\n");
+        return AVERROR_INVALIDDATA;
     }
 
     sf = av_clip(buf[1], 1, 224);
@@ -595,7 +593,7 @@ static int decode_slice(ProresContext *ctx, int pic_num, int slice_num,
     if (ctx->qmat_changed || sf != ctx->prev_slice_sf) {
         ctx->prev_slice_sf = sf;
         for (i = 0; i < 64; i++) {
-            ctx->qmat_luma_scaled[i] = ctx->qmat_luma[i] * sf;
+            ctx->qmat_luma_scaled[i]   = ctx->qmat_luma[i] * sf;
             ctx->qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * sf;
         }
     }
@@ -671,14 +669,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     if (buf_size < 28 || buf_size < AV_RB32(buf) ||
         AV_RB32(buf + 4) != FRAME_ID) {
         av_log(avctx, AV_LOG_ERROR, "invalid frame\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     MOVE_DATA_PTR(8);
 
     frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
     if (frame_hdr_size < 0)
-        return -1;
+        return AVERROR_INVALIDDATA;
 
     MOVE_DATA_PTR(frame_hdr_size);
 
@@ -692,7 +690,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     for (pic_num = 0; ctx->picture.interlaced_frame - pic_num + 1; pic_num++) {
         pic_data_size = decode_picture_header(ctx, buf, buf_size, avctx);
         if (pic_data_size < 0)
-            return -1;
+            return AVERROR_INVALIDDATA;
 
         if (decode_picture(ctx, pic_num, avctx))
             return -1;
@@ -721,7 +719,7 @@ static av_cold int decode_close(AVCodecContext *avctx)
 
 
 AVCodec ff_prores_lgpl_decoder = {
-    .name           = "ProRes_lgpl",
+    .name           = "prores_lgpl",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = CODEC_ID_PRORES,
     .priv_data_size = sizeof(ProresContext),

libavcodec/ratecontrol.c

@@ -861,7 +861,9 @@ static int init_pass2(MpegEncContext *s)
 
     /* find qscale */
     for(i=0; i<rcc->num_entries; i++){
+        RateControlEntry *rce= &rcc->entry[i];
         qscale[i]= get_qscale(s, &rcc->entry[i], rate_factor, i);
+        rcc->last_qscale_for[rce->pict_type] = qscale[i];
     }
     assert(filter_size%2==1);
 

libavformat/flvdec.c

@@ -139,6 +139,18 @@ static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc, AVStream
     int64_t *filepositions = NULL;
     int ret = AVERROR(ENOSYS);
     int64_t initial_pos = avio_tell(ioc);
+    AVDictionaryEntry *creator = av_dict_get(s->metadata, "metadatacreator",
+                                             NULL, 0);
+
+    if (creator && !strcmp(creator->value, "MEGA")) {
+        /* Files with this metadatacreator tag seem to have filepositions
+         * pointing at the 4 trailer bytes of the previous packet,
+         * which isn't the norm (nor what we expect here, nor what
+         * jwplayer + lighttpd expect, nor what flvtool2 produces).
+         * Just ignore the index in this case, instead of risking trying
+         * to adjust it to something that might or might not work. */
+        return 0;
+    }
 
     while (avio_tell(ioc) < max_pos - 2 && amf_get_string(ioc, str_val, sizeof(str_val)) > 0) {
         int64_t** current_array;

libavformat/rtp.c

@@ -103,6 +103,11 @@ int ff_rtp_get_payload_type(AVCodecContext *codec)
                 continue;
             payload_type = AVRtpPayloadTypes[i].pt;
         }
+
+    /* dynamic payload type */
+    if (payload_type < 0)
+        payload_type = RTP_PT_PRIVATE + (codec->codec_type == AVMEDIA_TYPE_AUDIO);
+
     return payload_type;
 }
 

libavformat/rtpenc.c

@@ -93,9 +93,6 @@ static int rtp_write_header(AVFormatContext *s1)
     }
 
     s->payload_type = ff_rtp_get_payload_type(st->codec);
-    if (s->payload_type < 0)
-        s->payload_type = RTP_PT_PRIVATE + (st->codec->codec_type == AVMEDIA_TYPE_AUDIO);
-
     s->base_timestamp = av_get_random_seed();
     s->timestamp = s->base_timestamp;
     s->cur_timestamp = 0;

libavformat/sdp.c

@@ -533,9 +533,6 @@ void ff_sdp_write_media(char *buff, int size, AVCodecContext *c, const char *des
     int payload_type;
 
     payload_type = ff_rtp_get_payload_type(c);
-    if (payload_type < 0) {
-        payload_type = RTP_PT_PRIVATE + (c->codec_type == AVMEDIA_TYPE_AUDIO);
-    }
 
     switch (c->codec_type) {
     case AVMEDIA_TYPE_VIDEO : type = "video" ; break;

libavformat/utils.c

@@ -887,6 +887,7 @@ static int is_intra_only(AVCodecContext *enc){
     case CODEC_ID_MJPEG:
     case CODEC_ID_MJPEGB:
     case CODEC_ID_LJPEG:
+    case CODEC_ID_PRORES:
     case CODEC_ID_RAWVIDEO:
     case CODEC_ID_DVVIDEO:
     case CODEC_ID_HUFFYUV:
@@ -896,7 +897,6 @@ static int is_intra_only(AVCodecContext *enc){
     case CODEC_ID_VCR1:
     case CODEC_ID_DNXHD:
     case CODEC_ID_JPEG2000:
-    case CODEC_ID_PRORES:
         return 1;
     default: break;
     }

libswscale/swscale.c

@@ -363,7 +363,7 @@ yuv2yuvX10_c_template(const int16_t *lumFilter, const int16_t **lumSrc,
     int i;
     uint16_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2],
              *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL;
-    int shift = 11 + 16 - output_bits - 1;
+    int shift = 11 + 16 - output_bits;
 
 #define output_pixel(pos, val) \
     if (big_endian) { \
@@ -372,24 +372,24 @@ yuv2yuvX10_c_template(const int16_t *lumFilter, const int16_t **lumSrc,
         AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
     }
     for (i = 0; i < dstW; i++) {
-        int val = 1 << (26-output_bits - 1);
+        int val = 1 << (26-output_bits);
         int j;
 
         for (j = 0; j < lumFilterSize; j++)
-            val += (lumSrc[j][i] * lumFilter[j]) >> 1;
+            val += lumSrc[j][i] * lumFilter[j];
 
         output_pixel(&yDest[i], val);
     }
 
     if (uDest) {
         for (i = 0; i < chrDstW; i++) {
-            int u = 1 << (26-output_bits - 1);
-            int v = 1 << (26-output_bits - 1);
+            int u = 1 << (26-output_bits);
+            int v = 1 << (26-output_bits);
             int j;
 
             for (j = 0; j < chrFilterSize; j++) {
-                u += (chrUSrc[j][i] * chrFilter[j]) >> 1;
-                v += (chrVSrc[j][i] * chrFilter[j]) >> 1;
+                u += chrUSrc[j][i] * chrFilter[j];
+                v += chrVSrc[j][i] * chrFilter[j];
             }
 
             output_pixel(&uDest[i], u);
@@ -399,11 +399,11 @@ yuv2yuvX10_c_template(const int16_t *lumFilter, const int16_t **lumSrc,
 
     if (CONFIG_SWSCALE_ALPHA && aDest) {
         for (i = 0; i < dstW; i++) {
-            int val = 1 << (26-output_bits - 1);
+            int val = 1 << (26-output_bits);
             int j;
 
             for (j = 0; j < lumFilterSize; j++)
-                val += (alpSrc[j][i] * lumFilter[j]) >> 1;
+                val += alpSrc[j][i] * lumFilter[j];
 
             output_pixel(&aDest[i], val);
         }

tests/ref/lavfi/pixfmts_scale

@@ -31,15 +31,15 @@ uyvy422 314bd486277111a95d9369b944fa0400
 yuv410p 7df8f6d69b56a8dcb6c7ee908e5018b5
 yuv411p 1143e7c5cc28fe0922b051b17733bc4c
 yuv420p fdad2d8df8985e3d17e73c71f713cb14
-yuv420p10be dfa4d57bbc0e1a81f86a3895ab4feac0
-yuv420p10le af898206e757b0fca844a336f71d0091
+yuv420p10be 6d335e75b553da590135cf8bb999610c
+yuv420p10le d510ddbabefd03ef39ec943fcb51b709
 yuv420p16be 2a75942af24fbdc1fdfe189c6e7bf589
 yuv420p16le c4264d92a7c273967a778f4f5daddbe3
-yuv420p9be 046091d96f2a78e224036f203d8c9601
-yuv420p9le c9abfffee99fcf5fcbfc5adcda14e4b4
+yuv420p9be ec4983b7a949c0472110a7a2c58e278a
+yuv420p9le c136dce5913a722eee44ab72cff664b2
 yuv422p 918e37701ee7377d16a8a6c119c56a40
-yuv422p10be 35206fcd7e00ee582a8c366b37d57d1d
-yuv422p10le 396f930e2da02f149ab9dd5b781cbe8d
+yuv422p10be cea7ca6b0e66d6f29539885896c88603
+yuv422p10le a10c4a5837547716f13cd61918b145f9
 yuv422p16be 285993ee0c0f4f8e511ee46f93c5f38c
 yuv422p16le 61bfcee8e54465f760164f5a75d40b5e
 yuv440p 461503fdb9b90451020aa3b25ddf041c

tests/ref/vsynth1/dnxhd_720p_10bit

@@ -1,4 +1,4 @@
-cb29b6ae4e1562d95f9311991fef98df *./tests/data/vsynth1/dnxhd-720p-10bit.dnxhd
+b5e24a055af02edec8674333260214fd *./tests/data/vsynth1/dnxhd-720p-10bit.dnxhd
 2293760 ./tests/data/vsynth1/dnxhd-720p-10bit.dnxhd
-2f45bb1af7da5dd3dca870ac87237b7d *./tests/data/dnxhd_720p_10bit.vsynth1.out.yuv
+4466ff3d73d01bbe75ea25001d379b63 *./tests/data/dnxhd_720p_10bit.vsynth1.out.yuv
 stddev: 6.27 PSNR: 32.18 MAXDIFF: 64 bytes: 760320/ 7603200

tests/ref/vsynth2/dnxhd_720p_10bit

@@ -1,4 +1,4 @@
-8648511257afb816b5b911706ca391db *./tests/data/vsynth2/dnxhd-720p-10bit.dnxhd
+4b57da2c0c1280469ff3579f7151c227 *./tests/data/vsynth2/dnxhd-720p-10bit.dnxhd
 2293760 ./tests/data/vsynth2/dnxhd-720p-10bit.dnxhd
-391b6f5aa7c7b488b479cb43d420b860 *./tests/data/dnxhd_720p_10bit.vsynth2.out.yuv
+31a6aa8b8702e85fa3b48e73f035c4e4 *./tests/data/dnxhd_720p_10bit.vsynth2.out.yuv
 stddev: 1.35 PSNR: 45.46 MAXDIFF: 23 bytes: 760320/ 7603200