
Add Apple Pixlet decoder

Signed-off-by: Vittorio Giovara <vittorio.giovara@gmail.com>
Author: Paul B Mahol, 2016-09-19 08:53:03 -04:00; committed by Vittorio Giovara
parent 19d57ca62e
commit aba5b94859
11 changed files with 708 additions and 1 deletion

Changelog

@@ -8,6 +8,7 @@ version <next>:
- VAAPI-accelerated deinterlacing
- config.log and other configuration files moved into avbuild/ directory
- VAAPI-accelerated MPEG-2 and VP8 encoding
- Apple Pixlet decoder
version 12:

doc/general.texi

@@ -590,6 +590,7 @@ following image formats are supported:
@item ANSI/ASCII art @tab @tab X
@item Apple Intermediate Codec @tab @tab X
@item Apple MJPEG-B @tab @tab X
@item Apple Pixlet @tab @tab X
@item Apple ProRes @tab X @tab X
@item Apple QuickDraw @tab @tab X
@tab fourcc: qdrw

libavcodec/Makefile

@@ -375,6 +375,7 @@ OBJS-$(CONFIG_PGMYUV_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PGMYUV_ENCODER) += pnmenc.o
OBJS-$(CONFIG_PGSSUB_DECODER) += pgssubdec.o
OBJS-$(CONFIG_PICTOR_DECODER) += pictordec.o cga_data.o
OBJS-$(CONFIG_PIXLET_DECODER) += pixlet.o
OBJS-$(CONFIG_PNG_DECODER) += png.o pngdec.o pngdsp.o
OBJS-$(CONFIG_PNG_ENCODER) += png.o pngenc.o
OBJS-$(CONFIG_PPM_DECODER) += pnmdec.o pnm.o

libavcodec/allcodecs.c

@@ -231,6 +231,7 @@ void avcodec_register_all(void)
REGISTER_ENCDEC (PGM, pgm);
REGISTER_ENCDEC (PGMYUV, pgmyuv);
REGISTER_DECODER(PICTOR, pictor);
REGISTER_DECODER(PIXLET, pixlet);
REGISTER_ENCDEC (PNG, png);
REGISTER_ENCDEC (PPM, ppm);
REGISTER_ENCDEC (PRORES, prores);

libavcodec/avcodec.h

@@ -392,6 +392,7 @@ enum AVCodecID {
AV_CODEC_ID_MAGICYUV,
AV_CODEC_ID_TRUEMOTION2RT,
AV_CODEC_ID_AV1,
AV_CODEC_ID_PIXLET,
/* various PCM "codecs" */
AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs

libavcodec/codec_desc.c

@@ -1212,6 +1212,13 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("Alliance for Open Media AV1"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_PIXLET,
.type = AVMEDIA_TYPE_VIDEO,
.name = "pixlet",
.long_name = NULL_IF_CONFIG_SMALL("Apple Pixlet"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
/* image codecs */
{

libavcodec/pixlet.c (new file, 688 lines)

@@ -0,0 +1,688 @@
/*
* Apple Pixlet decoder
* Copyright (c) 2016 Paul B Mahol
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavutil/imgutils.h"
#include "libavutil/intmath.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "bitstream.h"
#include "bytestream.h"
#include "internal.h"
#include "thread.h"
#include "unary.h"
#define NB_LEVELS 4
#define PIXLET_MAGIC 0xDEADBEEF
#define H 0
#define V 1
#define SQR(x) ((x) * (x))
typedef struct SubBand {
size_t width, height;
size_t size;
size_t x, y;
} SubBand;
typedef struct PixletContext {
AVClass *class;
GetByteContext gb;
BitstreamContext bc;
int levels;
int depth;
size_t w, h;
int16_t *filter[2];
int16_t *prediction;
float scaling[4][2][NB_LEVELS];
SubBand band[4][NB_LEVELS * 3 + 1];
} PixletContext;
static av_cold int pixlet_init(AVCodecContext *avctx)
{
avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
avctx->color_range = AVCOL_RANGE_JPEG;
return 0;
}
static av_cold int pixlet_close(AVCodecContext *avctx)
{
PixletContext *ctx = avctx->priv_data;
av_freep(&ctx->filter[0]);
av_freep(&ctx->filter[1]);
av_freep(&ctx->prediction);
return 0;
}
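/* Allocate the scratch buffers and precompute the position and size of
 * every subband for each plane; chroma planes are subsampled by two in
 * both directions. */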
static int init_decoder(AVCodecContext *avctx)
{
PixletContext *ctx = avctx->priv_data;
int i, plane;
ctx->filter[0] = av_malloc_array(ctx->h, sizeof(int16_t));
ctx->filter[1] = av_malloc_array(FFMAX(ctx->h, ctx->w) + 16, sizeof(int16_t));
ctx->prediction = av_malloc_array((ctx->w >> NB_LEVELS), sizeof(int16_t));
if (!ctx->filter[0] || !ctx->filter[1] || !ctx->prediction)
return AVERROR(ENOMEM);
for (plane = 0; plane < 3; plane++) {
unsigned shift = plane > 0;
size_t w = ctx->w >> shift;
size_t h = ctx->h >> shift;
ctx->band[plane][0].width = w >> NB_LEVELS;
ctx->band[plane][0].height = h >> NB_LEVELS;
ctx->band[plane][0].size = (w >> NB_LEVELS) * (h >> NB_LEVELS);
for (i = 0; i < NB_LEVELS * 3; i++) {
unsigned scale = ctx->levels - (i / 3);
ctx->band[plane][i + 1].width = w >> scale;
ctx->band[plane][i + 1].height = h >> scale;
ctx->band[plane][i + 1].size = (w >> scale) * (h >> scale);
ctx->band[plane][i + 1].x = (w >> scale) * (((i + 1) % 3) != 2);
ctx->band[plane][i + 1].y = (h >> scale) * (((i + 1) % 3) != 1);
}
}
return 0;
}
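/* Decode the lowpass coefficients with an adaptive Golomb/Rice-style
 * scheme: a running "state" selects the code length, an escape switches to
 * raw 16-bit values, and zero runs are coded separately. Returns the byte
 * offset reached in the bitstream, or a negative error code. */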
static int read_low_coeffs(AVCodecContext *avctx, int16_t *dst, size_t size,
size_t width, ptrdiff_t stride)
{
PixletContext *ctx = avctx->priv_data;
BitstreamContext *bc = &ctx->bc;
unsigned cnt1, nbits, k, j = 0, i = 0;
int64_t value, state = 3;
int rlen, escape, flag = 0;
while (i < size) {
nbits = FFMIN(ff_clz((state >> 8) + 3) ^ 0x1F, 14);
cnt1 = get_unary(bc, 0, 8);
if (cnt1 < 8) {
value = bitstream_read(bc, nbits);
if (value <= 1) {
bitstream_unget(bc, value & 1, 1);
value = 1;
}
escape = value + ((1 << nbits) - 1) * cnt1 - 1;
} else {
escape = bitstream_read(bc, 16);
}
value = -((escape + flag) & 1) | 1;
dst[j++] = value * ((escape + flag + 1) >> 1);
i++;
if (j == width) {
j = 0;
dst += stride;
}
state = 120 * (escape + flag) + state - (120 * state >> 8);
flag = 0;
if (state * 4 > 0xFF || i >= size)
continue;
nbits = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
escape = av_mod_uintp2(16383, nbits);
cnt1 = get_unary(bc, 0, 8);
if (cnt1 > 7) {
rlen = bitstream_read(bc, 16);
} else {
value = bitstream_read(bc, nbits);
if (value <= 1) {
bitstream_unget(bc, value & 1, 1);
value = 1;
}
rlen = value + escape * cnt1 - 1;
}
if (i + rlen > size)
return AVERROR_INVALIDDATA;
i += rlen;
for (k = 0; k < rlen; k++) {
dst[j++] = 0;
if (j == width) {
j = 0;
dst += stride;
}
}
state = 0;
flag = rlen < 0xFFFF ? 1 : 0;
}
bitstream_align(bc);
return bitstream_tell(bc) >> 3;
}
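/* Decode one highpass band with the same adaptive scheme, parameterized by
 * the per-band values a, c and d read from the band header in
 * read_highpass(). Returns the byte offset reached, or a negative error. */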
static int read_high_coeffs(AVCodecContext *avctx, uint8_t *src, int16_t *dst,
int size, int64_t c, int a, int64_t d,
int width, ptrdiff_t stride)
{
PixletContext *ctx = avctx->priv_data;
BitstreamContext *bc = &ctx->bc;
unsigned cnt1, shbits, rlen, nbits, length, i = 0, j = 0, k;
int ret, escape, pfx, cthulu, yflag, xflag, flag = 0;
int64_t state = 3, value, tmp;
ret = bitstream_init8(bc, src, bytestream2_get_bytes_left(&ctx->gb));
if (ret < 0)
return ret;
cthulu = (a >= 0) + (a ^ (a >> 31)) - (a >> 31);
if (cthulu != 1) {
nbits = 33 - ff_clz(cthulu - 1);
if (nbits > 16)
return AVERROR_INVALIDDATA;
} else {
nbits = 1;
}
length = 25 - nbits;
while (i < size) {
if (state >> 8 != -3)
value = ff_clz((state >> 8) + 3) ^ 0x1F;
else
value = -1;
cnt1 = get_unary(bc, 0, length);
if (cnt1 >= length) {
cnt1 = bitstream_read(bc, nbits);
} else {
pfx = 14 + (((value - 14) >> 32) & (value - 14));
cnt1 *= (1 << pfx) - 1;
shbits = bitstream_read(bc, pfx);
if (shbits <= 1) {
bitstream_unget(bc, shbits & 1, 1);
shbits = 1;
}
cnt1 += shbits - 1;
}
xflag = flag + cnt1;
yflag = xflag;
if (flag + cnt1 == 0) {
value = 0;
} else {
xflag &= 1u;
tmp = c * ((yflag + 1) >> 1) + (c >> 1);
value = xflag + (tmp ^ -xflag);
}
i++;
dst[j++] = value;
if (j == width) {
j = 0;
dst += stride;
}
state += d * yflag - (d * state >> 8);
flag = 0;
if (state * 4 > 0xFF || i >= size)
continue;
pfx = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
escape = av_mod_uintp2(16383, pfx);
cnt1 = get_unary(bc, 0, 8);
if (cnt1 < 8) {
if (pfx < 1 || pfx > 25)
return AVERROR_INVALIDDATA;
value = bitstream_read(bc, pfx);
if (value <= 1) {
bitstream_unget(bc, value & 1, 1);
value = 1;
}
rlen = value + escape * cnt1 - 1;
} else {
if (bitstream_read_bit(bc))
value = bitstream_read(bc, 16);
else
value = bitstream_read(bc, 8);
rlen = value + 8 * escape;
}
if (rlen > 0xFFFF || i + rlen > size)
return AVERROR_INVALIDDATA;
i += rlen;
for (k = 0; k < rlen; k++) {
dst[j++] = 0;
if (j == width) {
j = 0;
dst += stride;
}
}
state = 0;
flag = rlen < 0xFFFF ? 1 : 0;
}
bitstream_align(bc);
return bitstream_tell(bc) >> 3;
}
static int read_highpass(AVCodecContext *avctx, uint8_t *ptr,
int plane, AVFrame *frame)
{
PixletContext *ctx = avctx->priv_data;
ptrdiff_t stride = frame->linesize[plane] / 2;
int i, ret;
for (i = 0; i < ctx->levels * 3; i++) {
int32_t a = bytestream2_get_be32(&ctx->gb);
int32_t b = bytestream2_get_be32(&ctx->gb);
int32_t c = bytestream2_get_be32(&ctx->gb);
int32_t d = bytestream2_get_be32(&ctx->gb);
int16_t *dest = (int16_t *)frame->data[plane] +
ctx->band[plane][i + 1].x +
ctx->band[plane][i + 1].y * stride;
size_t size = ctx->band[plane][i + 1].size;
uint32_t magic = bytestream2_get_be32(&ctx->gb);
if (magic != PIXLET_MAGIC) {
av_log(avctx, AV_LOG_ERROR,
"wrong magic number: 0x%"PRIX32" for plane %d, band %d\n",
magic, plane, i);
return AVERROR_INVALIDDATA;
}
ret = read_high_coeffs(avctx, ptr + bytestream2_tell(&ctx->gb), dest,
size, c, (b >= FFABS(a)) ? b : a, d,
ctx->band[plane][i + 1].width, stride);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR,
"error in highpass coefficients for plane %d, band %d\n",
plane, i);
return ret;
}
bytestream2_skip(&ctx->gb, ret);
}
return 0;
}
static void line_add_sat_s16(int16_t *dst, const int16_t *src, size_t len)
{
int i;
for (i = 0; i < len; i++) {
int val = dst[i] + src[i];
dst[i] = av_clip_int16(val);
}
}
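/* The lowpass band is stored as residuals: pred[] keeps a saturating
 * running sum of each column, and every row is then prefix-summed from
 * left to right to reconstruct the actual samples. */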
static void lowpass_prediction(int16_t *dst, int16_t *pred,
size_t width, size_t height, ptrdiff_t stride)
{
int i, j;
memset(pred, 0, width * sizeof(*pred));
for (i = 0; i < height; i++) {
line_add_sat_s16(pred, dst, width);
dst[0] = pred[0];
for (j = 1; j < width; j++)
dst[j] = pred[j] + dst[j - 1];
dst += stride;
}
}
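/* One synthesis pass over a single line: the low and high halves of the
 * line are copied into padded scratch buffers with extended borders, then
 * recombined into even and odd output samples using fixed floating-point
 * kernels scaled by the per-band SCALE factor. */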
static void filterfn(int16_t *dest, int16_t *tmp, size_t size, float SCALE)
{
int16_t *low, *high, *ll, *lh, *hl, *hh;
int hsize, i, j;
float value;
hsize = size >> 1;
low = tmp + 4;
high = &low[hsize + 8];
memcpy(low, dest, size);
memcpy(high, dest + hsize, size);
ll = &low[hsize];
lh = &low[hsize];
hl = &high[hsize];
hh = hl;
for (i = 4, j = 2; i; i--, j++, ll--, hh++, lh++, hl--) {
low[i - 5] = low[j - 1];
lh[0] = ll[-1];
high[i - 5] = high[j - 2];
hh[0] = hl[-2];
}
for (i = 0; i < hsize; i++) {
value = low [i + 1] * -0.07576144003329376f +
low [i + 0] * 0.8586296626673486f +
low [i - 1] * -0.07576144003329376f +
high[i + 0] * 0.3535533905932737f +
high[i - 1] * 0.3535533905932737f;
dest[i * 2] = av_clipf(value * SCALE, INT16_MIN, INT16_MAX);
}
for (i = 0; i < hsize; i++) {
value = low [i + 2] * -0.01515228715813062f +
low [i + 1] * 0.3687056777514043f +
low [i + 0] * 0.3687056777514043f +
low [i - 1] * -0.01515228715813062f +
high[i + 1] * 0.07071067811865475f +
high[i + 0] * -0.8485281374238569f +
high[i - 1] * 0.07071067811865475f;
dest[i * 2 + 1] = av_clipf(value * SCALE, INT16_MIN, INT16_MAX);
}
}
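/* Full inverse transform: starting from the band downscaled NB_LEVELS
 * times, each pass doubles the reconstructed width and height, filtering
 * all rows and then all columns with the per-level scaling factors. */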
static void reconstruction(AVCodecContext *avctx, int16_t *dest,
size_t width, size_t height, ptrdiff_t stride,
float *scaling_h, float *scaling_v)
{
PixletContext *ctx = avctx->priv_data;
unsigned scaled_width, scaled_height;
int16_t *ptr, *tmp;
int i, j, k;
scaled_width = width >> NB_LEVELS;
scaled_height = height >> NB_LEVELS;
tmp = ctx->filter[0];
for (i = 0; i < NB_LEVELS; i++) {
float scale_v = scaling_v[i];
float scale_h = scaling_h[i];
scaled_width <<= 1;
scaled_height <<= 1;
ptr = dest;
for (j = 0; j < scaled_height; j++) {
filterfn(ptr, ctx->filter[1], scaled_width, scale_v);
ptr += stride;
}
for (j = 0; j < scaled_width; j++) {
ptr = dest + j;
for (k = 0; k < scaled_height; k++) {
tmp[k] = *ptr;
ptr += stride;
}
filterfn(tmp, ctx->filter[1], scaled_height, scale_h);
ptr = dest + j;
for (k = 0; k < scaled_height; k++) {
*ptr = tmp[k];
ptr += stride;
}
}
}
}
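/* Map luma from the coded bit depth to full-range 16 bit through a
 * squaring transfer curve: out = (max(in, 0) / (2^depth - 1))^2 * 65535. */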
static void postprocess_luma(AVFrame *frame, size_t w, size_t h, int depth)
{
uint16_t *dsty = (uint16_t *)frame->data[0];
int16_t *srcy = (int16_t *)frame->data[0];
ptrdiff_t stridey = frame->linesize[0] / 2;
const float factor = 1.0f / ((1 << depth) - 1);
int i, j;
for (j = 0; j < h; j++) {
for (i = 0; i < w; i++)
dsty[i] = SQR(FFMAX(srcy[i], 0) * factor) * 65535;
dsty += stridey;
srcy += stridey;
}
}
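/* Chroma is re-centred by adding half the coded range, clipped to the
 * coded depth and shifted up to 16 bits. */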
static void postprocess_chroma(AVFrame *frame, int w, int h, int depth)
{
uint16_t *dstu = (uint16_t *)frame->data[1];
uint16_t *dstv = (uint16_t *)frame->data[2];
int16_t *srcu = (int16_t *)frame->data[1];
int16_t *srcv = (int16_t *)frame->data[2];
ptrdiff_t strideu = frame->linesize[1] / 2;
ptrdiff_t stridev = frame->linesize[2] / 2;
const unsigned add = 1 << (depth - 1);
const unsigned shift = 16 - depth;
int i, j;
for (j = 0; j < h; j++) {
for (i = 0; i < w; i++) {
dstu[i] = av_clip_uintp2_c(add + srcu[i], depth) << shift;
dstv[i] = av_clip_uintp2_c(add + srcv[i], depth) << shift;
}
dstu += strideu;
dstv += stridev;
srcu += strideu;
srcv += stridev;
}
}
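/* Decode one plane: per-level scaling factors, the DC sample, the lowpass
 * coefficients (top row, left column, remainder), then the highpass bands,
 * followed by lowpass prediction and the inverse transform. */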
static int decode_plane(AVCodecContext *avctx, int plane,
AVPacket *avpkt, AVFrame *frame)
{
PixletContext *ctx = avctx->priv_data;
ptrdiff_t stride = frame->linesize[plane] / 2;
unsigned shift = plane > 0;
int16_t *dst;
int i, ret;
for (i = ctx->levels - 1; i >= 0; i--) {
int32_t h = sign_extend(bytestream2_get_be32(&ctx->gb), 32);
int32_t v = sign_extend(bytestream2_get_be32(&ctx->gb), 32);
if (!h || !v)
return AVERROR_INVALIDDATA;
ctx->scaling[plane][H][i] = 1000000.0f / h;
ctx->scaling[plane][V][i] = 1000000.0f / v;
}
bytestream2_skip(&ctx->gb, 4);
dst = (int16_t *)frame->data[plane];
dst[0] = sign_extend(bytestream2_get_be16(&ctx->gb), 16);
ret = bitstream_init8(&ctx->bc, avpkt->data + bytestream2_tell(&ctx->gb),
bytestream2_get_bytes_left(&ctx->gb));
if (ret < 0)
return ret;
ret = read_low_coeffs(avctx, dst + 1, ctx->band[plane][0].width - 1,
ctx->band[plane][0].width - 1, 0);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR,
"error in lowpass coefficients for plane %d, top row\n", plane);
return ret;
}
ret = read_low_coeffs(avctx, dst + stride,
ctx->band[plane][0].height - 1, 1, stride);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR,
"error in lowpass coefficients for plane %d, left column\n",
plane);
return ret;
}
ret = read_low_coeffs(avctx, dst + stride + 1,
(ctx->band[plane][0].width - 1) * (ctx->band[plane][0].height - 1),
ctx->band[plane][0].width - 1, stride);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR,
"error in lowpass coefficients for plane %d, rest\n", plane);
return ret;
}
bytestream2_skip(&ctx->gb, ret);
if (bytestream2_get_bytes_left(&ctx->gb) <= 0) {
av_log(avctx, AV_LOG_ERROR, "no bytes left\n");
return AVERROR_INVALIDDATA;
}
ret = read_highpass(avctx, avpkt->data, plane, frame);
if (ret < 0)
return ret;
lowpass_prediction(dst, ctx->prediction, ctx->band[plane][0].width,
ctx->band[plane][0].height, stride);
reconstruction(avctx, (int16_t *)frame->data[plane], ctx->w >> shift,
ctx->h >> shift, stride, ctx->scaling[plane][H],
ctx->scaling[plane][V]);
return 0;
}
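/* Frame payload as parsed below: a 32-bit packet size, a version word,
 * several fields checked against fixed values, the image width and height,
 * the number of levels (must equal NB_LEVELS) and the bit depth (8..15),
 * followed by the three plane payloads. */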
static int pixlet_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
PixletContext *ctx = avctx->priv_data;
int i, w, h, width, height, ret, version;
AVFrame *p = data;
ThreadFrame frame = { .f = data };
uint32_t pktsize;
bytestream2_init(&ctx->gb, avpkt->data, avpkt->size);
pktsize = bytestream2_get_be32(&ctx->gb);
if (pktsize <= 44 || pktsize - 4 > bytestream2_get_bytes_left(&ctx->gb)) {
av_log(avctx, AV_LOG_ERROR, "Invalid packet size %"PRIu32".\n", pktsize);
return AVERROR_INVALIDDATA;
}
version = bytestream2_get_le32(&ctx->gb);
if (version != 1)
avpriv_request_sample(avctx, "Version %d", version);
bytestream2_skip(&ctx->gb, 4);
if (bytestream2_get_be32(&ctx->gb) != 1)
return AVERROR_INVALIDDATA;
bytestream2_skip(&ctx->gb, 4);
width = bytestream2_get_be32(&ctx->gb);
height = bytestream2_get_be32(&ctx->gb);
w = FFALIGN(width, 1 << (NB_LEVELS + 1));
h = FFALIGN(height, 1 << (NB_LEVELS + 1));
ctx->levels = bytestream2_get_be32(&ctx->gb);
if (ctx->levels != NB_LEVELS)
return AVERROR_INVALIDDATA;
ctx->depth = bytestream2_get_be32(&ctx->gb);
if (ctx->depth < 8 || ctx->depth > 15) {
avpriv_request_sample(avctx, "Depth %d", ctx->depth);
return AVERROR_INVALIDDATA;
}
ret = ff_set_dimensions(avctx, w, h);
if (ret < 0)
return ret;
avctx->width = width;
avctx->height = height;
/* reinit should dimensions change */
if (ctx->w != w || ctx->h != h) {
pixlet_close(avctx);
ctx->w = w;
ctx->h = h;
ret = init_decoder(avctx);
if (ret < 0) {
pixlet_close(avctx);
ctx->w = 0;
ctx->h = 0;
return ret;
}
}
bytestream2_skip(&ctx->gb, 8);
ret = ff_thread_get_buffer(avctx, &frame, 0);
if (ret < 0)
return ret;
for (i = 0; i < 3; i++) {
ret = decode_plane(avctx, i, avpkt, frame.f);
if (ret < 0)
return ret;
if (avctx->flags & AV_CODEC_FLAG_GRAY)
break;
}
postprocess_luma(frame.f, ctx->w, ctx->h, ctx->depth);
postprocess_chroma(frame.f, ctx->w >> 1, ctx->h >> 1, ctx->depth);
p->pict_type = AV_PICTURE_TYPE_I;
p->color_range = AVCOL_RANGE_JPEG;
p->key_frame = 1;
*got_frame = 1;
return pktsize;
}
#if HAVE_THREADS
static int pixlet_init_thread_copy(AVCodecContext *avctx)
{
PixletContext *ctx = avctx->priv_data;
ctx->filter[0] = NULL;
ctx->filter[1] = NULL;
ctx->prediction = NULL;
ctx->w = 0;
ctx->h = 0;
return 0;
}
#endif /* HAVE_THREADS */
AVCodec ff_pixlet_decoder = {
.name = "pixlet",
.long_name = NULL_IF_CONFIG_SMALL("Apple Pixlet"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PIXLET,
.init = pixlet_init,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(pixlet_init_thread_copy),
.close = pixlet_close,
.decode = pixlet_decode_frame,
.priv_data_size = sizeof(PixletContext),
.capabilities = AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
};

libavcodec/version.h

@@ -28,7 +28,7 @@
#include "libavutil/version.h"
#define LIBAVCODEC_VERSION_MAJOR 57
-#define LIBAVCODEC_VERSION_MINOR 34
+#define LIBAVCODEC_VERSION_MINOR 35
#define LIBAVCODEC_VERSION_MICRO 0
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \

libavformat/isom.c

@@ -265,6 +265,8 @@ const AVCodecTag ff_codec_movvideo_tags[] = {
{ AV_CODEC_ID_MAGICYUV, MKTAG('M', '8', 'Y', '4') },
{ AV_CODEC_ID_MAGICYUV, MKTAG('M', '8', 'Y', 'A') },
{ AV_CODEC_ID_PIXLET, MKTAG('p', 'x', 'l', 't') },
{ AV_CODEC_ID_NONE, 0 },
};

tests/fate/video.mak

@@ -246,6 +246,9 @@ fate-nuv: $(FATE_NUV)
FATE_SAMPLES_AVCONV-$(call DEMDEC, PAF, PAF_VIDEO) += fate-paf-video
fate-paf-video: CMD = framecrc -i $(TARGET_SAMPLES)/paf/hod1-partial.paf -pix_fmt rgb24 -an
FATE_SAMPLES_AVCONV-$(call DEMDEC, MOV, PIXLET) += fate-pixlet
fate-pixlet: CMD = framecrc -i $(TARGET_SAMPLES)/pxlt/pixlet.mov -an
FATE_SAMPLES_AVCONV-$(call DEMDEC, AVI, QPEG) += fate-qpeg
fate-qpeg: CMD = framecrc -i $(TARGET_SAMPLES)/qpeg/Clock.avi -an -pix_fmt rgb24

tests/ref/fate/pixlet (new file, 2 lines)

@@ -0,0 +1,2 @@
#tb 0: 1/25
0, 0, 0, 1, 2764800, 0xd0b6bf48