
avfilter/vf_framepack: add >8 format support

Paul B Mahol 2021-02-09 12:25:48 +01:00
parent 73ff84c3d4
commit cfcc36240f
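With this change the framepack filter should also accept the high bit-depth planar formats added to formats_supported below. As a rough usage sketch (the input file names, encoder, and the 10-bit pixel format choice are illustrative, not part of the commit), packing two 10-bit views side by side could look like:

    ffmpeg -i left_10bit.mkv -i right_10bit.mkv \
           -filter_complex "[0:v][1:v]framepack=format=sbs[packed]" \
           -map "[packed]" -c:v libx265 -pix_fmt yuv420p10le output.mkv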


@@ -44,6 +44,7 @@
 typedef struct FramepackContext {
     const AVClass *class;

+    int depth;
     const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format

     enum AVStereo3DType format;         ///< frame pack type output
@@ -52,9 +53,29 @@ typedef struct FramepackContext {
 } FramepackContext;

 static const enum AVPixelFormat formats_supported[] = {
-    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
-    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVJ420P,
-    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9,
+    AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14,
+    AV_PIX_FMT_GRAY16,
+    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+    AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+    AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+    AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+    AV_PIX_FMT_YUVJ411P,
+    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+    AV_PIX_FMT_YUV440P10,
+    AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
+    AV_PIX_FMT_YUV440P12,
+    AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
+    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+    AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+    AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
+    AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
+    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
+    AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
     AV_PIX_FMT_NONE
 };
@@ -113,6 +134,7 @@ static int config_output(AVFilterLink *outlink)
     s->pix_desc = av_pix_fmt_desc_get(outlink->format);
     if (!s->pix_desc)
         return AVERROR_BUG;
+    s->depth = s->pix_desc->comp[0].depth;

     // modify output properties as needed
     switch (s->format) {
@@ -149,7 +171,7 @@ static void horizontal_frame_pack(AVFilterLink *outlink,
     FramepackContext *s = ctx->priv;
     int i, plane;

-    if (interleaved) {
+    if (interleaved && s->depth <= 8) {
         const uint8_t *leftp  = s->input_views[LEFT]->data[0];
         const uint8_t *rightp = s->input_views[RIGHT]->data[0];
         uint8_t *dstp         = out->data[0];
@@ -184,17 +206,53 @@ static void horizontal_frame_pack(AVFilterLink *outlink,
                 }
             }
         }
+    } else if (interleaved && s->depth > 8) {
+        const uint16_t *leftp  = (const uint16_t *)s->input_views[LEFT]->data[0];
+        const uint16_t *rightp = (const uint16_t *)s->input_views[RIGHT]->data[0];
+        uint16_t *dstp         = (uint16_t *)out->data[0];
+        int length = out->width / 2;
+        int lines  = out->height;
+
+        for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
+            if (plane == 1 || plane == 2) {
+                length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
+                lines  = AV_CEIL_RSHIFT(out->height, s->pix_desc->log2_chroma_h);
+            }
+            for (i = 0; i < lines; i++) {
+                int j;
+                leftp  = (const uint16_t *)s->input_views[LEFT]->data[plane] +
+                         s->input_views[LEFT]->linesize[plane] * i / 2;
+                rightp = (const uint16_t *)s->input_views[RIGHT]->data[plane] +
+                         s->input_views[RIGHT]->linesize[plane] * i / 2;
+                dstp   = (uint16_t *)out->data[plane] + out->linesize[plane] * i / 2;
+                for (j = 0; j < length; j++) {
+                    // interpolate chroma as necessary
+                    if ((s->pix_desc->log2_chroma_w ||
+                         s->pix_desc->log2_chroma_h) &&
+                        (plane == 1 || plane == 2)) {
+                        *dstp++ = (*leftp + *rightp) / 2;
+                        *dstp++ = (*leftp + *rightp) / 2;
+                    } else {
+                        *dstp++ = *leftp;
+                        *dstp++ = *rightp;
+                    }
+                    leftp  += 1;
+                    rightp += 1;
+                }
+            }
+        }
     } else {
         for (i = 0; i < 2; i++) {
+            const int psize = 1 + (s->depth > 8);
             const uint8_t *src[4];
             uint8_t *dst[4];
-            int sub_w = s->input_views[i]->width >> s->pix_desc->log2_chroma_w;
+            int sub_w = psize * s->input_views[i]->width >> s->pix_desc->log2_chroma_w;

             src[0] = s->input_views[i]->data[0];
             src[1] = s->input_views[i]->data[1];
             src[2] = s->input_views[i]->data[2];

-            dst[0] = out->data[0] + i * s->input_views[i]->width;
+            dst[0] = out->data[0] + i * s->input_views[i]->width * psize;
             dst[1] = out->data[1] + i * sub_w;
             dst[2] = out->data[2] + i * sub_w;