Mirror of https://github.com/FFmpeg/FFmpeg.git
Commit 532b23f079: This adds tons of code for no benefit other than making VideoToolbox support conform to the new hwaccel API (using hw_device_ctx and hw_frames_ctx). Since VideoToolbox decoding does not actually require the user to allocate frames, the new code does mostly nothing. One benefit is that ffmpeg_videotoolbox.c can be dropped once generic hwaccel support for ffmpeg.c is merged from Libav. This does not consider VDA or VideoToolbox encoding. Fun fact: the frame transfer functions are copied from vaapi, as the mapping makes copying generic boilerplate. Mapping itself is not exported by the VT code, because I don't know how to test it.
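For context, a caller-side sketch of the generic hwdevice API this commit plugs into (the helper name open_vt_device is illustrative and not part of FFmpeg; error handling is reduced to the return value):

#include <libavutil/hwcontext.h>

/* Create a VideoToolbox device context through the generic hwdevice API.
 * VideoToolbox accepts no device-selection string and no options, so both
 * are left NULL. Returns 0 on success or a negative AVERROR code. */
static int open_vt_device(AVBufferRef **hw_device_ctx)
{
    return av_hwdevice_ctx_create(hw_device_ctx, AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
                                  NULL, NULL, 0);
}

A decoder would then attach a reference to the resulting device buffer to AVCodecContext.hw_device_ctx before avcodec_open2(). The file itself follows below.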
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#include <stdint.h>
#include <string.h>

#include <VideoToolbox/VideoToolbox.h>

#include "buffer.h"
#include "common.h"
#include "hwcontext.h"
#include "hwcontext_internal.h"
#include "hwcontext_videotoolbox.h"
#include "mem.h"
#include "pixfmt.h"
#include "pixdesc.h"

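/* Known CVPixelBuffer pixel format <-> AVPixelFormat correspondences. */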
static const struct {
    uint32_t cv_fmt;
    enum AVPixelFormat pix_fmt;
} cv_pix_fmts[] = {
    { kCVPixelFormatType_420YpCbCr8Planar,             AV_PIX_FMT_YUV420P },
    { kCVPixelFormatType_422YpCbCr8,                   AV_PIX_FMT_UYVY422 },
    { kCVPixelFormatType_32BGRA,                       AV_PIX_FMT_BGRA },
#ifdef kCFCoreFoundationVersionNumber10_7
    { kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, AV_PIX_FMT_NV12 },
#endif
};

enum AVPixelFormat av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt)
{
    int i;
    for (i = 0; i < FF_ARRAY_ELEMS(cv_pix_fmts); i++) {
        if (cv_pix_fmts[i].cv_fmt == cv_fmt)
            return cv_pix_fmts[i].pix_fmt;
    }
    return AV_PIX_FMT_NONE;
}

uint32_t av_map_videotoolbox_format_from_pixfmt(enum AVPixelFormat pix_fmt)
{
    int i;
    for (i = 0; i < FF_ARRAY_ELEMS(cv_pix_fmts); i++) {
        if (cv_pix_fmts[i].pix_fmt == pix_fmt)
            return cv_pix_fmts[i].cv_fmt;
    }
    return 0;
}

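/* Hand out a frame backed by the frames-context pool and tag it as an
 * AV_PIX_FMT_VIDEOTOOLBOX hardware frame; data[3] carries the payload. */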
static int vt_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
{
    frame->buf[0] = av_buffer_pool_get(ctx->pool);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    frame->data[3] = frame->buf[0]->data;
    frame->format  = AV_PIX_FMT_VIDEOTOOLBOX;
    frame->width   = ctx->width;
    frame->height  = ctx->height;

    return 0;
}

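/* The only software format offered for transfers, in either direction,
 * is the sw_format of the frames context. */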
static int vt_transfer_get_formats(AVHWFramesContext *ctx,
                                   enum AVHWFrameTransferDirection dir,
                                   enum AVPixelFormat **formats)
{
    enum AVPixelFormat *fmts = av_malloc_array(2, sizeof(*fmts));
    if (!fmts)
        return AVERROR(ENOMEM);

    fmts[0] = ctx->sw_format;
    fmts[1] = AV_PIX_FMT_NONE;

    *formats = fmts;
    return 0;
}

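/* Unmap callback: unlock the CVPixelBuffer with the lock flags that were
 * stashed in hwmap->priv when the mapping was created. */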
static void vt_unmap(AVHWFramesContext *ctx, HWMapDescriptor *hwmap)
{
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)hwmap->source->data[3];

    CVPixelBufferUnlockBaseAddress(pixbuf, (uintptr_t)hwmap->priv);
}

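/* Map a CVPixelBuffer into CPU-visible memory by locking its base address
 * and exposing the plane pointers and linesizes on dst. */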
static int vt_map_frame(AVHWFramesContext *ctx, AVFrame *dst, const AVFrame *src,
                        int flags)
{
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)src->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint32_t map_flags = 0;
    int ret;
    int i;
    enum AVPixelFormat format;

    format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
    if (dst->format != format) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported or mismatching pixel format: %s\n",
               av_fourcc2str(pixel_format));
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferGetWidth(pixbuf)  != ctx->width ||
        CVPixelBufferGetHeight(pixbuf) != ctx->height) {
        av_log(ctx, AV_LOG_ERROR, "Inconsistent frame dimensions.\n");
        return AVERROR_UNKNOWN;
    }

    if (flags == AV_HWFRAME_MAP_READ)
        map_flags = kCVPixelBufferLock_ReadOnly;

    err = CVPixelBufferLockBaseAddress(pixbuf, map_flags);
    if (err != kCVReturnSuccess) {
        av_log(ctx, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {
        int planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            dst->data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            dst->linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        dst->data[0]     = CVPixelBufferGetBaseAddress(pixbuf);
        dst->linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    ret = ff_hwframe_map_create(src->hw_frames_ctx, dst, src, vt_unmap,
                                (void *)(uintptr_t)map_flags);
    if (ret < 0)
        goto unlock;

    return 0;

unlock:
    CVPixelBufferUnlockBaseAddress(pixbuf, map_flags);
    return ret;
}

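/* Download: map the hardware frame read-only and copy it into dst. */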
static int vt_transfer_data_from(AVHWFramesContext *hwfc,
                                 AVFrame *dst, const AVFrame *src)
{
    AVFrame *map;
    int err;

    if (dst->width > hwfc->width || dst->height > hwfc->height)
        return AVERROR(EINVAL);

    map = av_frame_alloc();
    if (!map)
        return AVERROR(ENOMEM);
    map->format = dst->format;

    err = vt_map_frame(hwfc, map, src, AV_HWFRAME_MAP_READ);
    if (err)
        goto fail;

    map->width  = dst->width;
    map->height = dst->height;

    err = av_frame_copy(dst, map);
    if (err)
        goto fail;

    err = 0;
fail:
    av_frame_free(&map);
    return err;
}

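/* Upload: map the hardware frame for writing and copy src into it. */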
static int vt_transfer_data_to(AVHWFramesContext *hwfc,
                               AVFrame *dst, const AVFrame *src)
{
    AVFrame *map;
    int err;

    if (src->width > hwfc->width || src->height > hwfc->height)
        return AVERROR(EINVAL);

    map = av_frame_alloc();
    if (!map)
        return AVERROR(ENOMEM);
    map->format = src->format;

    err = vt_map_frame(hwfc, map, dst, AV_HWFRAME_MAP_WRITE | AV_HWFRAME_MAP_OVERWRITE);
    if (err)
        goto fail;

    map->width  = src->width;
    map->height = src->height;

    err = av_frame_copy(map, src);
    if (err)
        goto fail;

    err = 0;
fail:
    av_frame_free(&map);
    return err;
}

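/* There is no real device state to create; only reject explicit device
 * selection, which VideoToolbox does not support. */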
static int vt_device_create(AVHWDeviceContext *ctx, const char *device,
                            AVDictionary *opts, int flags)
{
    if (device && device[0]) {
        av_log(ctx, AV_LOG_ERROR, "Device selection unsupported.\n");
        return AVERROR_UNKNOWN;
    }

    return 0;
}

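/* Backend registration for AV_HWDEVICE_TYPE_VIDEOTOOLBOX. */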
const HWContextType ff_hwcontext_type_videotoolbox = {
    .type                 = AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
    .name                 = "videotoolbox",

    .device_create        = vt_device_create,
    .frames_get_buffer    = vt_get_buffer,
    .transfer_get_formats = vt_transfer_get_formats,
    .transfer_data_to     = vt_transfer_data_to,
    .transfer_data_from   = vt_transfer_data_from,

    .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VIDEOTOOLBOX, AV_PIX_FMT_NONE },
};