/*
 * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef SWSCALE_SWSCALE_INTERNAL_H
#define SWSCALE_SWSCALE_INTERNAL_H

#include <stdatomic.h>

#include "config.h"
#include "version.h"

#include "libavutil/avassert.h"
#include "libavutil/avutil.h"
#include "libavutil/common.h"
#include "libavutil/frame.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/log.h"
#include "libavutil/mem_internal.h"
#include "libavutil/pixfmt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/slicethread.h"
#include "libavutil/ppc/util_altivec.h"

#define STR(s) AV_TOSTRING(s) // AV_STRINGIFY is too long

#define YUVRGB_TABLE_HEADROOM 512
#define YUVRGB_TABLE_LUMA_HEADROOM 512

#define MAX_FILTER_SIZE SWS_MAX_FILTER_SIZE

#define DITHER1XBPP

#if HAVE_BIGENDIAN
#define ALT32_CORR (-1)
#else
#define ALT32_CORR   1
#endif

#if ARCH_X86_64
#   define APCK_PTR2  8
#   define APCK_COEF 16
#   define APCK_SIZE 24
#else
#   define APCK_PTR2  4
#   define APCK_COEF  8
#   define APCK_SIZE 16
#endif

#define RETCODE_USE_CASCADE -12345

struct SwsContext;

typedef enum SwsDither {
    SWS_DITHER_NONE = 0,
    SWS_DITHER_AUTO,
    SWS_DITHER_BAYER,
    SWS_DITHER_ED,
    SWS_DITHER_A_DITHER,
    SWS_DITHER_X_DITHER,
    NB_SWS_DITHER,
} SwsDither;

typedef enum SwsAlphaBlend {
    SWS_ALPHA_BLEND_NONE = 0,
    SWS_ALPHA_BLEND_UNIFORM,
    SWS_ALPHA_BLEND_CHECKERBOARD,
    SWS_ALPHA_BLEND_NB,
} SwsAlphaBlend;

typedef struct Range {
    unsigned int start;
    unsigned int len;
} Range;

typedef struct RangeList {
    Range       *ranges;
    unsigned int nb_ranges;
    int          ranges_allocated;
} RangeList;

int ff_range_add(RangeList *r, unsigned int start, unsigned int len);
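
/*
 * Illustrative sketch (assumed usage, not taken from the library): a caller
 * records each band of source rows it has received in a RangeList; the
 * variable names below are hypothetical and ff_range_add() is assumed to
 * return 0 on success and a negative AVERROR code on allocation failure.
 * @code
 * RangeList received = { 0 };
 * // source rows [slice_y, slice_y + slice_h) have been delivered
 * if (ff_range_add(&received, slice_y, slice_h) < 0)
 *     return AVERROR(ENOMEM);
 * @endcode
 */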

typedef int (*SwsFunc)(struct SwsContext *context, const uint8_t *src[],
                       int srcStride[], int srcSliceY, int srcSliceH,
                       uint8_t *dst[], int dstStride[]);

/**
 * Write one line of horizontally scaled data to planar output
 * without any additional vertical scaling (or point-scaling).
 *
 * @param src     scaled source data, 15 bits for 8-10-bit output,
 *                19 bits for 16-bit output (in int32_t)
 * @param dest    pointer to the output plane. For >8-bit
 *                output, this is in uint16_t
 * @param dstW    width of destination in pixels
 * @param dither  ordered dither array of type int16_t and size 8
 * @param offset  Dither offset
 */
typedef void (*yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW,
                               const uint8_t *dither, int offset);
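
/*
 * Minimal reference sketch of the 8-bit case described above (an assumed
 * illustration, not the optimized code used by the library):
 * @code
 * static void yuv2plane1_8_ref(const int16_t *src, uint8_t *dest, int dstW,
 *                              const uint8_t *dither, int offset)
 * {
 *     for (int i = 0; i < dstW; i++) {
 *         int val = (src[i] + dither[(i + offset) & 7]) >> 7;
 *         dest[i] = av_clip_uint8(val);
 *     }
 * }
 * @endcode
 */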

/**
 * Write one line of horizontally scaled data to planar output
 * with multi-point vertical scaling between input pixels.
 *
 * @param filter        vertical luma/alpha scaling coefficients, 12 bits [0,4096]
 * @param src           scaled luma (Y) or alpha (A) source data, 15 bits for
 *                      8-10-bit output, 19 bits for 16-bit output (in int32_t)
 * @param filterSize    number of vertical input lines to scale
 * @param dest          pointer to output plane. For >8-bit
 *                      output, this is in uint16_t
 * @param dstW          width of destination pixels
 * @param offset        Dither offset
 */
typedef void (*yuv2planarX_fn)(const int16_t *filter, int filterSize,
                               const int16_t **src, uint8_t *dest, int dstW,
                               const uint8_t *dither, int offset);

/**
 * Write one line of horizontally scaled chroma to interleaved output
 * with multi-point vertical scaling between input pixels.
 *
 * @param dstFormat     destination pixel format
 * @param chrDither     ordered dither array of type uint8_t and size 8
 * @param chrFilter     vertical chroma scaling coefficients, 12 bits [0,4096]
 * @param chrUSrc       scaled chroma (U) source data, 15 bits for 8-10-bit
 *                      output, 19 bits for 16-bit output (in int32_t)
 * @param chrVSrc       scaled chroma (V) source data, 15 bits for 8-10-bit
 *                      output, 19 bits for 16-bit output (in int32_t)
 * @param chrFilterSize number of vertical chroma input lines to scale
 * @param dest          pointer to the output plane. For >8-bit
 *                      output, this is in uint16_t
 * @param dstW          width of chroma planes
 */
typedef void (*yuv2interleavedX_fn)(enum AVPixelFormat dstFormat,
                                    const uint8_t *chrDither,
                                    const int16_t *chrFilter,
                                    int chrFilterSize,
                                    const int16_t **chrUSrc,
                                    const int16_t **chrVSrc,
                                    uint8_t *dest, int dstW);

/**
 * Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB
 * output without any additional vertical scaling (or point-scaling). Note
 * that this function may do chroma scaling, see the "uvalpha" argument.
 *
 * @param c       SWS scaling context
 * @param lumSrc  scaled luma (Y) source data, 15 bits for 8-10-bit output,
 *                19 bits for 16-bit output (in int32_t)
 * @param chrUSrc scaled chroma (U) source data, 15 bits for 8-10-bit output,
 *                19 bits for 16-bit output (in int32_t)
 * @param chrVSrc scaled chroma (V) source data, 15 bits for 8-10-bit output,
 *                19 bits for 16-bit output (in int32_t)
 * @param alpSrc  scaled alpha (A) source data, 15 bits for 8-10-bit output,
 *                19 bits for 16-bit output (in int32_t)
 * @param dest    pointer to the output plane. For 16-bit output, this is
 *                uint16_t
 * @param dstW    width of lumSrc and alpSrc in pixels, number of pixels
 *                to write into dest[]
 * @param uvalpha chroma scaling coefficient for the second line of chroma
 *                pixels, either 2048 or 0. If 0, one chroma input is used
 *                for 2 output pixels (or if the SWS_FLAG_FULL_CHR_INT flag
 *                is set, it generates 1 output pixel). If 2048, two chroma
 *                input pixels should be averaged for 2 output pixels (this
 *                only happens if SWS_FLAG_FULL_CHR_INT is not set)
 * @param y       vertical line number for this output. This does not need
 *                to be used to calculate the offset in the destination,
 *                but can be used to generate comfort noise using dithering
 *                for some output formats.
 */
typedef void (*yuv2packed1_fn)(struct SwsContext *c, const int16_t *lumSrc,
                               const int16_t *chrUSrc[2],
                               const int16_t *chrVSrc[2],
                               const int16_t *alpSrc, uint8_t *dest,
                               int dstW, int uvalpha, int y);

/**
 * Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB
 * output by doing bilinear scaling between two input lines.
 *
 * @param c       SWS scaling context
 * @param lumSrc  scaled luma (Y) source data, 15 bits for 8-10-bit output,
 *                19 bits for 16-bit output (in int32_t)
 * @param chrUSrc scaled chroma (U) source data, 15 bits for 8-10-bit output,
 *                19 bits for 16-bit output (in int32_t)
 * @param chrVSrc scaled chroma (V) source data, 15 bits for 8-10-bit output,
 *                19 bits for 16-bit output (in int32_t)
 * @param alpSrc  scaled alpha (A) source data, 15 bits for 8-10-bit output,
 *                19 bits for 16-bit output (in int32_t)
 * @param dest    pointer to the output plane. For 16-bit output, this is
 *                uint16_t
 * @param dstW    width of lumSrc and alpSrc in pixels, number of pixels
 *                to write into dest[]
 * @param yalpha  luma/alpha scaling coefficients for the second input line.
 *                The first line's coefficients can be calculated by using
 *                4096 - yalpha
 * @param uvalpha chroma scaling coefficient for the second input line. The
 *                first line's coefficients can be calculated by using
 *                4096 - uvalpha
 * @param y       vertical line number for this output. This does not need
 *                to be used to calculate the offset in the destination,
 *                but can be used to generate comfort noise using dithering
 *                for some output formats.
 */
typedef void (*yuv2packed2_fn)(struct SwsContext *c, const int16_t *lumSrc[2],
                               const int16_t *chrUSrc[2],
                               const int16_t *chrVSrc[2],
                               const int16_t *alpSrc[2],
                               uint8_t *dest,
                               int dstW, int yalpha, int uvalpha, int y);
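
/*
 * The per-pixel blend a yuv2packed2_fn performs between its two input lines
 * can be pictured as follows (a sketch under the 15-bit-input assumption
 * documented above, not the exact library code):
 * @code
 * int yalpha1 = 4096 - yalpha;
 * int Y = (lumSrc[0][i] * yalpha1 + lumSrc[1][i] * yalpha) >> 19; // back to 8 bits
 * @endcode
 */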

/**
 * Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB
 * output by doing multi-point vertical scaling between input pixels.
 *
 * @param c             SWS scaling context
 * @param lumFilter     vertical luma/alpha scaling coefficients, 12 bits [0,4096]
 * @param lumSrc        scaled luma (Y) source data, 15 bits for 8-10-bit output,
 *                      19 bits for 16-bit output (in int32_t)
 * @param lumFilterSize number of vertical luma/alpha input lines to scale
 * @param chrFilter     vertical chroma scaling coefficients, 12 bits [0,4096]
 * @param chrUSrc       scaled chroma (U) source data, 15 bits for 8-10-bit output,
 *                      19 bits for 16-bit output (in int32_t)
 * @param chrVSrc       scaled chroma (V) source data, 15 bits for 8-10-bit output,
 *                      19 bits for 16-bit output (in int32_t)
 * @param chrFilterSize number of vertical chroma input lines to scale
 * @param alpSrc        scaled alpha (A) source data, 15 bits for 8-10-bit output,
 *                      19 bits for 16-bit output (in int32_t)
 * @param dest          pointer to the output plane. For 16-bit output, this is
 *                      uint16_t
 * @param dstW          width of lumSrc and alpSrc in pixels, number of pixels
 *                      to write into dest[]
 * @param y             vertical line number for this output. This does not need
 *                      to be used to calculate the offset in the destination,
 *                      but can be used to generate comfort noise using dithering
 *                      for some output formats.
 */
typedef void (*yuv2packedX_fn)(struct SwsContext *c, const int16_t *lumFilter,
                               const int16_t **lumSrc, int lumFilterSize,
                               const int16_t *chrFilter,
                               const int16_t **chrUSrc,
                               const int16_t **chrVSrc, int chrFilterSize,
                               const int16_t **alpSrc, uint8_t *dest,
                               int dstW, int y);

/**
 * Write one line of horizontally scaled Y/U/V/A to YUV/RGB
 * output by doing multi-point vertical scaling between input pixels.
 *
 * @param c             SWS scaling context
 * @param lumFilter     vertical luma/alpha scaling coefficients, 12 bits [0,4096]
 * @param lumSrc        scaled luma (Y) source data, 15 bits for 8-10-bit output,
 *                      19 bits for 16-bit output (in int32_t)
 * @param lumFilterSize number of vertical luma/alpha input lines to scale
 * @param chrFilter     vertical chroma scaling coefficients, 12 bits [0,4096]
 * @param chrUSrc       scaled chroma (U) source data, 15 bits for 8-10-bit output,
 *                      19 bits for 16-bit output (in int32_t)
 * @param chrVSrc       scaled chroma (V) source data, 15 bits for 8-10-bit output,
 *                      19 bits for 16-bit output (in int32_t)
 * @param chrFilterSize number of vertical chroma input lines to scale
 * @param alpSrc        scaled alpha (A) source data, 15 bits for 8-10-bit output,
 *                      19 bits for 16-bit output (in int32_t)
 * @param dest          pointer to the output planes. For 16-bit output, this is
 *                      uint16_t
 * @param dstW          width of lumSrc and alpSrc in pixels, number of pixels
 *                      to write into dest[]
 * @param y             vertical line number for this output. This does not need
 *                      to be used to calculate the offset in the destination,
 *                      but can be used to generate comfort noise using dithering
 *                      for some output formats.
 */
typedef void (*yuv2anyX_fn)(struct SwsContext *c, const int16_t *lumFilter,
                            const int16_t **lumSrc, int lumFilterSize,
                            const int16_t *chrFilter,
                            const int16_t **chrUSrc,
                            const int16_t **chrVSrc, int chrFilterSize,
                            const int16_t **alpSrc, uint8_t **dest,
                            int dstW, int y);

struct SwsSlice;
struct SwsFilterDescriptor;

/* This struct should be aligned on at least a 32-byte boundary. */
typedef struct SwsContext {
    /**
     * info on struct for av_log
     */
    const AVClass *av_class;

    struct SwsContext *parent;

    AVSliceThread      *slicethread;
    struct SwsContext **slice_ctx;
    int                *slice_err;
    int              nb_slice_ctx;

    // values passed to current sws_receive_slice() call
    int dst_slice_start;
    int dst_slice_height;

    /**
     * Note that src, dst, srcStride, dstStride will be copied in the
     * sws_scale() wrapper so they can be freely modified here.
     */
    SwsFunc convert_unscaled;
    int srcW;                     ///< Width  of source      luma/alpha planes.
    int srcH;                     ///< Height of source      luma/alpha planes.
    int dstH;                     ///< Height of destination luma/alpha planes.
    int chrSrcW;                  ///< Width  of source      chroma planes.
    int chrSrcH;                  ///< Height of source      chroma planes.
    int chrDstW;                  ///< Width  of destination chroma planes.
    int chrDstH;                  ///< Height of destination chroma planes.
    int lumXInc, chrXInc;
    int lumYInc, chrYInc;
    enum AVPixelFormat dstFormat; ///< Destination pixel format.
    enum AVPixelFormat srcFormat; ///< Source      pixel format.
    int dstFormatBpp;             ///< Number of bits per pixel of the destination pixel format.
    int srcFormatBpp;             ///< Number of bits per pixel of the source pixel format.
    int dstBpc, srcBpc;
    int chrSrcHSubSample;         ///< Binary logarithm of horizontal subsampling factor between luma/alpha and chroma planes in source image.
    int chrSrcVSubSample;         ///< Binary logarithm of vertical subsampling factor between luma/alpha and chroma planes in source image.
    int chrDstHSubSample;         ///< Binary logarithm of horizontal subsampling factor between luma/alpha and chroma planes in destination image.
    int chrDstVSubSample;         ///< Binary logarithm of vertical subsampling factor between luma/alpha and chroma planes in destination image.
    int vChrDrop;                 ///< Binary logarithm of extra vertical subsampling factor in source image chroma planes specified by user.
    int sliceDir;                 ///< Direction that slices are fed to the scaler (1 = top-to-bottom, -1 = bottom-to-top).
    int nb_threads;               ///< Number of threads used for scaling
    double param[2];              ///< Input parameters for scaling algorithms that need them.

    AVFrame *frame_src;
    AVFrame *frame_dst;

    RangeList src_ranges;

    /* The cascaded_* fields allow splitting a scaler task into multiple
     * sequential steps, this is for example used to limit the maximum
     * downscaling factor that needs to be supported in one scaler.
     */
    struct SwsContext *cascaded_context[3];
    int cascaded_tmpStride[4];
    uint8_t *cascaded_tmp[4];
    int cascaded1_tmpStride[4];
    uint8_t *cascaded1_tmp[4];
    int cascaded_mainindex;

    double gamma_value;
    int gamma_flag;
    int is_internal_gamma;
    uint16_t *gamma;
    uint16_t *inv_gamma;

    int numDesc;
    int descIndex[2];
    int numSlice;
    struct SwsSlice *slice;
    struct SwsFilterDescriptor *desc;

    uint32_t pal_yuv[256];
    uint32_t pal_rgb[256];

    float uint2float_lut[256];

    /**
     * @name Scaled horizontal lines ring buffer.
     * The horizontal scaler keeps just enough scaled lines in a ring buffer
     * so they may be passed to the vertical scaler. The pointers to the
     * allocated buffers for each line are duplicated in sequence in the ring
     * buffer to simplify indexing and avoid wrapping around between lines
     * inside the vertical scaler code. The wrapping is done before the
     * vertical scaler is called.
     */
    //@{
    int lastInLumBuf;             ///< Last scaled horizontal luma/alpha line from source in the ring buffer.
    int lastInChrBuf;             ///< Last scaled horizontal chroma line from source in the ring buffer.
    //@}

    uint8_t *formatConvBuffer;
    int needAlpha;

    /**
     * @name Horizontal and vertical filters.
     * To better understand the following fields, here is a pseudo-code of
     * their usage in filtering a horizontal line:
     * @code
     * for (i = 0; i < width; i++) {
     *     dst[i] = 0;
     *     for (j = 0; j < filterSize; j++)
     *         dst[i] += src[filterPos[i] + j] * filter[filterSize * i + j];
     *     dst[i] >>= FRAC_BITS; // The actual implementation is fixed-point.
     * }
     * @endcode
     */
    //@{
    int16_t *hLumFilter;          ///< Array of horizontal filter coefficients for luma/alpha planes.
    int16_t *hChrFilter;          ///< Array of horizontal filter coefficients for chroma planes.
    int16_t *vLumFilter;          ///< Array of vertical filter coefficients for luma/alpha planes.
    int16_t *vChrFilter;          ///< Array of vertical filter coefficients for chroma planes.
    int32_t *hLumFilterPos;       ///< Array of horizontal filter starting positions for each dst[i] for luma/alpha planes.
    int32_t *hChrFilterPos;       ///< Array of horizontal filter starting positions for each dst[i] for chroma planes.
    int32_t *vLumFilterPos;       ///< Array of vertical filter starting positions for each dst[i] for luma/alpha planes.
    int32_t *vChrFilterPos;       ///< Array of vertical filter starting positions for each dst[i] for chroma planes.
    int hLumFilterSize;           ///< Horizontal filter size for luma/alpha pixels.
    int hChrFilterSize;           ///< Horizontal filter size for chroma pixels.
    int vLumFilterSize;           ///< Vertical filter size for luma/alpha pixels.
    int vChrFilterSize;           ///< Vertical filter size for chroma pixels.
    //@}

    int lumMmxextFilterCodeSize;  ///< Runtime-generated MMXEXT horizontal fast bilinear scaler code size for luma/alpha planes.
    int chrMmxextFilterCodeSize;  ///< Runtime-generated MMXEXT horizontal fast bilinear scaler code size for chroma planes.
    uint8_t *lumMmxextFilterCode; ///< Runtime-generated MMXEXT horizontal fast bilinear scaler code for luma/alpha planes.
    uint8_t *chrMmxextFilterCode; ///< Runtime-generated MMXEXT horizontal fast bilinear scaler code for chroma planes.

    int canMMXEXTBeUsed;
    int warned_unuseable_bilinear;

    int dstY;                     ///< Last destination vertical line output from last slice.
    int flags;                    ///< Flags passed by the user to select scaler algorithm, optimizations, subsampling, etc...
    void *yuvTable;               // pointer to the yuv->rgb table start so it can be freed()
    // alignment ensures the offset can be added in a single
    // instruction on e.g. ARM
    DECLARE_ALIGNED(16, int, table_gV)[256 + 2*YUVRGB_TABLE_HEADROOM];
    uint8_t *table_rV[256 + 2*YUVRGB_TABLE_HEADROOM];
    uint8_t *table_gU[256 + 2*YUVRGB_TABLE_HEADROOM];
    uint8_t *table_bU[256 + 2*YUVRGB_TABLE_HEADROOM];
    DECLARE_ALIGNED(16, int32_t, input_rgb2yuv_table)[16+40*4]; // This table can contain both C and SIMD formatted values, the C values are always at the XY_IDX points
#define RY_IDX 0
#define GY_IDX 1
#define BY_IDX 2
#define RU_IDX 3
#define GU_IDX 4
#define BU_IDX 5
#define RV_IDX 6
#define GV_IDX 7
#define BV_IDX 8
#define RGB2YUV_SHIFT 15

    int *dither_error[4];

    //Colorspace stuff
    int contrast, brightness, saturation;    // for sws_getColorspaceDetails
    int srcColorspaceTable[4];
    int dstColorspaceTable[4];
    int srcRange;                 ///< 0 = MPG YUV range, 1 = JPG YUV range (source image).
    int dstRange;                 ///< 0 = MPG YUV range, 1 = JPG YUV range (destination image).
    int src0Alpha;
    int dst0Alpha;
    int srcXYZ;
    int dstXYZ;
    int src_h_chr_pos;
    int dst_h_chr_pos;
    int src_v_chr_pos;
    int dst_v_chr_pos;
    int yuv2rgb_y_offset;
    int yuv2rgb_y_coeff;
    int yuv2rgb_v2r_coeff;
    int yuv2rgb_v2g_coeff;
    int yuv2rgb_u2g_coeff;
    int yuv2rgb_u2b_coeff;

#define RED_DITHER            "0*8"
#define GREEN_DITHER          "1*8"
#define BLUE_DITHER           "2*8"
#define Y_COEFF               "3*8"
#define VR_COEFF              "4*8"
#define UB_COEFF              "5*8"
#define VG_COEFF              "6*8"
#define UG_COEFF              "7*8"
#define Y_OFFSET              "8*8"
#define U_OFFSET              "9*8"
#define V_OFFSET              "10*8"
#define LUM_MMX_FILTER_OFFSET "11*8"
#define CHR_MMX_FILTER_OFFSET "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)
#define DSTW_OFFSET           "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2"
#define ESP_OFFSET            "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2+8"
#define VROUNDER_OFFSET       "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2+16"
#define U_TEMP                "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2+24"
#define V_TEMP                "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2+32"
#define Y_TEMP                "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2+40"
#define ALP_MMX_FILTER_OFFSET "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*2+48"
#define UV_OFF_PX             "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*3+48"
#define UV_OFF_BYTE           "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*3+56"
#define DITHER16              "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*3+64"
#define DITHER32              "11*8+4*4*"AV_STRINGIFY(MAX_FILTER_SIZE)"*3+80"
#define DITHER32_INT          (11*8+4*4*MAX_FILTER_SIZE*3+80) // value equal to above, used for checking that the struct hasn't been changed by mistake

    DECLARE_ALIGNED(8, uint64_t, redDither);
    DECLARE_ALIGNED(8, uint64_t, greenDither);
    DECLARE_ALIGNED(8, uint64_t, blueDither);
    DECLARE_ALIGNED(8, uint64_t, yCoeff);
    DECLARE_ALIGNED(8, uint64_t, vrCoeff);
    DECLARE_ALIGNED(8, uint64_t, ubCoeff);
    DECLARE_ALIGNED(8, uint64_t, vgCoeff);
    DECLARE_ALIGNED(8, uint64_t, ugCoeff);
    DECLARE_ALIGNED(8, uint64_t, yOffset);
    DECLARE_ALIGNED(8, uint64_t, uOffset);
    DECLARE_ALIGNED(8, uint64_t, vOffset);
    int32_t lumMmxFilter[4 * MAX_FILTER_SIZE];
    int32_t chrMmxFilter[4 * MAX_FILTER_SIZE];
    int dstW;                     ///< Width of destination luma/alpha planes.
    DECLARE_ALIGNED(8, uint64_t, esp);
    DECLARE_ALIGNED(8, uint64_t, vRounder);
    DECLARE_ALIGNED(8, uint64_t, u_temp);
    DECLARE_ALIGNED(8, uint64_t, v_temp);
    DECLARE_ALIGNED(8, uint64_t, y_temp);
    int32_t alpMmxFilter[4 * MAX_FILTER_SIZE];
    // alignment of these values is not necessary, but merely here
    // to maintain the same offset across x86-32 and x86-64. Once we
    // use proper offset macros in the asm, they can be removed.
    DECLARE_ALIGNED(8, ptrdiff_t, uv_off);   ///< offset (in pixels) between u and v planes
    DECLARE_ALIGNED(8, ptrdiff_t, uv_offx2); ///< offset (in bytes) between u and v planes
    DECLARE_ALIGNED(8, uint16_t, dither16)[8];
    DECLARE_ALIGNED(8, uint32_t, dither32)[8];

    const uint8_t *chrDither8, *lumDither8;

#if HAVE_ALTIVEC
    vector signed short   CY;
    vector signed short   CRV;
    vector signed short   CBU;
    vector signed short   CGU;
    vector signed short   CGV;
    vector signed short   OY;
    vector unsigned short CSHIFT;
    vector signed short  *vYCoeffsBank, *vCCoeffsBank;
#endif

    int use_mmx_vfilter;

    /* predefined color-space gamma */
#define XYZ_GAMMA (2.6f)
#define RGB_GAMMA (2.2f)
    int16_t *xyzgamma;
    int16_t *rgbgamma;
    int16_t *xyzgammainv;
    int16_t *rgbgammainv;
    int16_t xyz2rgb_matrix[3][4];
    int16_t rgb2xyz_matrix[3][4];

    /* function pointers for swscale() */
    yuv2planar1_fn yuv2plane1;
    yuv2planarX_fn yuv2planeX;
    yuv2interleavedX_fn yuv2nv12cX;
    yuv2packed1_fn yuv2packed1;
    yuv2packed2_fn yuv2packed2;
    yuv2packedX_fn yuv2packedX;
    yuv2anyX_fn yuv2anyX;

    /// Unscaled conversion of luma plane to YV12 for horizontal scaler.
    void (*lumToYV12)(uint8_t *dst, const uint8_t *src, const uint8_t *src2, const uint8_t *src3,
                      int width, uint32_t *pal);
    /// Unscaled conversion of alpha plane to YV12 for horizontal scaler.
    void (*alpToYV12)(uint8_t *dst, const uint8_t *src, const uint8_t *src2, const uint8_t *src3,
                      int width, uint32_t *pal);
    /// Unscaled conversion of chroma planes to YV12 for horizontal scaler.
    void (*chrToYV12)(uint8_t *dstU, uint8_t *dstV,
                      const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
                      int width, uint32_t *pal);

    /**
     * Functions to read planar input, such as planar RGB, and convert
     * internally to Y/UV/A.
     */
    /** @{ */
    void (*readLumPlanar)(uint8_t *dst, const uint8_t *src[4], int width, int32_t *rgb2yuv);
    void (*readChrPlanar)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src[4],
                          int width, int32_t *rgb2yuv);
    void (*readAlpPlanar)(uint8_t *dst, const uint8_t *src[4], int width, int32_t *rgb2yuv);
    /** @} */

    /**
     * Scale one horizontal line of input data using a bilinear filter
     * to produce one line of output data. Compared to SwsContext->hScale(),
     * please take note of the following caveats when using these:
     * - Scaling is done using only 7 bits instead of 14-bit coefficients.
     * - You can use no more than 5 input pixels to produce 4 output
     *   pixels. Therefore, this filter should not be used for downscaling
     *   by more than ~20% in width (because that equals more than 5/4th
     *   downscaling and thus more than 5 pixels input per 4 pixels output).
     * - In general, bilinear filters create artifacts during downscaling
     *   (even when <20%), because one output pixel will span more than one
     *   input pixel, and thus some pixels will need edges of both neighbor
     *   pixels to interpolate the output pixel. Since you can use at most
     *   two input pixels per output pixel in bilinear scaling, this is
     *   impossible and thus downscaling by any size will create artifacts.
     * To enable this type of scaling, set SWS_FLAG_FAST_BILINEAR
     * in SwsContext->flags.
     */
    /** @{ */
    void (*hyscale_fast)(struct SwsContext *c,
                         int16_t *dst, int dstWidth,
                         const uint8_t *src, int srcW, int xInc);
    void (*hcscale_fast)(struct SwsContext *c,
                         int16_t *dst1, int16_t *dst2, int dstWidth,
                         const uint8_t *src1, const uint8_t *src2,
                         int srcW, int xInc);
    /** @} */

    /**
     * Scale one horizontal line of input data using a filter over the input
     * lines, to produce one (differently sized) line of output data.
     *
     * @param dst        pointer to destination buffer for horizontally scaled
     *                   data. If the number of bits per component of one
     *                   destination pixel (SwsContext->dstBpc) is <= 10, data
     *                   will be 15 bpc in 16 bits (int16_t) width. Else (i.e.
     *                   SwsContext->dstBpc == 16), data will be 19 bpc in
     *                   32 bits (int32_t) width.
     * @param dstW       width of destination image
     * @param src        pointer to source data to be scaled. If the number of
     *                   bits per component of a source pixel (SwsContext->srcBpc)
     *                   is 8, this is 8 bpc in 8 bits (uint8_t) width. Else
     *                   (i.e. SwsContext->srcBpc > 8), this is native depth
     *                   in 16 bits (uint16_t) width. In other words, for 9-bit
     *                   YUV input, this is 9 bpc, for 10-bit YUV input, this is
     *                   10 bpc, and for 16-bit RGB or YUV, this is 16 bpc.
     * @param filter     filter coefficients to be used per output pixel for
     *                   scaling. This contains 14 bpp filtering coefficients.
     *                   Guaranteed to contain dstW * filterSize entries.
     * @param filterPos  position of the first input pixel to be used for
     *                   each output pixel during scaling. Guaranteed to
     *                   contain dstW entries.
     * @param filterSize the number of input coefficients to be used (and
     *                   thus the number of input pixels to be used) for
     *                   creating a single output pixel. Is aligned to 4
     *                   (and input coefficients thus padded with zeroes)
     *                   to simplify creating SIMD code.
     */
    /** @{ */
    void (*hyScale)(struct SwsContext *c, int16_t *dst, int dstW,
                    const uint8_t *src, const int16_t *filter,
                    const int32_t *filterPos, int filterSize);
    void (*hcScale)(struct SwsContext *c, int16_t *dst, int dstW,
                    const uint8_t *src, const int16_t *filter,
                    const int32_t *filterPos, int filterSize);
    /** @} */
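
    /*
     * Reference sketch of the hScale semantics for the 8-bit-input,
     * <=10-bit-output case (an illustration of the documented behaviour
     * above, not the optimized code actually installed here):
     * @code
     * static void hScale8To15_ref(struct SwsContext *c, int16_t *dst, int dstW,
     *                             const uint8_t *src, const int16_t *filter,
     *                             const int32_t *filterPos, int filterSize)
     * {
     *     for (int i = 0; i < dstW; i++) {
     *         int val = 0;
     *         for (int j = 0; j < filterSize; j++)
     *             val += src[filterPos[i] + j] * filter[filterSize * i + j];
     *         // 8 bpc samples times 14-bit coefficients, kept as clipped 15 bpc
     *         dst[i] = FFMIN(val >> 7, (1 << 15) - 1);
     *     }
     * }
     * @endcode
     */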

    /// Color range conversion function for luma plane if needed.
    void (*lumConvertRange)(int16_t *dst, int width);
    /// Color range conversion function for chroma planes if needed.
    void (*chrConvertRange)(int16_t *dst1, int16_t *dst2, int width);

    int needs_hcscale; ///< Set if there are chroma planes to be converted.

    SwsDither dither;

    SwsAlphaBlend alphablend;

    // scratch buffer for converting packed rgb0 sources
    // filled with a copy of the input frame + fully opaque alpha,
    // then passed as input to further conversion
    uint8_t     *rgb0_scratch;
    unsigned int rgb0_scratch_allocated;

    // scratch buffer for converting XYZ sources
    // filled with the input converted to rgb48
    // then passed as input to further conversion
    uint8_t     *xyz_scratch;
    unsigned int xyz_scratch_allocated;

    unsigned int dst_slice_align;

    /* Flags used to emit the "unaligned stride/data" warnings exactly once.
     * They are plain relaxed atomics (needed only for atomicity, not for
     * synchronization) and live in the user-facing parent SwsContext so that
     * all slice-threading child contexts share the same state. */
    atomic_int   stride_unaligned_warned;
    atomic_int   data_unaligned_warned;
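
    /*
     * One possible warn-once pattern using the relaxed atomics above (a
     * sketch of the intended usage, not necessarily the exact code in
     * swscale.c):
     * @code
     * if (!atomic_exchange_explicit(&c->stride_unaligned_warned, 1,
     *                               memory_order_relaxed))
     *     av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n");
     * @endcode
     */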
} SwsContext;
//FIXME check init (where 0)

SwsFunc ff_yuv2rgb_get_func_ptr(SwsContext *c);
int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
                             int fullRange, int brightness,
                             int contrast, int saturation);

void ff_yuv2rgb_init_tables_ppc(SwsContext *c, const int inv_table[4],
                                int brightness, int contrast, int saturation);

void ff_updateMMXDitherTables(SwsContext *c, int dstY);

av_cold void ff_sws_init_range_convert(SwsContext *c);

SwsFunc ff_yuv2rgb_init_x86(SwsContext *c);
SwsFunc ff_yuv2rgb_init_ppc(SwsContext *c);

static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return desc->comp[0].depth == 16;
}

static av_always_inline int is32BPS(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return desc->comp[0].depth == 32;
}

static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return desc->comp[0].depth >= 9 && desc->comp[0].depth <= 14;
}

static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return desc->flags & AV_PIX_FMT_FLAG_BE;
}

static av_always_inline int isYUV(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
}

static av_always_inline int isPlanarYUV(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return ((desc->flags & AV_PIX_FMT_FLAG_PLANAR) && isYUV(pix_fmt));
}

/*
 * Identify semi-planar YUV formats. Specifically, those are YUV formats
 * where the second and third components (U & V) are on the same plane.
 */
static av_always_inline int isSemiPlanarYUV(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return (isPlanarYUV(pix_fmt) && desc->comp[1].plane == desc->comp[2].plane);
}
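
/*
 * For example (illustrative only): NV12 keeps U and V interleaved on a single
 * plane, while YUV420P stores them on separate planes.
 * @code
 * av_assert0(isSemiPlanarYUV(AV_PIX_FMT_NV12));
 * av_assert0(!isSemiPlanarYUV(AV_PIX_FMT_YUV420P));
 * @endcode
 */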

static av_always_inline int isRGB(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return (desc->flags & AV_PIX_FMT_FLAG_RGB);
}

static av_always_inline int isGray(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return !(desc->flags & AV_PIX_FMT_FLAG_PAL) &&
           !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL) &&
           desc->nb_components <= 2 &&
           pix_fmt != AV_PIX_FMT_MONOBLACK &&
           pix_fmt != AV_PIX_FMT_MONOWHITE;
}

static av_always_inline int isRGBinInt(enum AVPixelFormat pix_fmt)
{
    return pix_fmt == AV_PIX_FMT_RGB48BE   ||
           pix_fmt == AV_PIX_FMT_RGB48LE   ||
           pix_fmt == AV_PIX_FMT_RGB32     ||
           pix_fmt == AV_PIX_FMT_RGB32_1   ||
           pix_fmt == AV_PIX_FMT_RGB24     ||
           pix_fmt == AV_PIX_FMT_RGB565BE  ||
           pix_fmt == AV_PIX_FMT_RGB565LE  ||
           pix_fmt == AV_PIX_FMT_RGB555BE  ||
           pix_fmt == AV_PIX_FMT_RGB555LE  ||
           pix_fmt == AV_PIX_FMT_RGB444BE  ||
           pix_fmt == AV_PIX_FMT_RGB444LE  ||
           pix_fmt == AV_PIX_FMT_RGB8      ||
           pix_fmt == AV_PIX_FMT_RGB4      ||
           pix_fmt == AV_PIX_FMT_RGB4_BYTE ||
           pix_fmt == AV_PIX_FMT_RGBA64BE  ||
           pix_fmt == AV_PIX_FMT_RGBA64LE  ||
           pix_fmt == AV_PIX_FMT_MONOBLACK ||
           pix_fmt == AV_PIX_FMT_MONOWHITE;
}

static av_always_inline int isBGRinInt(enum AVPixelFormat pix_fmt)
{
    return pix_fmt == AV_PIX_FMT_BGR48BE   ||
           pix_fmt == AV_PIX_FMT_BGR48LE   ||
           pix_fmt == AV_PIX_FMT_BGR32     ||
           pix_fmt == AV_PIX_FMT_BGR32_1   ||
           pix_fmt == AV_PIX_FMT_BGR24     ||
           pix_fmt == AV_PIX_FMT_BGR565BE  ||
           pix_fmt == AV_PIX_FMT_BGR565LE  ||
           pix_fmt == AV_PIX_FMT_BGR555BE  ||
           pix_fmt == AV_PIX_FMT_BGR555LE  ||
           pix_fmt == AV_PIX_FMT_BGR444BE  ||
           pix_fmt == AV_PIX_FMT_BGR444LE  ||
           pix_fmt == AV_PIX_FMT_BGR8      ||
           pix_fmt == AV_PIX_FMT_BGR4      ||
           pix_fmt == AV_PIX_FMT_BGR4_BYTE ||
           pix_fmt == AV_PIX_FMT_BGRA64BE  ||
           pix_fmt == AV_PIX_FMT_BGRA64LE  ||
           pix_fmt == AV_PIX_FMT_MONOBLACK ||
           pix_fmt == AV_PIX_FMT_MONOWHITE;
}

static av_always_inline int isBayer(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return !!(desc->flags & AV_PIX_FMT_FLAG_BAYER);
}

static av_always_inline int isBayer16BPS(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return desc->comp[1].depth == 8;
}

static av_always_inline int isAnyRGB(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return (desc->flags & AV_PIX_FMT_FLAG_RGB) ||
           pix_fmt == AV_PIX_FMT_MONOBLACK || pix_fmt == AV_PIX_FMT_MONOWHITE;
}

static av_always_inline int isFloat(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return desc->flags & AV_PIX_FMT_FLAG_FLOAT;
}

static av_always_inline int isALPHA(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    if (pix_fmt == AV_PIX_FMT_PAL8)
        return 1;
    return desc->flags & AV_PIX_FMT_FLAG_ALPHA;
}

static av_always_inline int isPacked(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return (desc->nb_components >= 2 && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) ||
            pix_fmt == AV_PIX_FMT_PAL8 ||
            pix_fmt == AV_PIX_FMT_MONOBLACK || pix_fmt == AV_PIX_FMT_MONOWHITE;
}

static av_always_inline int isPlanar(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return (desc->nb_components >= 2 && (desc->flags & AV_PIX_FMT_FLAG_PLANAR));
}

static av_always_inline int isPackedRGB(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return ((desc->flags & (AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB)) == AV_PIX_FMT_FLAG_RGB);
}

static av_always_inline int isPlanarRGB(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return ((desc->flags & (AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB)) ==
            (AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB));
}

static av_always_inline int usePal(enum AVPixelFormat pix_fmt)
{
    /* PAL8 plus the 1-byte-per-pixel formats that formerly carried a pseudo
     * palette (AV_PIX_FMT_FLAG_PSEUDOPAL, now deprecated). */
    switch (pix_fmt) {
    case AV_PIX_FMT_PAL8:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_RGB4_BYTE:
    case AV_PIX_FMT_RGB8:
        return 1;
    default:
        return 0;
    }
}

/*
 * Identify formats where the data is in the high bits, and the low bits are shifted away.
 */
static av_always_inline int isDataInHighBits(enum AVPixelFormat pix_fmt)
{
    int i;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL))
        return 0;
    for (i = 0; i < desc->nb_components; i++) {
        if (!desc->comp[i].shift)
            return 0;
        if ((desc->comp[i].shift + desc->comp[i].depth) & 0x7)
            return 0;
    }
    return 1;
}
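
/*
 * For example (illustrative only): P010 stores 10-bit samples in the high
 * bits of 16-bit words (shift 6, and 6 + 10 = 16), so it qualifies, whereas
 * YUV420P10 keeps its samples in the low bits and does not.
 * @code
 * av_assert0(isDataInHighBits(AV_PIX_FMT_P010LE));
 * av_assert0(!isDataInHighBits(AV_PIX_FMT_YUV420P10LE));
 * @endcode
 */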

/*
 * Identify formats where the chroma planes are swapped (CrCb order).
 */
static av_always_inline int isSwappedChroma(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    if (!isYUV(pix_fmt))
        return 0;
    if ((desc->flags & AV_PIX_FMT_FLAG_ALPHA) && desc->nb_components < 4)
        return 0;
    if (desc->nb_components < 3)
        return 0;
    if (!isPlanarYUV(pix_fmt) || isSemiPlanarYUV(pix_fmt))
        return desc->comp[1].offset > desc->comp[2].offset;
    else
        return desc->comp[1].plane > desc->comp[2].plane;
}

extern const uint64_t ff_dither4[2];
extern const uint64_t ff_dither8[2];

extern const uint8_t ff_dither_2x2_4[3][8];
extern const uint8_t ff_dither_2x2_8[3][8];
extern const uint8_t ff_dither_4x4_16[5][8];
extern const uint8_t ff_dither_8x8_32[9][8];
extern const uint8_t ff_dither_8x8_73[9][8];
extern const uint8_t ff_dither_8x8_128[9][8];
extern const uint8_t ff_dither_8x8_220[9][8];

extern const int32_t ff_yuv2rgb_coeffs[11][4];

extern const AVClass ff_sws_context_class;

/**
 * Set c->convert_unscaled to an unscaled converter if one exists for the
 * specific source and destination formats, bit depths, flags, etc.
 */
void ff_get_unscaled_swscale(SwsContext *c);
void ff_get_unscaled_swscale_ppc(SwsContext *c);
void ff_get_unscaled_swscale_arm(SwsContext *c);
void ff_get_unscaled_swscale_aarch64(SwsContext *c);

void ff_sws_init_scale(SwsContext *c);

void ff_sws_init_input_funcs(SwsContext *c);
void ff_sws_init_output_funcs(SwsContext *c,
                              yuv2planar1_fn *yuv2plane1,
                              yuv2planarX_fn *yuv2planeX,
                              yuv2interleavedX_fn *yuv2nv12cX,
                              yuv2packed1_fn *yuv2packed1,
                              yuv2packed2_fn *yuv2packed2,
                              yuv2packedX_fn *yuv2packedX,
                              yuv2anyX_fn *yuv2anyX);
void ff_sws_init_swscale_ppc(SwsContext *c);
void ff_sws_init_swscale_vsx(SwsContext *c);
void ff_sws_init_swscale_x86(SwsContext *c);
void ff_sws_init_swscale_aarch64(SwsContext *c);
void ff_sws_init_swscale_arm(SwsContext *c);

void ff_hyscale_fast_c(SwsContext *c, int16_t *dst, int dstWidth,
                       const uint8_t *src, int srcW, int xInc);
void ff_hcscale_fast_c(SwsContext *c, int16_t *dst1, int16_t *dst2,
                       int dstWidth, const uint8_t *src1,
                       const uint8_t *src2, int srcW, int xInc);

int ff_init_hscaler_mmxext(int dstW, int xInc, uint8_t *filterCode,
                           int16_t *filter, int32_t *filterPos,
                           int numSplits);
void ff_hyscale_fast_mmxext(SwsContext *c, int16_t *dst,
                            int dstWidth, const uint8_t *src,
                            int srcW, int xInc);
void ff_hcscale_fast_mmxext(SwsContext *c, int16_t *dst1, int16_t *dst2,
                            int dstWidth, const uint8_t *src1,
                            const uint8_t *src2, int srcW, int xInc);

/**
 * Allocate and return an SwsContext.
 * This is like sws_getContext() but does not perform the init step, allowing
 * the user to set additional AVOptions.
 *
 * @see sws_getContext()
 */
struct SwsContext *sws_alloc_set_opts(int srcW, int srcH, enum AVPixelFormat srcFormat,
                                      int dstW, int dstH, enum AVPixelFormat dstFormat,
                                      int flags, const double *param);

int ff_sws_alphablendaway(SwsContext *c, const uint8_t *src[],
                          int srcStride[], int srcSliceY, int srcSliceH,
                          uint8_t *dst[], int dstStride[]);

static inline void fillPlane16(uint8_t *plane, int stride, int width, int height, int y,
                               int alpha, int bits, const int big_endian)
{
    int i, j;
    uint8_t *ptr = plane + stride * y;
    int v = alpha ? 0xFFFF >> (16 - bits) : (1 << (bits - 1));

    for (i = 0; i < height; i++) {
#define FILL(wfunc) \
        for (j = 0; j < width; j++) { \
            wfunc(ptr + 2 * j, v); \
        }
        if (big_endian) {
            FILL(AV_WB16);
        } else {
            FILL(AV_WL16);
        }
        ptr += stride;
    }
#undef FILL
}
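
/*
 * Example use (illustrative, with hypothetical dst/dstStride/dstW variables):
 * fill 16 lines of a 10-bit little-endian alpha plane with full opacity,
 * starting at line 0:
 * @code
 * fillPlane16(dst[3], dstStride[3], dstW, 16, 0, 1, 10, 0);
 * @endcode
 */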

static inline void fillPlane32(uint8_t *plane, int stride, int width, int height, int y,
                               int alpha, int bits, const int big_endian, int is_float)
{
    int i, j;
    uint8_t *ptr = plane + stride * y;
    uint32_t v;
    uint32_t onef32 = 0x3f800000;
    if (is_float)
        v = alpha ? onef32 : 0;
    else
        v = alpha ? 0xFFFFFFFF >> (32 - bits) : (1 << (bits - 1));

    for (i = 0; i < height; i++) {
#define FILL(wfunc) \
        for (j = 0; j < width; j++) { \
            wfunc(ptr + 4 * j, v); \
        }
        if (big_endian) {
            FILL(AV_WB32);
        } else {
            FILL(AV_WL32);
        }
        ptr += stride;
    }
#undef FILL
}

#define MAX_SLICE_PLANES 4

/// Slice plane
typedef struct SwsPlane
{
    int available_lines;    ///< max number of lines that can be held by this plane
    int sliceY;             ///< index of first line
    int sliceH;             ///< number of lines
    uint8_t **line;         ///< line buffer
    uint8_t **tmp;          ///< Tmp line buffer used by mmx code
} SwsPlane;

/**
 * Struct which defines a slice of an image to be scaled or an output for
 * a scaled slice.
 * A slice can also be used as intermediate ring buffer for scaling steps.
 */
typedef struct SwsSlice
{
    int width;              ///< Slice line width
    int h_chr_sub_sample;   ///< horizontal chroma subsampling factor
    int v_chr_sub_sample;   ///< vertical chroma subsampling factor
    int is_ring;            ///< flag to identify if this slice is a ring buffer
    int should_free_lines;  ///< flag to identify if there are dynamically allocated lines
    enum AVPixelFormat fmt; ///< planes pixel format
    SwsPlane plane[MAX_SLICE_PLANES];   ///< color planes
} SwsSlice;

/**
 * Struct which holds all necessary data for processing a slice.
 * A processing step can be a color conversion or horizontal/vertical scaling.
 */
typedef struct SwsFilterDescriptor
{
    SwsSlice *src;  ///< Source slice
    SwsSlice *dst;  ///< Output slice

    int alpha;      ///< Flag for processing alpha channel
    void *instance; ///< Filter instance data

    /// Function for processing input slice sliceH lines starting from line sliceY
    int (*process)(SwsContext *c, struct SwsFilterDescriptor *desc, int sliceY, int sliceH);
} SwsFilterDescriptor;

// wrap input lines in the form (src + width*i + j) to slice format (line[i][j])
// relative=true means the first line is src[x][0], otherwise the first line is src[x][lum/chr Y]
int ff_init_slice_from_src(SwsSlice *s, uint8_t *src[4], int stride[4], int srcW, int lumY, int lumH, int chrY, int chrH, int relative);

// Initialize scaler filter descriptor chain
int ff_init_filters(SwsContext *c);

// Free all filter data
int ff_free_filters(SwsContext *c);

/*
 * Apply ring buffer logic to slice s:
 * if the slice can hold @lum more luma lines, do nothing, otherwise remove
 * the @lum least recently used lines; the same procedure is applied to the
 * @chr chroma lines.
 */
int ff_rotate_slice(SwsSlice *s, int lum, int chr);

/// initializes gamma conversion descriptor
int ff_init_gamma_convert(SwsFilterDescriptor *desc, SwsSlice *src, uint16_t *table);

/// initializes lum pixel format conversion descriptor
int ff_init_desc_fmt_convert(SwsFilterDescriptor *desc, SwsSlice *src, SwsSlice *dst, uint32_t *pal);

/// initializes lum horizontal scaling descriptor
int ff_init_desc_hscale(SwsFilterDescriptor *desc, SwsSlice *src, SwsSlice *dst, uint16_t *filter, int *filter_pos, int filter_size, int xInc);

/// initializes chr pixel format conversion descriptor
int ff_init_desc_cfmt_convert(SwsFilterDescriptor *desc, SwsSlice *src, SwsSlice *dst, uint32_t *pal);

/// initializes chr horizontal scaling descriptor
int ff_init_desc_chscale(SwsFilterDescriptor *desc, SwsSlice *src, SwsSlice *dst, uint16_t *filter, int *filter_pos, int filter_size, int xInc);

int ff_init_desc_no_chr(SwsFilterDescriptor *desc, SwsSlice *src, SwsSlice *dst);

/// initializes vertical scaling descriptors
int ff_init_vscale(SwsContext *c, SwsFilterDescriptor *desc, SwsSlice *src, SwsSlice *dst);

/// setup vertical scaler functions
void ff_init_vscale_pfn(SwsContext *c, yuv2planar1_fn yuv2plane1, yuv2planarX_fn yuv2planeX,
                        yuv2interleavedX_fn yuv2nv12cX, yuv2packed1_fn yuv2packed1,
                        yuv2packed2_fn yuv2packed2, yuv2packedX_fn yuv2packedX,
                        yuv2anyX_fn yuv2anyX, int use_mmx);

void ff_sws_slice_worker(void *priv, int jobnr, int threadnr,
                         int nb_jobs, int nb_threads);

// number of extra lines to process
#define MAX_LINES_AHEAD 4

// shuffle filter and filterPos for hyScale and hcScale filters in avx2
int ff_shuffle_filter_coefficients(SwsContext *c, int *filterPos, int filterSize, int16_t *filter, int dstW);

#endif /* SWSCALE_SWSCALE_INTERNAL_H */