You've already forked FFmpeg
mirror of
https://github.com/FFmpeg/FFmpeg.git
synced 2025-08-15 14:13:16 +02:00
avcodec/huffyuv: Use AVCodecContext.(width|height) directly
These parameters are easily accessible wherever they are accessed, so using copies from HYuvContext is unnecessary. Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
This commit is contained in:
@@ -55,12 +55,12 @@ int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int
|
||||
return 0;
|
||||
}
|
||||
|
||||
av_cold int ff_huffyuv_alloc_temp(HYuvContext *s)
|
||||
av_cold int ff_huffyuv_alloc_temp(HYuvContext *s, int width)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i=0; i<3; i++) {
|
||||
s->temp[i]= av_malloc(4*s->width + 16);
|
||||
s->temp[i] = av_malloc(4 * width + 16);
|
||||
if (!s->temp[i])
|
||||
return AVERROR(ENOMEM);
|
||||
s->temp16[i] = (uint16_t*)s->temp[i];
|
||||
@@ -75,11 +75,6 @@ av_cold void ff_huffyuv_common_init(AVCodecContext *avctx)
|
||||
s->flags = avctx->flags;
|
||||
|
||||
ff_bswapdsp_init(&s->bdsp);
|
||||
|
||||
s->width = avctx->width;
|
||||
s->height = avctx->height;
|
||||
|
||||
av_assert1(s->width > 0 && s->height > 0);
|
||||
}
|
||||
|
||||
av_cold void ff_huffyuv_common_end(HYuvContext *s)
|
||||
|
@@ -72,7 +72,6 @@ typedef struct HYuvContext {
|
||||
int yuv;
|
||||
int chroma_h_shift;
|
||||
int chroma_v_shift;
|
||||
int width, height;
|
||||
int flags;
|
||||
int context;
|
||||
int picture_number;
|
||||
@@ -96,7 +95,7 @@ typedef struct HYuvContext {
|
||||
|
||||
void ff_huffyuv_common_init(AVCodecContext *s);
|
||||
void ff_huffyuv_common_end(HYuvContext *s);
|
||||
int ff_huffyuv_alloc_temp(HYuvContext *s);
|
||||
int ff_huffyuv_alloc_temp(HYuvContext *s, int width);
|
||||
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n);
|
||||
|
||||
#endif /* AVCODEC_HUFFYUV_H */
|
||||
|
@@ -558,7 +558,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if ((ret = ff_huffyuv_alloc_temp(s)) < 0)
|
||||
if ((ret = ff_huffyuv_alloc_temp(s, avctx->width)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@@ -873,8 +873,8 @@ static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height,
|
||||
{
|
||||
HYuvContext *s = avctx->priv_data;
|
||||
int fake_ystride, fake_ustride, fake_vstride;
|
||||
const int width = s->width;
|
||||
const int width2 = s->width >> 1;
|
||||
const int width = avctx->width;
|
||||
const int width2 = avctx->width >> 1;
|
||||
int ret;
|
||||
|
||||
if ((ret = init_get_bits8(&s->gb, s->bitstream_buffer + table_size, buf_size - table_size)) < 0)
|
||||
@@ -1185,8 +1185,8 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
HYuvContext *s = avctx->priv_data;
|
||||
const int width = s->width;
|
||||
const int height = s->height;
|
||||
const int width = avctx->width;
|
||||
const int height = avctx->height;
|
||||
int slice, table_size = 0, ret, nb_slices;
|
||||
unsigned slices_info_offset;
|
||||
int slice_height;
|
||||
|
@@ -236,7 +236,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
switch (avctx->pix_fmt) {
|
||||
case AV_PIX_FMT_YUV420P:
|
||||
case AV_PIX_FMT_YUV422P:
|
||||
if (s->width & 1) {
|
||||
if (avctx->width & 1) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
@@ -310,7 +310,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
}
|
||||
|
||||
if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
|
||||
if (s->interlaced != ( s->height > 288 ))
|
||||
if (s->interlaced != ( avctx->height > 288 ))
|
||||
av_log(avctx, AV_LOG_INFO,
|
||||
"using huffyuv 2.2.0 or newer interlacing flag\n");
|
||||
}
|
||||
@@ -379,7 +379,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
|
||||
if (s->context) {
|
||||
for (i = 0; i < 4; i++) {
|
||||
int pels = s->width * s->height / (i ? 40 : 10);
|
||||
int pels = avctx->width * avctx->height / (i ? 40 : 10);
|
||||
for (j = 0; j < s->vlc_n; j++) {
|
||||
int d = FFMIN(j, s->vlc_n - j);
|
||||
s->stats[i][j] = pels/(d*d + 1);
|
||||
@@ -391,7 +391,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
s->stats[i][j]= 0;
|
||||
}
|
||||
|
||||
ret = ff_huffyuv_alloc_temp(s);
|
||||
ret = ff_huffyuv_alloc_temp(s, avctx->width);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@@ -715,9 +715,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
const AVFrame *pict, int *got_packet)
|
||||
{
|
||||
HYuvContext *s = avctx->priv_data;
|
||||
const int width = s->width;
|
||||
const int width2 = s->width>>1;
|
||||
const int height = s->height;
|
||||
const int width = avctx->width;
|
||||
const int width2 = avctx->width >> 1;
|
||||
const int height = avctx->height;
|
||||
const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
|
||||
const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
|
||||
const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
|
||||
@@ -848,7 +848,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
const uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
|
||||
const int stride = -p->linesize[0];
|
||||
const int fake_stride = -fake_ystride;
|
||||
int y;
|
||||
int leftr, leftg, leftb, lefta;
|
||||
|
||||
put_bits(&s->pb, 8, lefta = data[A]);
|
||||
@@ -860,7 +859,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
&leftr, &leftg, &leftb, &lefta);
|
||||
encode_bgra_bitstream(s, width - 1, 4);
|
||||
|
||||
for (y = 1; y < s->height; y++) {
|
||||
for (int y = 1; y < height; y++) {
|
||||
const uint8_t *dst = data + y*stride;
|
||||
if (s->predictor == PLANE && s->interlaced < y) {
|
||||
s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
|
||||
@@ -876,7 +875,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
const uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
|
||||
const int stride = -p->linesize[0];
|
||||
const int fake_stride = -fake_ystride;
|
||||
int y;
|
||||
int leftr, leftg, leftb;
|
||||
|
||||
put_bits(&s->pb, 8, leftr = data[0]);
|
||||
@@ -888,7 +886,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
&leftr, &leftg, &leftb);
|
||||
encode_bgra_bitstream(s, width-1, 3);
|
||||
|
||||
for (y = 1; y < s->height; y++) {
|
||||
for (int y = 1; y < height; y++) {
|
||||
const uint8_t *dst = data + y * stride;
|
||||
if (s->predictor == PLANE && s->interlaced < y) {
|
||||
s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
|
||||
|
Reference in New Issue
Block a user