
Merge commit '1df0b061621b10edde87e3ab7ea83aed381c574f'

* commit '1df0b061621b10edde87e3ab7ea83aed381c574f':
  nuv: Reuse the DSPContext from RTJpegContext

Conflicts:
	libavcodec/nuv.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer, 2014-03-22 21:58:04 +01:00
commit 32da2fd422

3 changed files with 27 additions and 23 deletions
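
For orientation, the snippet below summarizes the call-order change that the diffs that follow make in nuv.c. It is an illustrative sketch only, not standalone-compilable code: it leans on the FFmpeg-internal NuvContext/RTJpegContext fields (c->rtj, c->dsp, c->lq, c->cq) exactly as they appear in the diffs.

/* Illustrative sketch only; not standalone-compilable, since it uses the
 * FFmpeg-internal fields touched by this merge (c->rtj, c->dsp, c->lq,
 * c->cq from NuvContext). It shows the call-order change made below. */

/* Before: nuv.c owned a DSPContext and passed it on every (re)init. */
ff_dsputil_init(&c->dsp, avctx);                   /* in decode_init()       */
ff_rtjpeg_decode_init(&c->rtj, &c->dsp,
                      c->width, c->height, c->lq, c->cq);

/* After: RTJpegContext owns its DSPContext. ff_rtjpeg_init() does the
 * one-time DSP and scan-table setup; ff_rtjpeg_decode_init() only refreshes
 * dimensions and quantization tables when size or quality changes. */
ff_rtjpeg_init(&c->rtj, avctx);                    /* once, in decode_init() */
ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);

The point of the split is that the DSPContext now lives inside RTJpegContext, so only the one-time ff_rtjpeg_init() needs an AVCodecContext, and repeated reinits no longer re-pass a DSP pointer.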

libavcodec/nuv.c

@@ -41,7 +41,6 @@ typedef struct {
     unsigned char *decomp_buf;
     uint32_t lq[64], cq[64];
     RTJpegContext rtj;
-    DSPContext dsp;
 } NuvContext;

 static const uint8_t fallback_lquant[] = {
@@ -139,13 +138,11 @@ static int codec_reinit(AVCodecContext *avctx, int width, int height,
                    "Can't allocate decompression buffer.\n");
             return AVERROR(ENOMEM);
         }
-        ff_rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height,
-                              c->lq, c->cq);
+        ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);
         av_frame_unref(c->pic);
         return 1;
     } else if (quality != c->quality)
-        ff_rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height,
-                              c->lq, c->cq);
+        ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);

     return 0;
 }
@@ -184,8 +181,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         ret = get_quant(avctx, c, buf, buf_size);
         if (ret < 0)
             return ret;
-        ff_rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq,
-                              c->cq);
+        ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);
         return orig_size;
     }

@@ -323,7 +319,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     if (avctx->extradata_size)
         get_quant(avctx, c, avctx->extradata, avctx->extradata_size);

-    ff_dsputil_init(&c->dsp, avctx);
+    ff_rtjpeg_init(&c->rtj, avctx);

     if ((ret = codec_reinit(avctx, avctx->width, avctx->height, -1)) < 0)
         return ret;

libavcodec/rtjpeg.c

@@ -121,7 +121,7 @@ int ff_rtjpeg_decode_frame_yuv420(RTJpegContext *c, AVFrame *f,
     if (res < 0) \
         return res; \
     if (res > 0) \
-        c->dsp->idct_put(dst, stride, block); \
+        c->dsp.idct_put(dst, stride, block); \
 } while (0)
             int16_t *block = c->block;
             BLOCK(c->lquant, y1, f->linesize[0]);
@@ -148,7 +148,6 @@ int ff_rtjpeg_decode_frame_yuv420(RTJpegContext *c, AVFrame *f,
 /**
  * @brief initialize an RTJpegContext, may be called multiple times
  * @param c context to initialize
- * @param dsp specifies the idct to use for decoding
  * @param width width of image, will be rounded down to the nearest multiple
  *              of 16 for decoding
  * @param height height of image, will be rounded down to the nearest multiple
@@ -156,21 +155,29 @@ int ff_rtjpeg_decode_frame_yuv420(RTJpegContext *c, AVFrame *f,
  * @param lquant luma quantization table to use
  * @param cquant chroma quantization table to use
  */
-void ff_rtjpeg_decode_init(RTJpegContext *c, DSPContext *dsp,
-                           int width, int height,
+void ff_rtjpeg_decode_init(RTJpegContext *c, int width, int height,
                            const uint32_t *lquant, const uint32_t *cquant) {
     int i;
-    c->dsp = dsp;
     for (i = 0; i < 64; i++) {
-        int z = ff_zigzag_direct[i];
-        int p = c->dsp->idct_permutation[i];
-        z = ((z << 3) | (z >> 3)) & 63; // rtjpeg uses a transposed variant
-
-        // permute the scan and quantization tables for the chosen idct
-        c->scan[i] = c->dsp->idct_permutation[z];
+        int p = c->dsp.idct_permutation[i];
         c->lquant[p] = lquant[i];
         c->cquant[p] = cquant[i];
     }
     c->w = width;
     c->h = height;
 }
+
+void ff_rtjpeg_init(RTJpegContext *c, AVCodecContext *avctx)
+{
+    int i;
+
+    ff_dsputil_init(&c->dsp, avctx);
+
+    for (i = 0; i < 64; i++) {
+        int z = ff_zigzag_direct[i];
+        z = ((z << 3) | (z >> 3)) & 63; // rtjpeg uses a transposed variant
+
+        // permute the scan and quantization tables for the chosen idct
+        c->scan[i] = c->dsp.idct_permutation[z];
+    }
+}
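
The scan-table setup that moves into ff_rtjpeg_init() hinges on the ((z << 3) | (z >> 3)) & 63 rotation, which transposes an 8x8 index (row*8+col becomes col*8+row) because RTJpeg stores coefficients in a transposed zigzag order. The standalone sketch below demonstrates only that table construction; it assumes the standard JPEG zigzag order for ff_zigzag_direct and an identity idct_permutation (what a plain C IDCT uses), and the local names zigzag_direct and scan are just for illustration.

/* Standalone sketch of the scan-table construction in ff_rtjpeg_init().
 * Assumptions: the table below is the standard JPEG zigzag order used by
 * FFmpeg's ff_zigzag_direct, and idct_permutation is the identity (as with
 * the plain C IDCT); a SIMD IDCT would reorder the result further. */
#include <stdio.h>
#include <stdint.h>

static const uint8_t zigzag_direct[64] = {
     0,  1,  8, 16,  9,  2,  3, 10,
    17, 24, 32, 25, 18, 11,  4,  5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13,  6,  7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};

int main(void)
{
    uint8_t scan[64];
    for (int i = 0; i < 64; i++) {
        int z = zigzag_direct[i];
        /* index = row * 8 + col; rotating the 6-bit index by 3 swaps the
         * row and column fields, i.e. transposes the 8x8 position */
        z = ((z << 3) | (z >> 3)) & 63;
        scan[i] = z;            /* identity idct_permutation assumed */
    }
    for (int i = 0; i < 64; i++)
        printf("%2d%c", scan[i], (i % 8 == 7) ? '\n' : ' ');
    return 0;
}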

libavcodec/rtjpeg.h

@@ -31,16 +31,17 @@
 typedef struct RTJpegContext {
     int w, h;
-    DSPContext *dsp;
+    DSPContext dsp;
     uint8_t scan[64];
     uint32_t lquant[64];
     uint32_t cquant[64];

     DECLARE_ALIGNED(16, int16_t, block)[64];
 } RTJpegContext;

-void ff_rtjpeg_decode_init(RTJpegContext *c, DSPContext *dsp,
-                           int width, int height,
-                           const uint32_t *lquant, const uint32_t *cquant);
+void ff_rtjpeg_init(RTJpegContext *c, AVCodecContext *avctx);
+
+void ff_rtjpeg_decode_init(RTJpegContext *c, int width, int height,
+                           const uint32_t *lquant, const uint32_t *cquant);

 int ff_rtjpeg_decode_frame_yuv420(RTJpegContext *c, AVFrame *f,
                                   const uint8_t *buf, int buf_size);