Add support for picture_ptr field in MJpegDecodeContext
Signed-off-by: Anton Khirnov <anton@khirnov.net>
commit e0e3b8b297
parent f16055eedf
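In short, decoding now goes through s->picture_ptr instead of the embedded s->picture, and ff_mjpeg_decode_init() points picture_ptr at &s->picture only when the caller has not already set it. Below is a minimal, self-contained sketch of that fallback logic; the stub types and names (FrameStub, CtxStub, init_picture_ptr) are stand-ins for illustration only, not the real AVFrame or MJpegDecodeContext:

#include <stdio.h>

/* Stand-ins for AVFrame and MJpegDecodeContext -- illustration only. */
typedef struct FrameStub { int dummy; } FrameStub;

typedef struct CtxStub {
    FrameStub  picture;      /* embedded frame, as before this patch            */
    FrameStub *picture_ptr;  /* new field: frame that is actually decoded into  */
} CtxStub;

/* Mirrors the hunk added to ff_mjpeg_decode_init(): fall back to the
 * embedded frame unless the caller already supplied a pointer. */
static void init_picture_ptr(CtxStub *s)
{
    if (!s->picture_ptr)
        s->picture_ptr = &s->picture;
}

int main(void)
{
    CtxStub standalone = {0};            /* plain MJPEG use: pointer left unset */
    init_picture_ptr(&standalone);
    printf("standalone uses embedded frame: %d\n",
           standalone.picture_ptr == &standalone.picture);

    FrameStub external;                  /* a wrapping decoder's own frame      */
    CtxStub wrapped = {0};
    wrapped.picture_ptr = &external;     /* pre-set before init                 */
    init_picture_ptr(&wrapped);          /* pointer is left untouched           */
    printf("wrapper keeps its own frame: %d\n",
           wrapped.picture_ptr == &external);
    return 0;
}

Presumably this is what lets a decoder that embeds MJpegDecodeContext hand in its own frame and have the MJPEG code decode straight into it, while standalone MJPEG decoding keeps the old behaviour.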
@@ -262,9 +262,9 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
     JLSState *state;
     int off = 0, stride = 1, width, shift;
 
-    zero = av_mallocz(s->picture.linesize[0]);
+    zero = av_mallocz(s->picture_ptr->linesize[0]);
     last = zero;
-    cur = s->picture.data[0];
+    cur = s->picture_ptr->data[0];
 
     state = av_mallocz(sizeof(JLSState));
    /* initialize JPEG-LS state from JPEG parameters */
@@ -299,7 +299,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
                 t = *((uint16_t*)last);
             }
             last = cur;
-            cur += s->picture.linesize[0];
+            cur += s->picture_ptr->linesize[0];
 
             if (s->restart_interval && !--s->restart_count) {
                 align_get_bits(&s->gb);
@@ -309,7 +309,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
     } else if(ilv == 1) { /* line interleaving */
         int j;
         int Rc[3] = {0, 0, 0};
-        memset(cur, 0, s->picture.linesize[0]);
+        memset(cur, 0, s->picture_ptr->linesize[0]);
         width = s->width * 3;
         for(i = 0; i < s->height; i++) {
             for(j = 0; j < 3; j++) {
@@ -322,7 +322,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
                 }
             }
             last = cur;
-            cur += s->picture.linesize[0];
+            cur += s->picture_ptr->linesize[0];
         }
     } else if(ilv == 2) { /* sample interleaving */
         av_log(s->avctx, AV_LOG_ERROR, "Sample interleaved images are not supported.\n");
@@ -337,22 +337,22 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
         w = s->width * s->nb_components;
 
         if(s->bits <= 8){
-            uint8_t *src = s->picture.data[0];
+            uint8_t *src = s->picture_ptr->data[0];
 
             for(i = 0; i < s->height; i++){
                 for(x = off; x < w; x+= stride){
                     src[x] <<= shift;
                 }
-                src += s->picture.linesize[0];
+                src += s->picture_ptr->linesize[0];
             }
         }else{
-            uint16_t *src = (uint16_t*) s->picture.data[0];
+            uint16_t *src = (uint16_t*) s->picture_ptr->data[0];
 
             for(i = 0; i < s->height; i++){
                 for(x = 0; x < w; x++){
                     src[x] <<= shift;
                 }
-                src += s->picture.linesize[0]/2;
+                src += s->picture_ptr->linesize[0]/2;
             }
         }
     }
@@ -129,7 +129,7 @@ read_header:
 
     //XXX FIXME factorize, this looks very similar to the EOI code
 
-    *picture= s->picture;
+    *picture= *s->picture_ptr;
     *data_size = sizeof(AVFrame);
 
     if(!s->lossless){
@@ -81,6 +81,9 @@ av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
 {
     MJpegDecodeContext *s = avctx->priv_data;
 
+    if (!s->picture_ptr)
+        s->picture_ptr = &s->picture;
+
     s->avctx = avctx;
     dsputil_init(&s->dsp, avctx);
     ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
@@ -282,8 +285,8 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
         s->height < ((s->org_height * 3) / 4)) {
         s->interlaced = 1;
         s->bottom_field = s->interlace_polarity;
-        s->picture.interlaced_frame = 1;
-        s->picture.top_field_first = !s->interlace_polarity;
+        s->picture_ptr->interlaced_frame = 1;
+        s->picture_ptr->top_field_first = !s->interlace_polarity;
         height *= 2;
     }
 
@@ -342,20 +345,19 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
         s->avctx->pix_fmt = PIX_FMT_GRAY16;
     }
 
-    if(s->picture.data[0])
-        s->avctx->release_buffer(s->avctx, &s->picture);
+    if(s->picture_ptr->data[0])
+        s->avctx->release_buffer(s->avctx, s->picture_ptr);
 
-    s->picture.reference= 0;
-    if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){
+    if(s->avctx->get_buffer(s->avctx, s->picture_ptr) < 0){
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return -1;
     }
-    s->picture.pict_type= FF_I_TYPE;
-    s->picture.key_frame= 1;
+    s->picture_ptr->pict_type= FF_I_TYPE;
+    s->picture_ptr->key_frame= 1;
     s->got_picture = 1;
 
     for(i=0; i<3; i++){
-        s->linesize[i]= s->picture.linesize[i] << s->interlaced;
+        s->linesize[i]= s->picture_ptr->linesize[i] << s->interlaced;
     }
 
 //    printf("%d %d %d %d %d %d\n", s->width, s->height, s->linesize[0], s->linesize[1], s->interlaced, s->avctx->height);
@@ -635,7 +637,7 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int predictor, int point
     }
     for(mb_y = 0; mb_y < s->mb_height; mb_y++) {
         const int modified_predictor= mb_y ? predictor : 1;
-        uint8_t *ptr = s->picture.data[0] + (linesize * mb_y);
+        uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
 
         if (s->interlaced && s->bottom_field)
             ptr += linesize >> 1;
@@ -712,7 +714,7 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point
                 for(j=0; j<n; j++) {
                     int pred;
 
-                    ptr = s->picture.data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+                    ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                     if(y==0 && mb_y==0){
                         if(x==0 && mb_x==0){
                             pred= 128 << point_transform;
@@ -752,7 +754,7 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point
                 for(j=0; j<n; j++) {
                     int pred;
 
-                    ptr = s->picture.data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+                    ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                     PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
                     *ptr= pred + (mjpeg_decode_dc(s, s->dc_index[i]) << point_transform);
                     if (++x == h) {
@@ -804,7 +806,7 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, i
     }
     for(i=0; i < nb_components; i++) {
         int c = s->comp_index[i];
-        data[c] = s->picture.data[c];
+        data[c] = s->picture_ptr->data[c];
         reference_data[c] = reference ? reference->data[c] : NULL;
         linesize[c]=s->linesize[c];
         s->coefs_finished[c] |= 1;
@@ -889,7 +891,7 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int s
     int mb_x, mb_y;
     int EOBRUN = 0;
     int c = s->comp_index[0];
-    uint8_t* data = s->picture.data[c];
+    uint8_t* data = s->picture_ptr->data[c];
     const uint8_t *reference_data = reference ? reference->data[c] : NULL;
     int linesize = s->linesize[c];
     int last_scan = 0;
@@ -1521,7 +1523,7 @@ eoi_parser:
                     if (s->bottom_field == !s->interlace_polarity)
                         goto not_the_end;
                 }
-                *picture = s->picture;
+                *picture = *s->picture_ptr;
                 *data_size = sizeof(AVFrame);
 
                 if(!s->lossless){
@@ -1593,8 +1595,8 @@ av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
     MJpegDecodeContext *s = avctx->priv_data;
     int i, j;
 
-    if (s->picture.data[0])
-        avctx->release_buffer(avctx, &s->picture);
+    if (s->picture_ptr && s->picture_ptr->data[0])
+        avctx->release_buffer(avctx, s->picture_ptr);
 
     av_free(s->buffer);
     av_free(s->qscale_table);
@@ -81,6 +81,7 @@ typedef struct MJpegDecodeContext {
     int quant_index[4]; /* quant table index for each component */
     int last_dc[MAX_COMPONENTS]; /* last DEQUANTIZED dc (XXX: am I right to do that ?) */
     AVFrame picture; /* picture structure */
+    AVFrame *picture_ptr; /* pointer to picture structure */
     int got_picture; ///< we found a SOF and picture is valid, too.
     int linesize[MAX_COMPONENTS]; ///< linesize << interlaced
     int8_t *qscale_table;