commit 94a849b8b6
Merge commit 'd4f1188d1a662fed5347e70016da49e01563e8a8'

* commit 'd4f1188d1a662fed5347e70016da49e01563e8a8':
  dv: use AVFrame API properly

Conflicts:
	libavcodec/dvdec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
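For context, here is a condensed sketch of the decoder-side pattern this merge moves to. The names (DVVideoContext, DVprofile, ff_get_buffer, s->frame) follow the patch below, but the struct is trimmed to the fields shown in the dv.h hunk and the function body omits the actual slice decoding and error handling, so treat it as an illustration rather than the real FFmpeg source:

/*
 * Sketch only: the context now borrows the AVFrame the caller passes in,
 * so the decoder no longer owns a frame and needs neither
 * av_frame_move_ref()/av_frame_unref() nor a .close() callback.
 */
typedef struct DVVideoContext {
    const DVprofile *sys;
    AVFrame         *frame;   /* borrowed pointer, owned by the caller */
    AVCodecContext  *avctx;
    uint8_t         *buf;
} DVVideoContext;

static int dvvideo_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame, AVPacket *avpkt)
{
    DVVideoContext *s = avctx->priv_data;
    int ret;

    s->frame            = data;               /* decode straight into the caller's frame */
    s->frame->key_frame = 1;
    s->frame->pict_type = AV_PICTURE_TYPE_I;

    if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0)
        return ret;

    /* ... slice threads write into s->frame->data[] / linesize[] ... */

    *got_frame = 1;                            /* no av_frame_move_ref() needed anymore */
    return s->sys->frame_size;
}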
libavcodec/dv.c
@@ -320,8 +320,6 @@ av_cold int ff_dvvideo_init(AVCodecContext *avctx)
     }else
         memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64);
 
-    avcodec_get_frame_defaults(&s->picture);
-    avctx->coded_frame = &s->picture;
     s->avctx = avctx;
     avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
 
libavcodec/dv.h
@@ -34,7 +34,7 @@
 
 typedef struct DVVideoContext {
     const DVprofile *sys;
-    AVFrame          picture;
+    AVFrame         *frame;
     AVCodecContext  *avctx;
     uint8_t         *buf;
 
libavcodec/dvdec.c
@@ -259,12 +259,12 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
         if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P) ||
             (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
             (s->sys->height >= 720 && mb_y != 134)) {
-            y_stride = (s->picture.linesize[0] << ((!is_field_mode[mb_index]) * log2_blocksize));
+            y_stride = (s->frame->linesize[0] << ((!is_field_mode[mb_index]) * log2_blocksize));
         } else {
             y_stride = (2 << log2_blocksize);
         }
-        y_ptr = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x) << log2_blocksize);
-        linesize = s->picture.linesize[0] << is_field_mode[mb_index];
+        y_ptr = s->frame->data[0] + ((mb_y * s->frame->linesize[0] + mb_x) << log2_blocksize);
+        linesize = s->frame->linesize[0] << is_field_mode[mb_index];
         mb[0] .idct_put(y_ptr , linesize, block + 0*64);
         if (s->sys->video_stype == 4) { /* SD 422 */
             mb[2].idct_put(y_ptr + (1 << log2_blocksize) , linesize, block + 2*64);
@@ -277,19 +277,19 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
         block += 4*64;
 
         /* idct_put'ting chrominance */
-        c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->picture.linesize[1] +
+        c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->frame->linesize[1] +
                      (mb_x >> ((s->sys->pix_fmt == AV_PIX_FMT_YUV411P) ? 2 : 1))) << log2_blocksize);
         for (j = 2; j; j--) {
-            uint8_t *c_ptr = s->picture.data[j] + c_offset;
+            uint8_t *c_ptr = s->frame->data[j] + c_offset;
             if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
                 uint64_t aligned_pixels[64/8];
                 uint8_t *pixels = (uint8_t*)aligned_pixels;
                 uint8_t *c_ptr1, *ptr1;
                 int x, y;
                 mb->idct_put(pixels, 8, block);
-                for (y = 0; y < (1 << log2_blocksize); y++, c_ptr += s->picture.linesize[j], pixels += 8) {
+                for (y = 0; y < (1 << log2_blocksize); y++, c_ptr += s->frame->linesize[j], pixels += 8) {
                     ptr1 = pixels + ((1 << (log2_blocksize))>>1);
-                    c_ptr1 = c_ptr + (s->picture.linesize[j] << log2_blocksize);
+                    c_ptr1 = c_ptr + (s->frame->linesize[j] << log2_blocksize);
                     for (x = 0; x < (1 << FFMAX(log2_blocksize - 1, 0)); x++) {
                         c_ptr[x] = pixels[x];
                         c_ptr1[x] = ptr1[x];
@@ -298,8 +298,8 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
                 block += 64; mb++;
             } else {
                 y_stride = (mb_y == 134) ? (1 << log2_blocksize) :
-                           s->picture.linesize[j] << ((!is_field_mode[mb_index]) * log2_blocksize);
-                linesize = s->picture.linesize[j] << is_field_mode[mb_index];
+                           s->frame->linesize[j] << ((!is_field_mode[mb_index]) * log2_blocksize);
+                linesize = s->frame->linesize[j] << is_field_mode[mb_index];
                 (mb++)-> idct_put(c_ptr , linesize, block); block += 64;
                 if (s->sys->bpm == 8) {
                     (mb++)->idct_put(c_ptr + y_stride, linesize, block); block += 64;
@@ -328,8 +328,9 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
         return -1; /* NOTE: we only accept several full frames */
     }
 
-    s->picture.key_frame = 1;
-    s->picture.pict_type = AV_PICTURE_TYPE_I;
+    s->frame = data;
+    s->frame->key_frame = 1;
+    s->frame->pict_type = AV_PICTURE_TYPE_I;
     avctx->pix_fmt = s->sys->pix_fmt;
     avctx->time_base = s->sys->time_base;
 
@@ -337,10 +338,10 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
     if (ret < 0)
         return ret;
 
-    if ((ret = ff_get_buffer(avctx, &s->picture, 0)) < 0)
+    if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0)
         return ret;
-    s->picture.interlaced_frame = 1;
-    s->picture.top_field_first = 0;
+    s->frame->interlaced_frame = 1;
+    s->frame->top_field_first = 0;
 
     /* Determine the codec's sample_aspect ratio and field order from the packet */
     vsc_pack = buf + 80*5 + 48 + 5;
@@ -348,7 +349,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
         apt = buf[4] & 0x07;
         is16_9 = (vsc_pack[2] & 0x07) == 0x02 || (!apt && (vsc_pack[2] & 0x07) == 0x07);
         avctx->sample_aspect_ratio = s->sys->sar[is16_9];
-        s->picture.top_field_first = !(vsc_pack[3] & 0x40);
+        s->frame->top_field_first = !(vsc_pack[3] & 0x40);
     }
 
     s->buf = buf;
@@ -359,20 +360,10 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
 
     /* return image */
     *got_frame = 1;
-    av_frame_move_ref(data, &s->picture);
 
     return s->sys->frame_size;
 }
 
-static int dvvideo_close(AVCodecContext *c)
-{
-    DVVideoContext *s = c->priv_data;
-
-    av_frame_unref(&s->picture);
-
-    return 0;
-}
-
 AVCodec ff_dvvideo_decoder = {
     .name = "dvvideo",
     .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
@@ -380,7 +371,6 @@ AVCodec ff_dvvideo_decoder = {
     .id = AV_CODEC_ID_DVVIDEO,
     .priv_data_size = sizeof(DVVideoContext),
     .init = ff_dvvideo_init,
-    .close = dvvideo_close,
     .decode = dvvideo_decode_frame,
     .capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
     .max_lowres = 3,
libavcodec/dvenc.c
@@ -47,6 +47,10 @@ static av_cold int dvvideo_init_encoder(AVCodecContext *avctx)
         return AVERROR_PATCHWELCOME;
     }
 
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+
     dv_vlc_map_tableinit();
 
     return ff_dvvideo_init(avctx);
@@ -392,12 +396,12 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
         if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P) ||
             (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
             (s->sys->height >= 720 && mb_y != 134)) {
-            y_stride = s->picture.linesize[0] << 3;
+            y_stride = s->frame->linesize[0] << 3;
         } else {
             y_stride = 16;
         }
-        y_ptr = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x) << 3);
-        linesize = s->picture.linesize[0];
+        y_ptr = s->frame->data[0] + ((mb_y * s->frame->linesize[0] + mb_x) << 3);
+        linesize = s->frame->linesize[0];
 
         if (s->sys->video_stype == 4) { /* SD 422 */
             vs_bit_size +=
@@ -415,12 +419,12 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
         enc_blk += 4;
 
         /* initializing chrominance blocks */
-        c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->picture.linesize[1] +
+        c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->frame->linesize[1] +
                      (mb_x >> ((s->sys->pix_fmt == AV_PIX_FMT_YUV411P) ? 2 : 1))) << 3);
         for (j = 2; j; j--) {
-            uint8_t *c_ptr = s->picture.data[j] + c_offset;
-            linesize = s->picture.linesize[j];
-            y_stride = (mb_y == 134) ? 8 : (s->picture.linesize[j] << 3);
+            uint8_t *c_ptr = s->frame->data[j] + c_offset;
+            linesize = s->frame->linesize[j];
+            y_stride = (mb_y == 134) ? 8 : (s->frame->linesize[j] << 3);
             if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
                 uint8_t* d;
                 uint8_t* b = scratch;
@@ -519,7 +523,7 @@ static inline int dv_write_pack(enum dv_pack_type pack_id, DVVideoContext *c,
      * compression scheme (if any).
      */
     int apt = (c->sys->pix_fmt == AV_PIX_FMT_YUV420P ? 0 : 1);
-    int fs = c->picture.top_field_first ? 0x00 : 0x40;
+    int fs = c->frame->top_field_first ? 0x00 : 0x40;
 
     uint8_t aspect = 0;
     if ((int)(av_q2d(c->avctx->sample_aspect_ratio) * c->avctx->width / c->avctx->height * 10) >= 17) /* 16:9 */
@@ -667,10 +671,10 @@ static int dvvideo_encode_frame(AVCodecContext *c, AVPacket *pkt,
     if ((ret = ff_alloc_packet2(c, pkt, s->sys->frame_size)) < 0)
         return ret;
 
     c->pix_fmt = s->sys->pix_fmt;
-    s->picture = *frame;
-    s->picture.key_frame = 1;
-    s->picture.pict_type = AV_PICTURE_TYPE_I;
+    s->frame = frame;
+    c->coded_frame->key_frame = 1;
+    c->coded_frame->pict_type = AV_PICTURE_TYPE_I;
 
     s->buf = pkt->data;
     c->execute(c, dv_encode_video_segment, s->sys->work_chunks, NULL,
@@ -686,6 +690,12 @@ static int dvvideo_encode_frame(AVCodecContext *c, AVPacket *pkt,
     return 0;
 }
 
+static int dvvideo_encode_close(AVCodecContext *avctx)
+{
+    av_frame_free(&avctx->coded_frame);
+    return 0;
+}
+
 AVCodec ff_dvvideo_encoder = {
     .name = "dvvideo",
     .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
@@ -694,6 +704,7 @@ AVCodec ff_dvvideo_encoder = {
     .priv_data_size = sizeof(DVVideoContext),
     .init = dvvideo_init_encoder,
     .encode2 = dvvideo_encode_frame,
+    .close = dvvideo_encode_close,
     .capabilities = CODEC_CAP_SLICE_THREADS,
     .pix_fmts = (const enum AVPixelFormat[]) {
         AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
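The encoder side of the merge follows the same idea: with the embedded AVFrame gone from the context, avctx->coded_frame has to be a real allocation, so init pairs an av_frame_alloc() with an av_frame_free() in the new dvvideo_encode_close() callback, while dvvideo_encode_frame() only keeps a pointer to the input frame and tags c->coded_frame instead of s->picture. A condensed sketch of that lifecycle, with error paths and the actual encoding work omitted, so again an illustration rather than the full source:

/* Sketch only: allocation and release of coded_frame around the DV encoder. */
static av_cold int dvvideo_init_encoder(AVCodecContext *avctx)
{
    avctx->coded_frame = av_frame_alloc();   /* replaces the embedded s->picture */
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);

    dv_vlc_map_tableinit();
    return ff_dvvideo_init(avctx);
}

static int dvvideo_encode_close(AVCodecContext *avctx)
{
    av_frame_free(&avctx->coded_frame);      /* paired with av_frame_alloc() in init */
    return 0;
}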