Mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2024-12-23 12:43:46 +02:00)
commit 492cd3a920
parent f694168d52

AVVideoFrame -> AVFrame

Originally committed as revision 1327 to svn://svn.ffmpeg.org/ffmpeg/trunk
ffmpeg.c: 18 changed lines
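The rename below also moves per-frame state off AVCodecContext: the context's key_frame field is removed and the audio encoders now publish it through an allocated coded_frame. A minimal sketch of that pattern, assembled from the hunks that follow (illustrative only, not code from the tree; the function names and the NULL check on the allocation are added assumptions):

#include "avcodec.h"

/* Illustrative only: the coded_frame pattern the audio encoders (AC3, MP2,
 * ADPCM, PCM, Vorbis) adopt in this commit.  Identifiers follow the hunks
 * below; the NULL check is an extra safety assumption, not in the tree. */
static int example_encode_init(AVCodecContext *avctx)
{
    /* replaces the removed "avctx->key_frame = 1;" */
    avctx->coded_frame = avcodec_alloc_frame();
    if (!avctx->coded_frame)
        return -1;
    avctx->coded_frame->key_frame = 1;  /* audio packets are always key frames */
    return 0;
}

static int example_encode_close(AVCodecContext *avctx)
{
    av_freep(&avctx->coded_frame);      /* matching cleanup, as in AC3_encode_close() */
    return 0;
}

Muxers then read enc->coded_frame->key_frame (and ->quality) instead of the old context fields, as in the asf/avi/ffm/rm hunks near the end of the diff.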
@@ -629,9 +629,9 @@ static void do_video_out(AVFormatContext *s,
 /* XXX: pb because no interleaving */
 for(i=0;i<nb_frames;i++) {
 if (enc->codec_id != CODEC_ID_RAWVIDEO) {
-AVVideoFrame big_picture;
+AVFrame big_picture;

-memset(&big_picture, 0, sizeof(AVVideoFrame));
+memset(&big_picture, 0, sizeof(AVFrame));
 *(AVPicture*)&big_picture= *final_picture;

 /* handles sameq here. This is not correct because it may

@@ -709,9 +709,9 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
 total_size += frame_size;
 if (enc->codec_type == CODEC_TYPE_VIDEO) {
 frame_number = ost->frame_number;
-fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_picture->quality);
+fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality);
 if (enc->flags&CODEC_FLAG_PSNR)
-fprintf(fvstats, "PSNR= %6.2f ", psnr(enc->coded_picture->error[0]/(enc->width*enc->height*255.0*255.0)));
+fprintf(fvstats, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));

 fprintf(fvstats,"f_size= %6d ", frame_size);
 /* compute pts value */

@@ -723,7 +723,7 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
 avg_bitrate = (double)(total_size * 8) / ti1 / 1000.0;
 fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
 (double)total_size / 1024, ti1, bitrate, avg_bitrate);
-fprintf(fvstats,"type= %s\n", enc->coded_picture->key_frame == 1 ? "I" : "P");
+fprintf(fvstats,"type= %s\n", enc->coded_frame->key_frame == 1 ? "I" : "P");
 }
 }

@@ -767,14 +767,14 @@ void print_report(AVFormatContext **output_files,
 enc = &ost->st->codec;
 if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
 sprintf(buf + strlen(buf), "q=%2.1f ",
-enc->coded_picture->quality);
+enc->coded_frame->quality);
 }
 if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
 frame_number = ost->frame_number;
 sprintf(buf + strlen(buf), "frame=%5d q=%2.1f ",
-frame_number, enc->coded_picture ? enc->coded_picture->quality : 0);
+frame_number, enc->coded_frame ? enc->coded_frame->quality : 0);
 if (enc->flags&CODEC_FLAG_PSNR)
-sprintf(buf + strlen(buf), "PSNR= %6.2f ", psnr(enc->coded_picture->error[0]/(enc->width*enc->height*255.0*255.0)));
+sprintf(buf + strlen(buf), "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
 vid = 1;
 }
 /* compute min output value */

@@ -1287,7 +1287,7 @@ static int av_encode(AVFormatContext **output_files,
 ist->st->codec.height);
 ret = len;
 } else {
-AVVideoFrame big_picture;
+AVFrame big_picture;

 data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2;
 ret = avcodec_decode_video(&ist->st->codec,
@@ -1955,7 +1955,7 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
 /* we use the codec indication because it is
 more accurate than the demux flags */
 pkt->flags = 0;
-if (st->codec.coded_picture->key_frame)
+if (st->codec.coded_frame->key_frame)
 pkt->flags |= PKT_FLAG_KEY;
 return 0;
 }

@@ -2211,7 +2211,7 @@ static int http_prepare_data(HTTPContext *c)
 codec = &ctx->streams[pkt.stream_index]->codec;
 }

-codec->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0);
+codec->coded_frame->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0);

 #ifdef PJSG
 if (codec->codec_type == CODEC_TYPE_AUDIO) {
@@ -826,7 +826,6 @@ static int AC3_encode_init(AVCodecContext *avctx)
 };

 avctx->frame_size = AC3_FRAME_SIZE;
-avctx->key_frame = 1; /* always key frame */

 /* number of channels */
 if (channels < 1 || channels > 6)

@@ -890,6 +889,9 @@ static int AC3_encode_init(AVCodecContext *avctx)
 }

 ac3_crc_init();

+avctx->coded_frame= avcodec_alloc_frame();
+avctx->coded_frame->key_frame= 1;
+
 return 0;
 }

@@ -1447,6 +1449,11 @@ static int AC3_encode_frame(AVCodecContext *avctx,
 return output_frame_end(s);
 }

+static int AC3_encode_close(AVCodecContext *avctx)
+{
+av_freep(&avctx->coded_frame);
+}
+
 #if 0
 /*************************************************************************/
 /* TEST */

@@ -1546,5 +1553,6 @@ AVCodec ac3_encoder = {
 sizeof(AC3EncodeContext),
 AC3_encode_init,
 AC3_encode_frame,
+AC3_encode_close,
 NULL,
 };
@@ -126,12 +126,17 @@ static int adpcm_encode_init(AVCodecContext *avctx)
 return -1;
 break;
 }
+
+avctx->coded_frame= avcodec_alloc_frame();
+avctx->coded_frame->key_frame= 1;
+
 return 0;
 }

 static int adpcm_encode_close(AVCodecContext *avctx)
 {
-/* nothing to free */
+av_freep(&avctx->coded_frame);

 return 0;
 }

@@ -253,7 +258,6 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
 default:
 return -1;
 }
-avctx->key_frame = 1;
 return dst - frame;
 }
@@ -164,7 +164,7 @@ void video_encode_example(const char *filename)
 AVCodecContext *c= NULL;
 int i, out_size, size, x, y, outbuf_size;
 FILE *f;
-AVVideoFrame *picture;
+AVFrame *picture;
 UINT8 *outbuf, *picture_buf;

 printf("Video encoding\n");

@@ -177,7 +177,7 @@ void video_encode_example(const char *filename)
 }

 c= avcodec_alloc_context();
-picture= avcodec_alloc_picture();
+picture= avcodec_alloc_frame();

 /* put sample parameters */
 c->bit_rate = 400000;

@@ -278,7 +278,7 @@ void video_decode_example(const char *outfilename, const char *filename)
 AVCodecContext *c= NULL;
 int frame, size, got_picture, len;
 FILE *f;
-AVVideoFrame *picture;
+AVFrame *picture;
 UINT8 inbuf[INBUF_SIZE], *inbuf_ptr;
 char buf[1024];

@@ -292,7 +292,7 @@ void video_decode_example(const char *outfilename, const char *filename)
 }

 c= avcodec_alloc_context();
-picture= avcodec_alloc_picture();
+picture= avcodec_alloc_frame();

 if(codec->capabilities&CODEC_CAP_TRUNCATED)
 c->flags|= CODEC_FLAG_TRUNCATED; /* we dont send complete frames */
@@ -5,8 +5,8 @@

 #define LIBAVCODEC_VERSION_INT 0x000406
 #define LIBAVCODEC_VERSION "0.4.6"
-#define LIBAVCODEC_BUILD 4644
+#define LIBAVCODEC_BUILD 4645
-#define LIBAVCODEC_BUILD_STR "4644"
+#define LIBAVCODEC_BUILD_STR "4645"

 enum CodecID {
 CODEC_ID_NONE,

@@ -159,7 +159,7 @@ static const int Motion_Est_QTab[] = { ME_ZERO, ME_PHODS, ME_LOG,

 #define FRAME_RATE_BASE 10000

-#define FF_COMMON_PICTURE \
+#define FF_COMMON_FRAME \
 uint8_t *data[4];\
 int linesize[4];\
 /**\

@@ -279,9 +279,9 @@ static const int Motion_Est_QTab[] = { ME_ZERO, ME_PHODS, ME_LOG,
 #define FF_B_TYPE 3 // Bi-dir predicted
 #define FF_S_TYPE 4 // S(GMC)-VOP MPEG4

-typedef struct AVVideoFrame {
+typedef struct AVFrame {
-FF_COMMON_PICTURE
+FF_COMMON_FRAME
-} AVVideoFrame;
+} AVFrame;

 typedef struct AVCodecContext {
 /**

@@ -395,13 +395,6 @@ typedef struct AVCodecContext {
 int real_pict_num; /* returns the real picture number of
 previous encoded frame */

-/**
- * 1 -> keyframe, 0-> not (this if for audio only, for video, AVVideoFrame.key_frame should be used)
- * encoding: set by lavc (for the outputed bitstream, not the input frame)
- * decoding: set by lavc (for the decoded bitstream, not the displayed frame)
- */
-int key_frame;
-
 /**
 * number of frames the decoded output will be delayed relative to
 * the encoded input

@@ -574,7 +567,7 @@ typedef struct AVCodecContext {
 * encoding: unused
 * decoding: set by lavc, user can override
 */
-int (*get_buffer)(struct AVCodecContext *c, AVVideoFrame *pic);
+int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);

 /**
 * called to release buffers which where allocated with get_buffer.

@@ -583,7 +576,7 @@ typedef struct AVCodecContext {
 * encoding: unused
 * decoding: set by lavc, user can override
 */
-void (*release_buffer)(struct AVCodecContext *c, AVVideoFrame *pic);
+void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);

 /**
 * is 1 if the decoded stream contains b frames, 0 otherwise

@@ -820,7 +813,7 @@ typedef struct AVCodecContext {
 * encoding: set by lavc
 * decoding: set by lavc
 */
-AVVideoFrame *coded_picture;
+AVFrame *coded_frame;

 /**
 * debug

@@ -1001,16 +994,16 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);

 void avcodec_get_context_defaults(AVCodecContext *s);
 AVCodecContext *avcodec_alloc_context(void);
-AVVideoFrame *avcodec_alloc_picture(void);
+AVFrame *avcodec_alloc_frame(void);

-int avcodec_default_get_buffer(AVCodecContext *s, AVVideoFrame *pic);
+int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
-void avcodec_default_release_buffer(AVCodecContext *s, AVVideoFrame *pic);
+void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);

 int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
 int avcodec_decode_audio(AVCodecContext *avctx, INT16 *samples,
 int *frame_size_ptr,
 UINT8 *buf, int buf_size);
-int avcodec_decode_video(AVCodecContext *avctx, AVVideoFrame *picture,
+int avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
 int *got_picture_ptr,
 UINT8 *buf, int buf_size);
 int avcodec_parse_frame(AVCodecContext *avctx, UINT8 **pdata,

@@ -1019,7 +1012,7 @@ int avcodec_parse_frame(AVCodecContext *avctx, UINT8 **pdata,
 int avcodec_encode_audio(AVCodecContext *avctx, UINT8 *buf, int buf_size,
 const short *samples);
 int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
-const AVVideoFrame *pict);
+const AVFrame *pict);

 int avcodec_close(AVCodecContext *avctx);
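The many (AVFrame*) casts applied to Picture pointers in the mpegvideo hunks further down rely on Picture (mpegvideo.h, also touched below) starting with the same FF_COMMON_FRAME member block as AVFrame, so the shared leading fields can be read through either type, for example when a Picture is handed to get_buffer()/release_buffer(). A simplified sketch of that layout, with an abbreviated, made-up macro body (the real one in avcodec.h carries many more fields):

#include <stdint.h>

/* Simplified illustration of the prefix-layout trick behind casts like
 * (AVFrame*)&s->picture[i]; only the shared leading members may be touched
 * through the AVFrame view, the codec-private fields stay internal. */
#define FF_COMMON_FRAME  \
    uint8_t *data[4];    \
    int linesize[4];     \
    int key_frame;

typedef struct AVFrame {
    FF_COMMON_FRAME
} AVFrame;

typedef struct Picture {
    FF_COMMON_FRAME
    int mb_var_sum;          /* codec-internal extras follow the shared prefix */
} Picture;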
@@ -33,7 +33,7 @@ typedef struct DVVideoDecodeContext {
 int sampling_411; /* 0 = 420, 1 = 411 */
 int width, height;
 UINT8 *current_picture[3]; /* picture structure */
-AVVideoFrame picture;
+AVFrame picture;
 int linesize[3];
 DCTELEM block[5*6][64] __align8;
 UINT8 dv_zigzag[2][64];

@@ -595,8 +595,8 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
 emms_c();

 /* return image */
-*data_size = sizeof(AVVideoFrame);
+*data_size = sizeof(AVFrame);
-*(AVVideoFrame*)data= s->picture;
+*(AVFrame*)data= s->picture;

 avctx->release_buffer(avctx, &s->picture);
@@ -349,7 +349,7 @@ static int h263_decode_frame(AVCodecContext *avctx,
 {
 MpegEncContext *s = avctx->priv_data;
 int ret,i;
-AVVideoFrame *pict = data;
+AVFrame *pict = data;
 float new_aspect;

 #ifdef PRINT_FRAME_TIME

@@ -676,9 +676,9 @@ retry:
 }
 #endif
 if(s->pict_type==B_TYPE || s->low_delay){
-*pict= *(AVVideoFrame*)&s->current_picture;
+*pict= *(AVFrame*)&s->current_picture;
 } else {
-*pict= *(AVVideoFrame*)&s->last_picture;
+*pict= *(AVFrame*)&s->last_picture;
 }

 /* Return the Picture timestamp as the frame number */

@@ -687,7 +687,7 @@ retry:

 /* dont output the last pic after seeking */
 if(s->last_picture.data[0] || s->low_delay)
-*data_size = sizeof(AVVideoFrame);
+*data_size = sizeof(AVFrame);
 #ifdef PRINT_FRAME_TIME
 printf("%Ld\n", rdtsc()-time);
 #endif
@@ -57,7 +57,7 @@ typedef struct HYuvContext{
 uint8_t len[3][256];
 uint32_t bits[3][256];
 VLC vlc[3];
-AVVideoFrame picture;
+AVFrame picture;
 uint8_t __align8 bitstream_buffer[1024*1024*3]; //FIXME dynamic alloc or some other solution
 DSPContext dsp;
 }HYuvContext;

@@ -332,7 +332,7 @@ static int decode_init(AVCodecContext *avctx)

 width= s->width= avctx->width;
 height= s->height= avctx->height;
-avctx->coded_picture= &s->picture;
+avctx->coded_frame= &s->picture;

 s->bgr32=1;
 assert(width && height);

@@ -460,7 +460,7 @@ static int encode_init(AVCodecContext *avctx)
 avctx->stats_out= av_mallocz(1024*10);
 s->version=2;

-avctx->coded_picture= &s->picture;
+avctx->coded_frame= &s->picture;
 s->picture.pict_type= FF_I_TYPE;
 s->picture.key_frame= 1;

@@ -670,9 +670,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
 const int width2= s->width>>1;
 const int height= s->height;
 int fake_ystride, fake_ustride, fake_vstride;
-AVVideoFrame * const p= &s->picture;
+AVFrame * const p= &s->picture;

-AVVideoFrame *picture = data;
+AVFrame *picture = data;

 *data_size = 0;

@@ -893,7 +893,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8

 avctx->release_buffer(avctx, p);

-*data_size = sizeof(AVVideoFrame);
+*data_size = sizeof(AVFrame);

 return (get_bits_count(&s->gb)+7)>>3;
 }

@@ -920,14 +920,14 @@ static int decode_end(AVCodecContext *avctx)

 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
 HYuvContext *s = avctx->priv_data;
-AVVideoFrame *pict = data;
+AVFrame *pict = data;
 const int width= s->width;
 const int width2= s->width>>1;
 const int height= s->height;
 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
-AVVideoFrame * const p= &s->picture;
+AVFrame * const p= &s->picture;
 int i, size;

 init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
@@ -1595,7 +1595,7 @@ static void mpeg_decode_extension(AVCodecContext *avctx,
 * DECODE_SLICE_EOP if the end of the picture is reached
 */
 static int mpeg_decode_slice(AVCodecContext *avctx,
-AVVideoFrame *pict,
+AVFrame *pict,
 int start_code,
 UINT8 *buf, int buf_size)
 {

@@ -1703,7 +1703,7 @@ eos: //end of slice
 MPV_frame_end(s);

 if (s->pict_type == B_TYPE || s->low_delay) {
-*pict= *(AVVideoFrame*)&s->current_picture;
+*pict= *(AVFrame*)&s->current_picture;
 } else {
 s->picture_number++;
 /* latency of 1 frame for I and P frames */

@@ -1711,7 +1711,7 @@ eos: //end of slice
 if (s->picture_number == 1) {
 return DECODE_SLICE_OK;
 } else {
-*pict= *(AVVideoFrame*)&s->last_picture;
+*pict= *(AVFrame*)&s->last_picture;
 }
 }
 return DECODE_SLICE_EOP;

@@ -1839,7 +1839,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
 Mpeg1Context *s = avctx->priv_data;
 UINT8 *buf_end, *buf_ptr, *buf_start;
 int len, start_code_found, ret, code, start_code, input_size;
-AVVideoFrame *picture = data;
+AVFrame *picture = data;
 MpegEncContext *s2 = &s->mpeg_enc_ctx;

 dprintf("fill_buffer\n");

@@ -1849,9 +1849,9 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
 /* special case for last picture */
 if (buf_size == 0) {
 if (s2->picture_number > 0) {
-*picture= *(AVVideoFrame*)&s2->next_picture;
+*picture= *(AVFrame*)&s2->next_picture;

-*data_size = sizeof(AVVideoFrame);
+*data_size = sizeof(AVFrame);
 }
 return 0;
 }
@@ -70,7 +70,6 @@ int MPA_encode_init(AVCodecContext *avctx)
 s->freq = freq;
 s->bit_rate = bitrate * 1000;
 avctx->frame_size = MPA_FRAME_SIZE;
-avctx->key_frame = 1; /* always key frame */

 /* encoding freq */
 s->lsf = 0;

@@ -169,6 +168,9 @@ int MPA_encode_init(AVCodecContext *avctx)
 total_quant_bits[i] = 12 * v;
 }

+avctx->coded_frame= avcodec_alloc_frame();
+avctx->coded_frame->key_frame= 1;
+
 return 0;
 }

@@ -765,6 +767,10 @@ int MPA_encode_frame(AVCodecContext *avctx,
 return pbBufPtr(&s->pb) - s->pb.buf;
 }

+static int MPA_encode_close(AVCodecContext *avctx)
+{
+av_freep(&avctx->coded_frame);
+}
+
 AVCodec mp2_encoder = {
 "mp2",

@@ -773,6 +779,7 @@ AVCodec mp2_encoder = {
 sizeof(MpegAudioContext),
 MPA_encode_init,
 MPA_encode_frame,
+MPA_encode_close,
 NULL,
 };
@@ -282,7 +282,7 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){

 assert(!pic->data[0]);

-r= s->avctx->get_buffer(s->avctx, (AVVideoFrame*)pic);
+r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

 if(r<0 || !pic->age || !pic->type || !pic->data[0]){
 fprintf(stderr, "get_buffer() failed (%d %d %d %X)\n", r, pic->age, pic->type, (int)pic->data[0]);

@@ -327,7 +327,7 @@ static void free_picture(MpegEncContext *s, Picture *pic){
 int i;

 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
-s->avctx->release_buffer(s->avctx, (AVVideoFrame*)pic);
+s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
 }

 av_freep(&pic->mb_var);

@@ -383,7 +383,7 @@ int MPV_common_init(MpegEncContext *s)

 CHECKED_ALLOCZ(s->edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance

-s->avctx->coded_picture= (AVVideoFrame*)&s->current_picture;
+s->avctx->coded_frame= (AVFrame*)&s->current_picture;

 if (s->encoding) {
 int mv_table_size= (s->mb_width+2)*(s->mb_height+2);

@@ -843,7 +843,7 @@ static int find_unused_picture(MpegEncContext *s, int shared){
 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 {
 int i;
-AVVideoFrame *pic;
+AVFrame *pic;

 s->mb_skiped = 0;

@@ -853,7 +853,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 //printf("%8X %d %d %X %X\n", s->picture[i].data[0], s->picture[i].type, i, s->next_picture.data[0], s->last_picture.data[0]);
 if(s->picture[i].data[0] == s->last_picture.data[0]){
 // s->picture[i].reference=0;
-avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]);
+avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
 break;
 }
 }

@@ -865,7 +865,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 for(i=0; i<MAX_PICTURE_COUNT; i++){
 if(s->picture[i].data[0] && s->picture[i].data[0] != s->next_picture.data[0] && s->picture[i].reference){
 fprintf(stderr, "releasing zombie picture\n");
-avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]);
+avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
 }
 }
 }

@@ -874,7 +874,7 @@ alloc:
 if(!s->encoding){
 i= find_unused_picture(s, 0);

-pic= (AVVideoFrame*)&s->picture[i];
+pic= (AVFrame*)&s->picture[i];
 pic->reference= s->pict_type != B_TYPE;
 pic->coded_picture_number= s->current_picture.coded_picture_number+1;

@@ -946,7 +946,7 @@ void MPV_frame_end(MpegEncContext *s)
 /* release non refernce frames */
 for(i=0; i<MAX_PICTURE_COUNT; i++){
 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/)
-s->avctx->release_buffer(s->avctx, (AVVideoFrame*)&s->picture[i]);
+s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
 }
 }

@@ -984,8 +984,8 @@ static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int st
 }


-static int load_input_picture(MpegEncContext *s, AVVideoFrame *pic_arg){
+static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
-AVVideoFrame *pic;
+AVFrame *pic;
 int i;
 const int encoding_delay= s->max_b_frames;
 int direct=1;

@@ -1000,7 +1000,7 @@ static int load_input_picture(MpegEncContext *s, AVVideoFrame *pic_arg){
 if(direct){
 i= find_unused_picture(s, 1);

-pic= (AVVideoFrame*)&s->picture[i];
+pic= (AVFrame*)&s->picture[i];
 pic->reference= 1;

 for(i=0; i<4; i++){

@@ -1011,7 +1011,7 @@ static int load_input_picture(MpegEncContext *s, AVVideoFrame *pic_arg){
 }else{
 i= find_unused_picture(s, 0);

-pic= (AVVideoFrame*)&s->picture[i];
+pic= (AVFrame*)&s->picture[i];
 pic->reference= 1;

 alloc_picture(s, (Picture*)pic, 0);

@@ -1194,7 +1194,7 @@ int MPV_encode_picture(AVCodecContext *avctx,
 unsigned char *buf, int buf_size, void *data)
 {
 MpegEncContext *s = avctx->priv_data;
-AVVideoFrame *pic_arg = data;
+AVFrame *pic_arg = data;
 int i;

 init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
@@ -110,7 +110,7 @@ typedef struct ScanTable{
 } ScanTable;

 typedef struct Picture{
-FF_COMMON_PICTURE
+FF_COMMON_FRAME

 int mb_var_sum; /* sum of MB variance for current frame */
 int mc_mb_var_sum; /* motion compensated MB variance for current frame */
@@ -24,9 +24,9 @@ typedef struct OggVorbisContext {


 int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avccontext) {
-if(avccontext->quality) /* VBR requested */
+if(avccontext->coded_frame->quality) /* VBR requested */
 return vorbis_encode_init_vbr(vi, avccontext->channels,
-avccontext->sample_rate, (float)avccontext->quality / 1000) ;
+avccontext->sample_rate, (float)avccontext->coded_frame->quality / 1000) ;

 return vorbis_encode_init(vi, avccontext->channels,
 avccontext->sample_rate, -1, avccontext->bit_rate, -1) ;

@@ -45,6 +45,9 @@ static int oggvorbis_encode_init(AVCodecContext *avccontext) {
 vorbis_block_init(&context->vd, &context->vb) ;

 avccontext->frame_size = OGGVORBIS_FRAME_SIZE ;

+avccontext->coded_frame= avcodec_alloc_frame();
+avccontext->coded_frame->key_frame= 1;
+
 return 0 ;
 }

@@ -113,6 +116,8 @@ static int oggvorbis_encode_close(AVCodecContext *avccontext) {
 vorbis_block_clear(&context->vb);
 vorbis_dsp_clear(&context->vd);
 vorbis_info_clear(&context->vi);

+av_freep(&avccontext->coded_frame);
+
 return 0 ;
 }
@@ -128,11 +128,17 @@ static int pcm_encode_init(AVCodecContext *avctx)
 default:
 break;
 }
+
+avctx->coded_frame= avcodec_alloc_frame();
+avctx->coded_frame->key_frame= 1;
+
 return 0;
 }

 static int pcm_encode_close(AVCodecContext *avctx)
 {
+av_freep(&avctx->coded_frame);
+
 switch(avctx->codec->id) {
 case CODEC_ID_PCM_ALAW:
 if (--linear_to_alaw_ref == 0)

@@ -237,7 +243,6 @@ static int pcm_encode_frame(AVCodecContext *avctx,
 default:
 return -1;
 }
-avctx->key_frame = 1;
 //avctx->frame_size = (dst - frame) / (sample_size * avctx->channels);

 return dst - frame;
@@ -472,7 +472,7 @@ static int rv10_decode_frame(AVCodecContext *avctx,
 {
 MpegEncContext *s = avctx->priv_data;
 int i;
-AVVideoFrame *pict = data;
+AVFrame *pict = data;

 #ifdef DEBUG
 printf("*****frame %d size=%d\n", avctx->frame_number, buf_size);

@@ -505,9 +505,9 @@ static int rv10_decode_frame(AVCodecContext *avctx,
 if(s->mb_y>=s->mb_height){
 MPV_frame_end(s);

-*pict= *(AVVideoFrame*)&s->current_picture;
+*pict= *(AVFrame*)&s->current_picture;

-*data_size = sizeof(AVVideoFrame);
+*data_size = sizeof(AVFrame);
 }else{
 *data_size = 0;
 }
@@ -1063,7 +1063,7 @@ static int svq1_decode_frame(AVCodecContext *avctx,
 MpegEncContext *s=avctx->priv_data;
 uint8_t *current, *previous;
 int result, i, x, y, width, height;
-AVVideoFrame *pict = data;
+AVFrame *pict = data;

 /* initialize bit buffer */
 init_get_bits(&s->gb,buf,buf_size);

@@ -1161,12 +1161,12 @@ static int svq1_decode_frame(AVCodecContext *avctx,
 }
 }

-*pict = *(AVVideoFrame*)&s->current_picture;
+*pict = *(AVFrame*)&s->current_picture;


 MPV_frame_end(s);

-*data_size=sizeof(AVVideoFrame);
+*data_size=sizeof(AVFrame);
 return buf_size;
 }
@@ -120,7 +120,7 @@ typedef struct DefaultPicOpaque{
 uint8_t *data[4];
 }DefaultPicOpaque;

-int avcodec_default_get_buffer(AVCodecContext *s, AVVideoFrame *pic){
+int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
 int i;
 const int width = s->width;
 const int height= s->height;

@@ -202,7 +202,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVVideoFrame *pic){
 return 0;
 }

-void avcodec_default_release_buffer(AVCodecContext *s, AVVideoFrame *pic){
+void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){
 int i;

 assert(pic->type==FF_BUFFER_TYPE_INTERNAL);

@@ -249,11 +249,11 @@ AVCodecContext *avcodec_alloc_context(void){
 }

 /**
- * allocates a AVPicture and set it to defaults.
+ * allocates a AVPFrame and set it to defaults.
 * this can be deallocated by simply calling free()
 */
-AVVideoFrame *avcodec_alloc_picture(void){
+AVFrame *avcodec_alloc_frame(void){
-AVVideoFrame *pic= av_mallocz(sizeof(AVVideoFrame));
+AVFrame *pic= av_mallocz(sizeof(AVFrame));

 return pic;
 }

@@ -290,7 +290,7 @@ int avcodec_encode_audio(AVCodecContext *avctx, UINT8 *buf, int buf_size,
 }

 int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
-const AVVideoFrame *pict)
+const AVFrame *pict)
 {
 int ret;

@@ -305,7 +305,7 @@ int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
 /* decode a frame. return -1 if error, otherwise return the number of
 bytes used. If no frame could be decompressed, *got_picture_ptr is
 zero. Otherwise, it is non zero */
-int avcodec_decode_video(AVCodecContext *avctx, AVVideoFrame *picture,
+int avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
 int *got_picture_ptr,
 UINT8 *buf, int buf_size)
 {

@@ -672,7 +672,7 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
 for(i=0; i<MAX_PICTURE_COUNT; i++){
 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
 || s->picture[i].type == FF_BUFFER_TYPE_USER))
-avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]);
+avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
 }
 break;
 default:
@@ -556,7 +556,7 @@ static void put_frame_header(AVFormatContext *s, ASFStream *stream, int timestam
 int val;

 val = stream->num;
-if (s->streams[val - 1]->codec.coded_picture->key_frame /* && frag_offset == 0 */)
+if (s->streams[val - 1]->codec.coded_frame->key_frame /* && frag_offset == 0 */)
 val |= 0x80;
 put_byte(pb, val);
 put_byte(pb, stream->seq);
@@ -320,7 +320,7 @@ static int avi_write_packet(AVFormatContext *s, int stream_index,
 if (enc->codec_type == CODEC_TYPE_VIDEO) {
 tag[2] = 'd';
 tag[3] = 'c';
-flags = enc->coded_picture->key_frame ? 0x10 : 0x00;
+flags = enc->coded_frame->key_frame ? 0x10 : 0x00;
 } else {
 tag[2] = 'w';
 tag[3] = 'b';
@@ -232,7 +232,7 @@ static int ffm_write_packet(AVFormatContext *s, int stream_index,
 /* packet size & key_frame */
 header[0] = stream_index;
 header[1] = 0;
-if (st->codec.coded_picture && st->codec.coded_picture->key_frame)
+if (st->codec.coded_frame->key_frame) //if st->codec.coded_frame==NULL then there is a bug somewhere else
 header[1] |= FLAG_KEY_FRAME;
 header[2] = (size >> 16) & 0xff;
 header[3] = (size >> 8) & 0xff;
@@ -333,7 +333,7 @@ static int rm_write_audio(AVFormatContext *s, UINT8 *buf, int size)
 /* XXX: suppress this malloc */
 buf1= (UINT8*) av_malloc( size * sizeof(UINT8) );

-write_packet_header(s, stream, size, stream->enc->key_frame);
+write_packet_header(s, stream, size, stream->enc->coded_frame->key_frame);

 /* for AC3, the words seems to be reversed */
 for(i=0;i<size;i+=2) {

@@ -352,7 +352,7 @@ static int rm_write_video(AVFormatContext *s, UINT8 *buf, int size)
 RMContext *rm = s->priv_data;
 ByteIOContext *pb = &s->pb;
 StreamInfo *stream = rm->video_stream;
-int key_frame = stream->enc->coded_picture->key_frame;
+int key_frame = stream->enc->coded_frame->key_frame;

 /* XXX: this is incorrect: should be a parameter */
@@ -458,7 +458,7 @@ int av_find_stream_info(AVFormatContext *ic)
 AVCodec *codec;
 AVStream *st;
 AVPacket *pkt;
-AVVideoFrame picture;
+AVFrame picture;
 AVPacketList *pktl=NULL, **ppktl;
 short samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
 UINT8 *ptr;
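To round this off, a hypothetical caller sketch against LIBAVCODEC_BUILD 4645: the helper name, its arguments and its error handling are invented, but the calls mirror the apiexample.c and avcodec.h hunks above, where avcodec_alloc_frame() replaces avcodec_alloc_picture() and AVFrame replaces AVVideoFrame.

#include <stdlib.h>
#include "avcodec.h"

/* Hypothetical caller fragment; decode_one is not part of the tree. */
static int decode_one(AVCodec *codec, UINT8 *buf, int buf_size)
{
    AVCodecContext *c = avcodec_alloc_context();
    AVFrame *picture  = avcodec_alloc_frame();   /* was avcodec_alloc_picture() */
    int got_picture = 0, len = -1;

    if (c && picture && avcodec_open(c, codec) >= 0) {
        len = avcodec_decode_video(c, picture, &got_picture, buf, buf_size);
        /* per-frame state (key_frame, quality, error[]) now lives on the
         * AVFrame, which is also what avctx->coded_frame points at */
        avcodec_close(c);
    }
    free(picture);   /* "can be deallocated by simply calling free()" per the utils.c comment */
    free(c);
    return got_picture ? len : -1;
}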