Mirror of https://github.com/FFmpeg/FFmpeg.git
Eliminate FF_COMMON_FRAME macro.
FF_COMMON_FRAME holds the contents of the AVFrame structure and is also copied into struct Picture. Replace it with an embedded AVFrame structure in struct Picture.
commit 657ccb5ac7
parent 142e76f105
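Concretely, code that used to read AVFrame fields straight off a Picture now goes through the embedded member f, as the hunks below show (pic->data[0] becomes pic->f.data[0], s->current_picture.mb_type becomes s->current_picture.f.mb_type, and so on). The following is a minimal, compilable sketch of the layout change; the AVFrame and Picture definitions here are abbreviated stand-ins for illustration only, not the real libavcodec declarations:

#include <stdint.h>

/* Abbreviated stand-ins so this sketch compiles on its own; the real
 * definitions live in libavcodec (avcodec.h and mpegvideo.h). */
typedef struct AVFrame {
    uint8_t *data[4];      /* picture planes */
    int      linesize[4];
    /* ... many more fields ... */
} AVFrame;

/* After this commit: Picture embeds a full AVFrame as its first member
 * 'f' instead of pasting the same fields in via FF_COMMON_FRAME. */
typedef struct Picture {
    AVFrame f;
    /* ... codec-internal fields (e.g. motion_val_base, which the diff
     * shows staying outside of 'f') ... */
} Picture;

static uint8_t *luma_plane(Picture *pic)
{
    /* Before: pic->data[0]  (the macro pasted the fields in directly).
     * After:  one extra ".f" hop to the embedded frame. */
    return pic->f.data[0];
}

static AVFrame *as_frame(Picture *pic)
{
    /* Because 'f' is the first member, existing casts seen in the diff,
     * such as (AVFrame *)&h->picture, still yield a valid AVFrame*. */
    return (AVFrame *)pic;
}

The rest of the commit is the mechanical fallout of that one layout change: every AVFrame-field access on a Picture gains the .f hop.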
@@ -741,266 +741,6 @@ typedef struct AVPanScan{
     int16_t position[3][2];
 }AVPanScan;
 
-#define FF_COMMON_FRAME \
-    /**\
-     * pointer to the picture planes.\
-     * This might be different from the first allocated byte\
-     * - encoding: \
-     * - decoding: \
-     */\
-    uint8_t *data[4];\
-    int linesize[4];\
-    /**\
-     * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.\
-     * This isn't used by libavcodec unless the default get/release_buffer() is used.\
-     * - encoding: \
-     * - decoding: \
-     */\
-    uint8_t *base[4];\
-    /**\
-     * 1 -> keyframe, 0-> not\
-     * - encoding: Set by libavcodec.\
-     * - decoding: Set by libavcodec.\
-     */\
-    int key_frame;\
-\
-    /**\
-     * Picture type of the frame, see ?_TYPE below.\
-     * - encoding: Set by libavcodec. for coded_picture (and set by user for input).\
-     * - decoding: Set by libavcodec.\
-     */\
-    enum AVPictureType pict_type;\
-\
-    /**\
-     * presentation timestamp in time_base units (time when frame should be shown to user)\
-     * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed.\
-     * - encoding: MUST be set by user.\
-     * - decoding: Set by libavcodec.\
-     */\
-    int64_t pts;\
-\
-    /**\
-     * picture number in bitstream order\
-     * - encoding: set by\
-     * - decoding: Set by libavcodec.\
-     */\
-    int coded_picture_number;\
-    /**\
-     * picture number in display order\
-     * - encoding: set by\
-     * - decoding: Set by libavcodec.\
-     */\
-    int display_picture_number;\
-\
-    /**\
-     * quality (between 1 (good) and FF_LAMBDA_MAX (bad))\
-     * - encoding: Set by libavcodec. for coded_picture (and set by user for input).\
-     * - decoding: Set by libavcodec.\
-     */\
-    int quality;\
-\
-    /**\
-     * buffer age (1->was last buffer and dint change, 2->..., ...).\
-     * Set to INT_MAX if the buffer has not been used yet.\
-     * - encoding: unused\
-     * - decoding: MUST be set by get_buffer().\
-     */\
-    int age;\
-\
-    /**\
-     * is this picture used as reference\
-     * The values for this are the same as the MpegEncContext.picture_structure\
-     * variable, that is 1->top field, 2->bottom field, 3->frame/both fields.\
-     * Set to 4 for delayed, non-reference frames.\
-     * - encoding: unused\
-     * - decoding: Set by libavcodec. (before get_buffer() call)).\
-     */\
-    int reference;\
-\
-    /**\
-     * QP table\
-     * - encoding: unused\
-     * - decoding: Set by libavcodec.\
-     */\
-    int8_t *qscale_table;\
-    /**\
-     * QP store stride\
-     * - encoding: unused\
-     * - decoding: Set by libavcodec.\
-     */\
-    int qstride;\
-\
-    /**\
-     * mbskip_table[mb]>=1 if MB didn't change\
-     * stride= mb_width = (width+15)>>4\
-     * - encoding: unused\
-     * - decoding: Set by libavcodec.\
-     */\
-    uint8_t *mbskip_table;\
-\
-    /**\
-     * motion vector table\
-     * @code\
-     * example:\
-     * int mv_sample_log2= 4 - motion_subsample_log2;\
-     * int mb_width= (width+15)>>4;\
-     * int mv_stride= (mb_width << mv_sample_log2) + 1;\
-     * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];\
-     * @endcode\
-     * - encoding: Set by user.\
-     * - decoding: Set by libavcodec.\
-     */\
-    int16_t (*motion_val[2])[2];\
-\
-    /**\
-     * macroblock type table\
-     * mb_type_base + mb_width + 2\
-     * - encoding: Set by user.\
-     * - decoding: Set by libavcodec.\
-     */\
-    uint32_t *mb_type;\
-\
-    /**\
-     * log2 of the size of the block which a single vector in motion_val represents:\
-     * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)\
-     * - encoding: unused\
-     * - decoding: Set by libavcodec.\
-     */\
-    uint8_t motion_subsample_log2;\
-\
-    /**\
-     * for some private data of the user\
-     * - encoding: unused\
-     * - decoding: Set by user.\
-     */\
-    void *opaque;\
-\
-    /**\
-     * error\
-     * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.\
-     * - decoding: unused\
-     */\
-    uint64_t error[4];\
-\
-    /**\
-     * type of the buffer (to keep track of who has to deallocate data[*])\
-     * - encoding: Set by the one who allocates it.\
-     * - decoding: Set by the one who allocates it.\
-     * Note: User allocated (direct rendering) & internal buffers cannot coexist currently.\
-     */\
-    int type;\
-\
-    /**\
-     * When decoding, this signals how much the picture must be delayed.\
-     * extra_delay = repeat_pict / (2*fps)\
-     * - encoding: unused\
-     * - decoding: Set by libavcodec.\
-     */\
-    int repeat_pict;\
-\
-    /**\
-     * \
-     */\
-    int qscale_type;\
-\
-    /**\
-     * The content of the picture is interlaced.\
-     * - encoding: Set by user.\
-     * - decoding: Set by libavcodec. (default 0)\
-     */\
-    int interlaced_frame;\
-\
-    /**\
-     * If the content is interlaced, is top field displayed first.\
-     * - encoding: Set by user.\
-     * - decoding: Set by libavcodec.\
-     */\
-    int top_field_first;\
-\
-    /**\
-     * Pan scan.\
-     * - encoding: Set by user.\
-     * - decoding: Set by libavcodec.\
-     */\
-    AVPanScan *pan_scan;\
-\
-    /**\
-     * Tell user application that palette has changed from previous frame.\
-     * - encoding: ??? (no palette-enabled encoder yet)\
-     * - decoding: Set by libavcodec. (default 0).\
-     */\
-    int palette_has_changed;\
-\
-    /**\
-     * codec suggestion on buffer type if != 0\
-     * - encoding: unused\
-     * - decoding: Set by libavcodec. (before get_buffer() call)).\
-     */\
-    int buffer_hints;\
-\
-    /**\
-     * DCT coefficients\
-     * - encoding: unused\
-     * - decoding: Set by libavcodec.\
-     */\
-    short *dct_coeff;\
-\
-    /**\
-     * motion reference frame index\
-     * the order in which these are stored can depend on the codec.\
-     * - encoding: Set by user.\
-     * - decoding: Set by libavcodec.\
-     */\
-    int8_t *ref_index[2];\
-\
-    /**\
-     * reordered opaque 64bit (generally an integer or a double precision float\
-     * PTS but can be anything).\
-     * The user sets AVCodecContext.reordered_opaque to represent the input at\
-     * that time,\
-     * the decoder reorders values as needed and sets AVFrame.reordered_opaque\
-     * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque\
-     * @deprecated in favor of pkt_pts\
-     * - encoding: unused\
-     * - decoding: Read by user.\
-     */\
-    int64_t reordered_opaque;\
-\
-    /**\
-     * hardware accelerator private data (Libav-allocated)\
-     * - encoding: unused\
-     * - decoding: Set by libavcodec\
-     */\
-    void *hwaccel_picture_private;\
-\
-    /**\
-     * reordered pts from the last AVPacket that has been input into the decoder\
-     * - encoding: unused\
-     * - decoding: Read by user.\
-     */\
-    int64_t pkt_pts;\
-\
-    /**\
-     * dts from the last AVPacket that has been input into the decoder\
-     * - encoding: unused\
-     * - decoding: Read by user.\
-     */\
-    int64_t pkt_dts;\
-\
-    /**\
-     * the AVCodecContext which ff_thread_get_buffer() was last called on\
-     * - encoding: Set by libavcodec.\
-     * - decoding: Set by libavcodec.\
-     */\
-    struct AVCodecContext *owner;\
-\
-    /**\
-     * used by multithreading to store frame-specific info\
-     * - encoding: Set by libavcodec.\
-     * - decoding: Set by libavcodec.\
-     */\
-    void *thread_opaque;\
-
 #define FF_QSCALE_TYPE_MPEG1 0
 #define FF_QSCALE_TYPE_MPEG2 1
 #define FF_QSCALE_TYPE_H264  2
@@ -1095,15 +835,270 @@ typedef struct AVPacket {
 
 /**
  * Audio Video Frame.
- * New fields can be added to the end of FF_COMMON_FRAME with minor version
- * bumps.
- * Removal, reordering and changes to existing fields require a major
- * version bump. No fields should be added into AVFrame before or after
- * FF_COMMON_FRAME!
+ * New fields can be added to the end of AVFRAME with minor version
+ * bumps. Removal, reordering and changes to existing fields require
+ * a major version bump.
  * sizeof(AVFrame) must not be used outside libav*.
  */
 typedef struct AVFrame {
-    FF_COMMON_FRAME
+    /**
+     * pointer to the picture planes.
+     * This might be different from the first allocated byte
+     * - encoding:
+     * - decoding:
+     */
+    uint8_t *data[4];
+    int linesize[4];
+    /**
+     * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.
+     * This isn't used by libavcodec unless the default get/release_buffer() is used.
+     * - encoding:
+     * - decoding:
+     */
+    uint8_t *base[4];
+    /**
+     * 1 -> keyframe, 0-> not
+     * - encoding: Set by libavcodec.
+     * - decoding: Set by libavcodec.
+     */
+    int key_frame;
+
+    /**
+     * Picture type of the frame, see ?_TYPE below.
+     * - encoding: Set by libavcodec. for coded_picture (and set by user for input).
+     * - decoding: Set by libavcodec.
+     */
+    enum AVPictureType pict_type;
+
+    /**
+     * presentation timestamp in time_base units (time when frame should be shown to user)
+     * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed.
+     * - encoding: MUST be set by user.
+     * - decoding: Set by libavcodec.
+     */
+    int64_t pts;
+
+    /**
+     * picture number in bitstream order
+     * - encoding: set by
+     * - decoding: Set by libavcodec.
+     */
+    int coded_picture_number;
+    /**
+     * picture number in display order
+     * - encoding: set by
+     * - decoding: Set by libavcodec.
+     */
+    int display_picture_number;
+
+    /**
+     * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
+     * - encoding: Set by libavcodec. for coded_picture (and set by user for input).
+     * - decoding: Set by libavcodec.
+     */
+    int quality;
+
+    /**
+     * buffer age (1->was last buffer and dint change, 2->..., ...).
+     * Set to INT_MAX if the buffer has not been used yet.
+     * - encoding: unused
+     * - decoding: MUST be set by get_buffer().
+     */
+    int age;
+
+    /**
+     * is this picture used as reference
+     * The values for this are the same as the MpegEncContext.picture_structure
+     * variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
+     * Set to 4 for delayed, non-reference frames.
+     * - encoding: unused
+     * - decoding: Set by libavcodec. (before get_buffer() call)).
+     */
+    int reference;
+
+    /**
+     * QP table
+     * - encoding: unused
+     * - decoding: Set by libavcodec.
+     */
+    int8_t *qscale_table;
+    /**
+     * QP store stride
+     * - encoding: unused
+     * - decoding: Set by libavcodec.
+     */
+    int qstride;
+
+    /**
+     * mbskip_table[mb]>=1 if MB didn't change
+     * stride= mb_width = (width+15)>>4
+     * - encoding: unused
+     * - decoding: Set by libavcodec.
+     */
+    uint8_t *mbskip_table;
+
+    /**
+     * motion vector table
+     * @code
+     * example:
+     * int mv_sample_log2= 4 - motion_subsample_log2;
+     * int mb_width= (width+15)>>4;
+     * int mv_stride= (mb_width << mv_sample_log2) + 1;
+     * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
+     * @endcode
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+    int16_t (*motion_val[2])[2];
+
+    /**
+     * macroblock type table
+     * mb_type_base + mb_width + 2
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+    uint32_t *mb_type;
+
+    /**
+     * log2 of the size of the block which a single vector in motion_val represents:
+     * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
+     * - encoding: unused
+     * - decoding: Set by libavcodec.
+     */
+    uint8_t motion_subsample_log2;
+
+    /**
+     * for some private data of the user
+     * - encoding: unused
+     * - decoding: Set by user.
+     */
+    void *opaque;
+
+    /**
+     * error
+     * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.
+     * - decoding: unused
+     */
+    uint64_t error[4];
+
+    /**
+     * type of the buffer (to keep track of who has to deallocate data[*])
+     * - encoding: Set by the one who allocates it.
+     * - decoding: Set by the one who allocates it.
+     * Note: User allocated (direct rendering) & internal buffers cannot coexist currently.
+     */
+    int type;
+
+    /**
+     * When decoding, this signals how much the picture must be delayed.
+     * extra_delay = repeat_pict / (2*fps)
+     * - encoding: unused
+     * - decoding: Set by libavcodec.
+     */
+    int repeat_pict;
+
+    /**
+     *
+     */
+    int qscale_type;
+
+    /**
+     * The content of the picture is interlaced.
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec. (default 0)
+     */
+    int interlaced_frame;
+
+    /**
+     * If the content is interlaced, is top field displayed first.
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+    int top_field_first;
+
+    /**
+     * Pan scan.
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+    AVPanScan *pan_scan;
+
+    /**
+     * Tell user application that palette has changed from previous frame.
+     * - encoding: ??? (no palette-enabled encoder yet)
+     * - decoding: Set by libavcodec. (default 0).
+     */
+    int palette_has_changed;
+
+    /**
+     * codec suggestion on buffer type if != 0
+     * - encoding: unused
+     * - decoding: Set by libavcodec. (before get_buffer() call)).
+     */
+    int buffer_hints;
+
+    /**
+     * DCT coefficients
+     * - encoding: unused
+     * - decoding: Set by libavcodec.
+     */
+    short *dct_coeff;
+
+    /**
+     * motion reference frame index
+     * the order in which these are stored can depend on the codec.
+     * - encoding: Set by user.
+     * - decoding: Set by libavcodec.
+     */
+    int8_t *ref_index[2];
+
+    /**
+     * reordered opaque 64bit (generally an integer or a double precision float
+     * PTS but can be anything).
+     * The user sets AVCodecContext.reordered_opaque to represent the input at
+     * that time,
+     * the decoder reorders values as needed and sets AVFrame.reordered_opaque
+     * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
+     * @deprecated in favor of pkt_pts
+     * - encoding: unused
+     * - decoding: Read by user.
+     */
+    int64_t reordered_opaque;
+
+    /**
+     * hardware accelerator private data (Libav-allocated)
+     * - encoding: unused
+     * - decoding: Set by libavcodec
+     */
+    void *hwaccel_picture_private;
+
+    /**
+     * reordered pts from the last AVPacket that has been input into the decoder
+     * - encoding: unused
+     * - decoding: Read by user.
+     */
+    int64_t pkt_pts;
+
+    /**
+     * dts from the last AVPacket that has been input into the decoder
+     * - encoding: unused
+     * - decoding: Read by user.
+     */
+    int64_t pkt_dts;
+
+    /**
+     * the AVCodecContext which ff_thread_get_buffer() was last called on
+     * - encoding: Set by libavcodec.
+     * - decoding: Set by libavcodec.
+     */
+    struct AVCodecContext *owner;
+
+    /**
+     * used by multithreading to store frame-specific info
+     * - encoding: Set by libavcodec.
+     * - decoding: Set by libavcodec.
+     */
+    void *thread_opaque;
 } AVFrame;
 
 /**
@@ -333,9 +333,9 @@ static inline void mc_dir_part(AVSContext *h,Picture *pic,int square,
     const int mx= mv->x + src_x_offset*8;
     const int my= mv->y + src_y_offset*8;
     const int luma_xy= (mx&3) + ((my&3)<<2);
-    uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->l_stride;
-    uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->c_stride;
-    uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->c_stride;
+    uint8_t * src_y  = pic->f.data[0] + (mx >> 2) + (my >> 2) * h->l_stride;
+    uint8_t * src_cb = pic->f.data[1] + (mx >> 3) + (my >> 3) * h->c_stride;
+    uint8_t * src_cr = pic->f.data[2] + (mx >> 3) + (my >> 3) * h->c_stride;
     int extra_width= 0; //(s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
     int extra_height= extra_width;
     int emu=0;
@@ -344,7 +344,7 @@ static inline void mc_dir_part(AVSContext *h,Picture *pic,int square,
     const int pic_width = 16*h->mb_width;
     const int pic_height = 16*h->mb_height;
 
-    if(!pic->data[0])
+    if(!pic->f.data[0])
         return;
     if(mx&7) extra_width -= 3;
     if(my&7) extra_height -= 3;
@@ -602,9 +602,9 @@ int ff_cavs_next_mb(AVSContext *h) {
         h->mbx = 0;
         h->mby++;
         /* re-calculate sample pointers */
-        h->cy = h->picture.data[0] + h->mby*16*h->l_stride;
-        h->cu = h->picture.data[1] + h->mby*8*h->c_stride;
-        h->cv = h->picture.data[2] + h->mby*8*h->c_stride;
+        h->cy = h->picture.f.data[0] + h->mby * 16 * h->l_stride;
+        h->cu = h->picture.f.data[1] + h->mby * 8 * h->c_stride;
+        h->cv = h->picture.f.data[2] + h->mby * 8 * h->c_stride;
         if(h->mby == h->mb_height) { //frame end
             return 0;
         }
@@ -629,11 +629,11 @@ void ff_cavs_init_pic(AVSContext *h) {
     h->mv[MV_FWD_X0] = ff_cavs_dir_mv;
     set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
    h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
-    h->cy = h->picture.data[0];
-    h->cu = h->picture.data[1];
-    h->cv = h->picture.data[2];
-    h->l_stride = h->picture.linesize[0];
-    h->c_stride = h->picture.linesize[1];
+    h->cy = h->picture.f.data[0];
+    h->cu = h->picture.f.data[1];
+    h->cv = h->picture.f.data[2];
+    h->l_stride = h->picture.f.linesize[0];
+    h->c_stride = h->picture.f.linesize[1];
     h->luma_scan[2] = 8*h->l_stride;
     h->luma_scan[3] = 8*h->l_stride+8;
     h->mbx = h->mby = h->mbidx = 0;
@@ -476,8 +476,8 @@ static int decode_pic(AVSContext *h) {
             return -1;
         }
         /* make sure we have the reference frames we need */
-        if(!h->DPB[0].data[0] ||
-          (!h->DPB[1].data[0] && h->pic_type == AV_PICTURE_TYPE_B))
+        if(!h->DPB[0].f.data[0] ||
+          (!h->DPB[1].f.data[0] && h->pic_type == AV_PICTURE_TYPE_B))
             return -1;
     } else {
         h->pic_type = AV_PICTURE_TYPE_I;
@@ -494,7 +494,7 @@ static int decode_pic(AVSContext *h) {
         skip_bits(&s->gb,1); //marker_bit
     }
     /* release last B frame */
-    if(h->picture.data[0])
+    if(h->picture.f.data[0])
         s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture);
 
     s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture);
@@ -585,7 +585,7 @@ static int decode_pic(AVSContext *h) {
         } while(ff_cavs_next_mb(h));
     }
     if(h->pic_type != AV_PICTURE_TYPE_B) {
-        if(h->DPB[1].data[0])
+        if(h->DPB[1].f.data[0])
             s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
         h->DPB[1] = h->DPB[0];
        h->DPB[0] = h->picture;
@@ -648,7 +648,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
     s->avctx = avctx;
 
     if (buf_size == 0) {
-        if(!s->low_delay && h->DPB[0].data[0]) {
+        if (!s->low_delay && h->DPB[0].f.data[0]) {
             *data_size = sizeof(AVPicture);
             *picture = *(AVFrame *) &h->DPB[0];
         }
@@ -669,9 +669,9 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
             break;
         case PIC_I_START_CODE:
             if(!h->got_keyframe) {
-                if(h->DPB[0].data[0])
+                if(h->DPB[0].f.data[0])
                     avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]);
-                if(h->DPB[1].data[0])
+                if(h->DPB[1].f.data[0])
                     avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]);
                 h->got_keyframe = 1;
             }
@@ -685,7 +685,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
                 break;
             *data_size = sizeof(AVPicture);
             if(h->pic_type != AV_PICTURE_TYPE_B) {
-                if(h->DPB[1].data[0]) {
+                if(h->DPB[1].f.data[0]) {
                     *picture = *(AVFrame *) &h->DPB[1];
                 } else {
                     *data_size = 0;
@@ -41,9 +41,9 @@
 #undef mb_intra
 
 static void decode_mb(MpegEncContext *s, int ref){
-    s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* s->linesize ) + s->mb_x * 16;
-    s->dest[1] = s->current_picture.data[1] + (s->mb_y * (16>>s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16>>s->chroma_x_shift);
-    s->dest[2] = s->current_picture.data[2] + (s->mb_y * (16>>s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16>>s->chroma_x_shift);
+    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
+    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
+    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
 
     if(CONFIG_H264_DECODER && s->codec_id == CODEC_ID_H264){
         H264Context *h= (void*)s;
@@ -52,7 +52,7 @@ static void decode_mb(MpegEncContext *s, int ref){
         assert(ref>=0);
         if(ref >= h->ref_count[0]) //FIXME it is posible albeit uncommon that slice references differ between slices, we take the easy approuch and ignore it for now. If this turns out to have any relevance in practice then correct remapping should be added
             ref=0;
-        fill_rectangle(&s->current_picture.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
+        fill_rectangle(&s->current_picture.f.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
         fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
         fill_rectangle(h->mv_cache[0][ scan8[0] ], 4, 4, 8, pack16to32(s->mv[0][0][0],s->mv[0][0][1]), 4);
         assert(!FRAME_MBAFF);
@@ -166,14 +166,14 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
 
             error= s->error_status_table[mb_index];
 
-            if(IS_INTER(s->current_picture.mb_type[mb_index])) continue; //inter
+            if(IS_INTER(s->current_picture.f.mb_type[mb_index])) continue; //inter
             if(!(error&DC_ERROR)) continue; //dc-ok
 
             /* right block */
             for(j=b_x+1; j<w; j++){
                 int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
                 int error_j= s->error_status_table[mb_index_j];
-                int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+                int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
                 if(intra_j==0 || !(error_j&DC_ERROR)){
                     color[0]= dc[j + b_y*stride];
                     distance[0]= j-b_x;
@@ -185,7 +185,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
             for(j=b_x-1; j>=0; j--){
                 int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
                 int error_j= s->error_status_table[mb_index_j];
-                int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+                int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
                 if(intra_j==0 || !(error_j&DC_ERROR)){
                     color[1]= dc[j + b_y*stride];
                     distance[1]= b_x-j;
@@ -197,7 +197,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
             for(j=b_y+1; j<h; j++){
                 int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
                 int error_j= s->error_status_table[mb_index_j];
-                int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+                int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
                 if(intra_j==0 || !(error_j&DC_ERROR)){
                     color[2]= dc[b_x + j*stride];
                     distance[2]= j-b_y;
@@ -209,7 +209,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
             for(j=b_y-1; j>=0; j--){
                 int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
                 int error_j= s->error_status_table[mb_index_j];
-                int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+                int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
                 if(intra_j==0 || !(error_j&DC_ERROR)){
                     color[3]= dc[b_x + j*stride];
                     distance[3]= b_y-j;
@@ -248,13 +248,13 @@ static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
             int y;
             int left_status = s->error_status_table[( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride];
             int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride];
-            int left_intra= IS_INTRA(s->current_picture.mb_type [( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride]);
-            int right_intra= IS_INTRA(s->current_picture.mb_type [((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride]);
+            int left_intra  = IS_INTRA(s->current_picture.f.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+            int right_intra = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
             int left_damage = left_status&(DC_ERROR|AC_ERROR|MV_ERROR);
             int right_damage= right_status&(DC_ERROR|AC_ERROR|MV_ERROR);
             int offset= b_x*8 + b_y*stride*8;
-            int16_t *left_mv= s->current_picture.motion_val[0][mvy_stride*b_y + mvx_stride* b_x ];
-            int16_t *right_mv= s->current_picture.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
+            int16_t *left_mv=  s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride* b_x ];
+            int16_t *right_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
 
             if(!(left_damage||right_damage)) continue; // both undamaged
 
@@ -311,13 +311,13 @@ static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
             int x;
             int top_status = s->error_status_table[(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride];
             int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride];
-            int top_intra= IS_INTRA(s->current_picture.mb_type [(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride]);
-            int bottom_intra= IS_INTRA(s->current_picture.mb_type [(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride]);
+            int top_intra    = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
+            int bottom_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
             int top_damage = top_status&(DC_ERROR|AC_ERROR|MV_ERROR);
             int bottom_damage= bottom_status&(DC_ERROR|AC_ERROR|MV_ERROR);
             int offset= b_x*8 + b_y*stride*8;
-            int16_t *top_mv= s->current_picture.motion_val[0][mvy_stride* b_y + mvx_stride*b_x];
-            int16_t *bottom_mv= s->current_picture.motion_val[0][mvy_stride*(b_y+1) + mvx_stride*b_x];
+            int16_t *top_mv    = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
+            int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
 
             if(!(top_damage||bottom_damage)) continue; // both undamaged
 
@@ -376,7 +376,7 @@ static void guess_mv(MpegEncContext *s){
             int f=0;
             int error= s->error_status_table[mb_xy];
 
-            if(IS_INTRA(s->current_picture.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
+            if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
             if(!(error&MV_ERROR)) f=MV_FROZEN; //inter with undamaged MV
 
             fixed[mb_xy]= f;
@@ -389,10 +389,10 @@ static void guess_mv(MpegEncContext *s){
         for(mb_x=0; mb_x<s->mb_width; mb_x++){
             const int mb_xy= mb_x + mb_y*s->mb_stride;
 
-            if(IS_INTRA(s->current_picture.mb_type[mb_xy])) continue;
+            if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) continue;
             if(!(s->error_status_table[mb_xy]&MV_ERROR)) continue;
 
-            s->mv_dir = s->last_picture.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
+            s->mv_dir = s->last_picture.f.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
             s->mb_intra=0;
             s->mv_type = MV_TYPE_16X16;
             s->mb_skipped=0;
@@ -434,8 +434,8 @@ int score_sum=0;
                     if((mb_x^mb_y^pass)&1) continue;
 
                     if(fixed[mb_xy]==MV_FROZEN) continue;
-                    assert(!IS_INTRA(s->current_picture.mb_type[mb_xy]));
-                    assert(s->last_picture_ptr && s->last_picture_ptr->data[0]);
+                    assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
+                    assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);
 
                     j=0;
                    if(mb_x>0 && fixed[mb_xy-1 ]==MV_FROZEN) j=1;
@@ -454,27 +454,27 @@ int score_sum=0;
                         none_left=0;
 
                     if(mb_x>0 && fixed[mb_xy-1]){
-                        mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - mot_step][0];
-                        mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - mot_step][1];
-                        ref         [pred_count]   = s->current_picture.ref_index[0][4*(mb_xy-1)];
+                        mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_step][0];
+                        mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_step][1];
+                        ref         [pred_count]   = s->current_picture.f.ref_index[0][4*(mb_xy-1)];
                         pred_count++;
                     }
                     if(mb_x+1<mb_width && fixed[mb_xy+1]){
-                        mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + mot_step][0];
-                        mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + mot_step][1];
-                        ref         [pred_count]   = s->current_picture.ref_index[0][4*(mb_xy+1)];
+                        mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_step][0];
+                        mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_step][1];
+                        ref         [pred_count]   = s->current_picture.f.ref_index[0][4*(mb_xy+1)];
                         pred_count++;
                     }
                     if(mb_y>0 && fixed[mb_xy-mb_stride]){
-                        mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - mot_stride*mot_step][0];
-                        mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - mot_stride*mot_step][1];
-                        ref         [pred_count]   = s->current_picture.ref_index[0][4*(mb_xy-s->mb_stride)];
+                        mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][0];
+                        mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][1];
+                        ref         [pred_count]   = s->current_picture.f.ref_index[0][4*(mb_xy-s->mb_stride)];
                         pred_count++;
                     }
                     if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
-                        mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + mot_stride*mot_step][0];
-                        mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + mot_stride*mot_step][1];
-                        ref         [pred_count]   = s->current_picture.ref_index[0][4*(mb_xy+s->mb_stride)];
+                        mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][0];
+                        mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][1];
+                        ref         [pred_count]   = s->current_picture.f.ref_index[0][4*(mb_xy+s->mb_stride)];
                         pred_count++;
                     }
                     if(pred_count==0) continue;
@ -534,16 +534,16 @@ skip_mean_and_median:
|
|||||||
ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
|
ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
|
||||||
mb_y, 0);
|
mb_y, 0);
|
||||||
}
|
}
|
||||||
if (!s->last_picture.motion_val[0] ||
|
if (!s->last_picture.f.motion_val[0] ||
|
||||||
!s->last_picture.ref_index[0])
|
!s->last_picture.f.ref_index[0])
|
||||||
goto skip_last_mv;
|
goto skip_last_mv;
|
||||||
prev_x = s->last_picture.motion_val[0][mot_index][0];
|
prev_x = s->last_picture.f.motion_val[0][mot_index][0];
|
||||||
prev_y = s->last_picture.motion_val[0][mot_index][1];
|
prev_y = s->last_picture.f.motion_val[0][mot_index][1];
|
||||||
prev_ref = s->last_picture.ref_index[0][4*mb_xy];
|
prev_ref = s->last_picture.f.ref_index[0][4*mb_xy];
|
||||||
} else {
|
} else {
|
||||||
prev_x = s->current_picture.motion_val[0][mot_index][0];
|
prev_x = s->current_picture.f.motion_val[0][mot_index][0];
|
||||||
prev_y = s->current_picture.motion_val[0][mot_index][1];
|
prev_y = s->current_picture.f.motion_val[0][mot_index][1];
|
||||||
prev_ref = s->current_picture.ref_index[0][4*mb_xy];
|
prev_ref = s->current_picture.f.ref_index[0][4*mb_xy];
|
||||||
}
|
}
|
||||||
|
|
||||||
/* last MV */
|
/* last MV */
|
||||||
@ -565,10 +565,10 @@ skip_mean_and_median:
|
|||||||
|
|
||||||
for(j=0; j<pred_count; j++){
|
for(j=0; j<pred_count; j++){
|
||||||
int score=0;
|
int score=0;
|
||||||
uint8_t *src= s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
|
uint8_t *src = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
|
||||||
|
|
||||||
s->current_picture.motion_val[0][mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0];
|
s->current_picture.f.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0];
|
||||||
s->current_picture.motion_val[0][mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1];
|
s->current_picture.f.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1];
|
||||||
|
|
||||||
if(ref[j]<0) //predictor intra or otherwise not available
|
if(ref[j]<0) //predictor intra or otherwise not available
|
||||||
continue;
|
continue;
|
||||||
@ -607,8 +607,8 @@ score_sum+= best_score;
|
|||||||
|
|
||||||
for(i=0; i<mot_step; i++)
|
for(i=0; i<mot_step; i++)
|
||||||
for(j=0; j<mot_step; j++){
|
for(j=0; j<mot_step; j++){
|
||||||
s->current_picture.motion_val[0][mot_index+i+j*mot_stride][0]= s->mv[0][0][0];
|
s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
|
||||||
s->current_picture.motion_val[0][mot_index+i+j*mot_stride][1]= s->mv[0][0][1];
|
s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
|
||||||
}
|
}
|
||||||
|
|
||||||
decode_mb(s, ref[best_pred]);
|
decode_mb(s, ref[best_pred]);
|
||||||
@ -640,7 +640,7 @@ score_sum+= best_score;
|
|||||||
static int is_intra_more_likely(MpegEncContext *s){
|
static int is_intra_more_likely(MpegEncContext *s){
|
||||||
int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
|
int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
|
||||||
|
|
||||||
if(!s->last_picture_ptr || !s->last_picture_ptr->data[0]) return 1; //no previous frame available -> use spatial prediction
|
if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) return 1; //no previous frame available -> use spatial prediction
|
||||||
|
|
||||||
undamaged_count=0;
|
undamaged_count=0;
|
||||||
for(i=0; i<s->mb_num; i++){
|
for(i=0; i<s->mb_num; i++){
|
||||||
@ -652,7 +652,7 @@ static int is_intra_more_likely(MpegEncContext *s){
|
|||||||
|
|
||||||
if(s->codec_id == CODEC_ID_H264){
|
if(s->codec_id == CODEC_ID_H264){
|
||||||
H264Context *h= (void*)s;
|
H264Context *h= (void*)s;
|
||||||
if(h->ref_count[0] <= 0 || !h->ref_list[0][0].data[0])
|
if (h->ref_count[0] <= 0 || !h->ref_list[0][0].f.data[0])
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -679,8 +679,8 @@ static int is_intra_more_likely(MpegEncContext *s){
|
|||||||
if((j%skip_amount) != 0) continue; //skip a few to speed things up
|
if((j%skip_amount) != 0) continue; //skip a few to speed things up
|
||||||
|
|
||||||
if(s->pict_type==AV_PICTURE_TYPE_I){
|
if(s->pict_type==AV_PICTURE_TYPE_I){
|
||||||
uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
|
uint8_t *mb_ptr = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
|
||||||
uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;
|
uint8_t *last_mb_ptr= s->last_picture.f.data [0] + mb_x*16 + mb_y*16*s->linesize;
|
||||||
|
|
||||||
if (s->avctx->codec_id == CODEC_ID_H264) {
|
if (s->avctx->codec_id == CODEC_ID_H264) {
|
||||||
// FIXME
|
// FIXME
|
||||||
@ -691,7 +691,7 @@ static int is_intra_more_likely(MpegEncContext *s){
|
|||||||
is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
|
is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
|
||||||
is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
|
is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
|
||||||
}else{
|
}else{
|
||||||
if(IS_INTRA(s->current_picture.mb_type[mb_xy]))
|
if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
|
||||||
is_intra_likely++;
|
is_intra_likely++;
|
||||||
else
|
else
|
||||||
is_intra_likely--;
|
is_intra_likely--;
|
||||||
@ -793,15 +793,15 @@ void ff_er_frame_end(MpegEncContext *s){
|
|||||||
s->picture_structure != PICT_FRAME || // we dont support ER of field pictures yet, though it should not crash if enabled
|
s->picture_structure != PICT_FRAME || // we dont support ER of field pictures yet, though it should not crash if enabled
|
||||||
s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return;
|
s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return;
|
||||||
|
|
||||||
if(s->current_picture.motion_val[0] == NULL){
|
if (s->current_picture.f.motion_val[0] == NULL) {
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
|
av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
|
||||||
|
|
||||||
for(i=0; i<2; i++){
|
for(i=0; i<2; i++){
|
||||||
pic->ref_index[i]= av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
|
pic->f.ref_index[i] = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
|
||||||
pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t));
|
pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t));
|
||||||
pic->motion_val[i]= pic->motion_val_base[i]+4;
|
pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
|
||||||
}
|
}
|
||||||
pic->motion_subsample_log2= 3;
|
pic->f.motion_subsample_log2 = 3;
|
||||||
s->current_picture= *s->current_picture_ptr;
|
s->current_picture= *s->current_picture_ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -951,25 +951,25 @@ void ff_er_frame_end(MpegEncContext *s){
|
|||||||
continue;
|
continue;
|
||||||
|
|
||||||
if(is_intra_likely)
|
if(is_intra_likely)
|
||||||
s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4;
|
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
|
||||||
else
|
else
|
||||||
s->current_picture.mb_type[mb_xy]= MB_TYPE_16x16 | MB_TYPE_L0;
|
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
|
||||||
}
|
}
|
||||||
|
|
||||||
// change inter to intra blocks if no reference frames are available
|
// change inter to intra blocks if no reference frames are available
|
||||||
if (!s->last_picture.data[0] && !s->next_picture.data[0])
|
if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
|
||||||
for(i=0; i<s->mb_num; i++){
|
for(i=0; i<s->mb_num; i++){
|
||||||
const int mb_xy= s->mb_index2xy[i];
|
const int mb_xy= s->mb_index2xy[i];
|
||||||
if(!IS_INTRA(s->current_picture.mb_type[mb_xy]))
|
if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
|
||||||
s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4;
|
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* handle inter blocks with damaged AC */
|
/* handle inter blocks with damaged AC */
|
||||||
for(mb_y=0; mb_y<s->mb_height; mb_y++){
|
for(mb_y=0; mb_y<s->mb_height; mb_y++){
|
||||||
for(mb_x=0; mb_x<s->mb_width; mb_x++){
|
for(mb_x=0; mb_x<s->mb_width; mb_x++){
|
||||||
const int mb_xy= mb_x + mb_y * s->mb_stride;
|
const int mb_xy= mb_x + mb_y * s->mb_stride;
|
||||||
const int mb_type= s->current_picture.mb_type[mb_xy];
|
const int mb_type= s->current_picture.f.mb_type[mb_xy];
|
||||||
int dir = !s->last_picture.data[0];
|
int dir = !s->last_picture.f.data[0];
|
||||||
error= s->error_status_table[mb_xy];
|
error= s->error_status_table[mb_xy];
|
||||||
|
|
||||||
if(IS_INTRA(mb_type)) continue; //intra
|
if(IS_INTRA(mb_type)) continue; //intra
|
||||||
@ -984,13 +984,13 @@ void ff_er_frame_end(MpegEncContext *s){
|
|||||||
int j;
|
int j;
|
||||||
s->mv_type = MV_TYPE_8X8;
|
s->mv_type = MV_TYPE_8X8;
|
||||||
for(j=0; j<4; j++){
|
for(j=0; j<4; j++){
|
||||||
s->mv[0][j][0] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][0];
|
s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
|
||||||
s->mv[0][j][1] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][1];
|
s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
|
||||||
}
|
}
|
||||||
}else{
|
}else{
|
||||||
s->mv_type = MV_TYPE_16X16;
|
s->mv_type = MV_TYPE_16X16;
|
||||||
s->mv[0][0][0] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
|
s->mv[0][0][0] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
|
||||||
s->mv[0][0][1] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
|
s->mv[0][0][1] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
|
||||||
}
|
}
|
||||||
|
|
||||||
s->dsp.clear_blocks(s->block[0]);
|
s->dsp.clear_blocks(s->block[0]);
|
||||||
@ -1007,7 +1007,7 @@ void ff_er_frame_end(MpegEncContext *s){
|
|||||||
for(mb_x=0; mb_x<s->mb_width; mb_x++){
|
for(mb_x=0; mb_x<s->mb_width; mb_x++){
|
||||||
int xy= mb_x*2 + mb_y*2*s->b8_stride;
|
int xy= mb_x*2 + mb_y*2*s->b8_stride;
|
||||||
const int mb_xy= mb_x + mb_y * s->mb_stride;
|
const int mb_xy= mb_x + mb_y * s->mb_stride;
|
||||||
const int mb_type= s->current_picture.mb_type[mb_xy];
|
const int mb_type= s->current_picture.f.mb_type[mb_xy];
|
||||||
error= s->error_status_table[mb_xy];
|
error= s->error_status_table[mb_xy];
|
||||||
|
|
||||||
if(IS_INTRA(mb_type)) continue;
|
if(IS_INTRA(mb_type)) continue;
|
||||||
@ -1015,8 +1015,8 @@ void ff_er_frame_end(MpegEncContext *s){
|
|||||||
if(!(error&AC_ERROR)) continue; //undamaged inter
|
if(!(error&AC_ERROR)) continue; //undamaged inter
|
||||||
|
|
||||||
s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD;
|
s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD;
|
||||||
if(!s->last_picture.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
|
if(!s->last_picture.f.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
|
||||||
if(!s->next_picture.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
|
if(!s->next_picture.f.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
|
||||||
s->mb_intra=0;
|
s->mb_intra=0;
|
||||||
s->mv_type = MV_TYPE_16X16;
|
s->mv_type = MV_TYPE_16X16;
|
||||||
s->mb_skipped=0;
|
s->mb_skipped=0;
|
||||||
@ -1031,10 +1031,10 @@ void ff_er_frame_end(MpegEncContext *s){
|
|||||||
ff_thread_await_progress((AVFrame *) s->next_picture_ptr,
|
ff_thread_await_progress((AVFrame *) s->next_picture_ptr,
|
||||||
mb_y, 0);
|
mb_y, 0);
|
||||||
}
|
}
|
||||||
s->mv[0][0][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp;
|
s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] * time_pb / time_pp;
|
||||||
s->mv[0][0][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp;
|
s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] * time_pb / time_pp;
|
||||||
s->mv[1][0][0] = s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp;
|
s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
|
||||||
s->mv[1][0][1] = s->next_picture.motion_val[0][xy][1]*(time_pb - time_pp)/time_pp;
|
s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
|
||||||
}else{
|
}else{
|
||||||
s->mv[0][0][0]= 0;
|
s->mv[0][0][0]= 0;
|
||||||
s->mv[0][0][1]= 0;
|
s->mv[0][0][1]= 0;
|
||||||
@ -1061,16 +1061,16 @@ void ff_er_frame_end(MpegEncContext *s){
|
|||||||
int16_t *dc_ptr;
|
int16_t *dc_ptr;
|
||||||
uint8_t *dest_y, *dest_cb, *dest_cr;
|
uint8_t *dest_y, *dest_cb, *dest_cr;
|
||||||
const int mb_xy= mb_x + mb_y * s->mb_stride;
|
const int mb_xy= mb_x + mb_y * s->mb_stride;
|
||||||
const int mb_type= s->current_picture.mb_type[mb_xy];
|
const int mb_type = s->current_picture.f.mb_type[mb_xy];
|
||||||
|
|
||||||
error= s->error_status_table[mb_xy];
|
             error= s->error_status_table[mb_xy];

             if(IS_INTRA(mb_type) && s->partitioned_frame) continue;
 //          if(error&MV_ERROR) continue; //inter data damaged FIXME is this good?

-            dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
-            dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
-            dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
+            dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
+            dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
+            dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;

             dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride];

             for(n=0; n<4; n++){

@@ -1110,16 +1110,16 @@ void ff_er_frame_end(MpegEncContext *s){
         for(mb_x=0; mb_x<s->mb_width; mb_x++){
             uint8_t *dest_y, *dest_cb, *dest_cr;
             const int mb_xy= mb_x + mb_y * s->mb_stride;
-            const int mb_type= s->current_picture.mb_type[mb_xy];
+            const int mb_type = s->current_picture.f.mb_type[mb_xy];

             error= s->error_status_table[mb_xy];

             if(IS_INTER(mb_type)) continue;
             if(!(error&AC_ERROR)) continue;              //undamaged

-            dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
-            dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
-            dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
+            dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
+            dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
+            dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;

             put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
         }

@@ -1127,14 +1127,14 @@ void ff_er_frame_end(MpegEncContext *s){

     if(s->avctx->error_concealment&FF_EC_DEBLOCK){
         /* filter horizontal block boundaries */
-        h_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
-        h_block_filter(s, s->current_picture.data[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
-        h_block_filter(s, s->current_picture.data[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
+        h_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
+        h_block_filter(s, s->current_picture.f.data[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
+        h_block_filter(s, s->current_picture.f.data[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);

         /* filter vertical block boundaries */
-        v_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
-        v_block_filter(s, s->current_picture.data[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
-        v_block_filter(s, s->current_picture.data[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
+        v_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
+        v_block_filter(s, s->current_picture.f.data[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
+        v_block_filter(s, s->current_picture.f.data[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
     }

 ec_clean:
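Every hunk in this commit applies the same mechanical rewrite: per-frame fields that FF_COMMON_FRAME used to expand directly into struct Picture are now reached through an embedded AVFrame member named f. A minimal sketch of the resulting layout, using mock types rather than the real libavcodec definitions:

    #include <stdint.h>

    /* Mock types for illustration only; the real AVFrame and Picture carry
     * many more fields than shown here. */
    typedef struct AVFrame {
        uint8_t  *data[4];       /* picture planes */
        int8_t   *qscale_table;  /* per-MB QP */
        uint32_t *mb_type;       /* per-MB type flags */
    } AVFrame;

    typedef struct Picture {
        AVFrame f;               /* was: FF_COMMON_FRAME expanded inline */
        int poc;                 /* codec-private fields stay direct members */
    } Picture;

    /* Access pattern before: pic->data[0], pic->qscale_table[mb_xy]
     * Access pattern after:  pic->f.data[0], pic->f.qscale_table[mb_xy] */
    static uint8_t *luma_plane(Picture *pic)
    {
        return pic->f.data[0];
    }

Codec-private fields such as poc, mmco_reset, or field_picture remain direct members of Picture, which is why lines like h->short_ref[0]->poc pass through the diff untouched.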
@@ -215,7 +215,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 )

         s->mv_dir = MV_DIR_FORWARD;
         s->mv_type = MV_TYPE_16X16;
-        s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+        s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
         s->mv[0][0][0] = 0;
         s->mv[0][0][1] = 0;
         s->mb_skipped = 1;

@@ -323,14 +323,14 @@ static int h261_decode_mb(H261Context *h){
     }

     if(s->mb_intra){
-        s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+        s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
         goto intra;
     }

     //set motion vectors
     s->mv_dir = MV_DIR_FORWARD;
     s->mv_type = MV_TYPE_16X16;
-    s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+    s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
     s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation
     s->mv[0][0][1] = h->current_mv_y * 2;

@@ -464,7 +464,7 @@ static int h261_decode_picture_header(H261Context *h){
     s->picture_number = (s->picture_number&~31) + i;

     s->avctx->time_base= (AVRational){1001, 30000};
-    s->current_picture.pts= s->picture_number;
+    s->current_picture.f.pts = s->picture_number;


     /* PTYPE starts here */

@@ -570,7 +570,7 @@ retry:
     }

     //we need to set current_picture_ptr before reading the header, otherwise we cannot store anyting in there
-    if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
+    if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
         int i= ff_find_unused_picture(s, 0);
         s->current_picture_ptr= &s->picture[i];
     }

@@ -596,8 +596,8 @@ retry:
     }

     // for skipping the frame
-    s->current_picture.pict_type= s->pict_type;
-    s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture.f.pict_type = s->pict_type;
+    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

     if(   (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
        ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)

@@ -52,7 +52,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
     const int wrap = s->b8_stride;
     const int xy = s->block_index[0];

-    s->current_picture.mbskip_table[mb_xy]= s->mb_skipped;
+    s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped;

     if(s->mv_type != MV_TYPE_8X8){
         int motion_x, motion_y;

@@ -71,30 +71,30 @@ void ff_h263_update_motion_val(MpegEncContext * s){
                 s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
                 s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
             }
-            s->current_picture.ref_index[0][4*mb_xy    ]=
-            s->current_picture.ref_index[0][4*mb_xy + 1]= s->field_select[0][0];
-            s->current_picture.ref_index[0][4*mb_xy + 2]=
-            s->current_picture.ref_index[0][4*mb_xy + 3]= s->field_select[0][1];
+            s->current_picture.f.ref_index[0][4*mb_xy    ] =
+            s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
+            s->current_picture.f.ref_index[0][4*mb_xy + 2] =
+            s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
         }

         /* no update if 8X8 because it has been done during parsing */
-        s->current_picture.motion_val[0][xy][0] = motion_x;
-        s->current_picture.motion_val[0][xy][1] = motion_y;
-        s->current_picture.motion_val[0][xy + 1][0] = motion_x;
-        s->current_picture.motion_val[0][xy + 1][1] = motion_y;
-        s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
-        s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
-        s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
-        s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
+        s->current_picture.f.motion_val[0][xy][0] = motion_x;
+        s->current_picture.f.motion_val[0][xy][1] = motion_y;
+        s->current_picture.f.motion_val[0][xy + 1][0] = motion_x;
+        s->current_picture.f.motion_val[0][xy + 1][1] = motion_y;
+        s->current_picture.f.motion_val[0][xy + wrap][0] = motion_x;
+        s->current_picture.f.motion_val[0][xy + wrap][1] = motion_y;
+        s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x;
+        s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y;
     }

     if(s->encoding){ //FIXME encoding MUST be cleaned up
         if (s->mv_type == MV_TYPE_8X8)
-            s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
+            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
         else if(s->mb_intra)
-            s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA;
+            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA;
         else
-            s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
+            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
     }
 }

@@ -154,7 +154,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
            Diag  Top
            Left  Center
         */
-        if(!IS_SKIP(s->current_picture.mb_type[xy])){
+        if (!IS_SKIP(s->current_picture.f.mb_type[xy])) {
             qp_c= s->qscale;
             s->dsp.h263_v_loop_filter(dest_y+8*linesize  , linesize, qp_c);
             s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);

@@ -164,10 +164,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
         if(s->mb_y){
             int qp_dt, qp_tt, qp_tc;

-            if(IS_SKIP(s->current_picture.mb_type[xy-s->mb_stride]))
+            if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride]))
                 qp_tt=0;
             else
-                qp_tt= s->current_picture.qscale_table[xy-s->mb_stride];
+                qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride];

             if(qp_c)
                 qp_tc= qp_c;

@@ -187,10 +187,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
                 s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);

             if(s->mb_x){
-                if(qp_tt || IS_SKIP(s->current_picture.mb_type[xy-1-s->mb_stride]))
+                if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride]))
                     qp_dt= qp_tt;
                 else
-                    qp_dt= s->current_picture.qscale_table[xy-1-s->mb_stride];
+                    qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride];

                 if(qp_dt){
                     const int chroma_qp= s->chroma_qscale_table[qp_dt];

@@ -209,10 +209,10 @@ void ff_h263_loop_filter(MpegEncContext * s){

         if(s->mb_x){
             int qp_lc;
-            if(qp_c || IS_SKIP(s->current_picture.mb_type[xy-1]))
+            if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1]))
                 qp_lc= qp_c;
             else
-                qp_lc= s->current_picture.qscale_table[xy-1];
+                qp_lc = s->current_picture.f.qscale_table[xy - 1];

             if(qp_lc){
                 s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);

@@ -321,7 +321,7 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
     static const int off[4]= {2, 1, 1, -1};

     wrap = s->b8_stride;
-    mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
+    mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block];

     A = mot_val[ - 1];
     /* special case for first (slice) line */

@@ -394,7 +394,7 @@ retry:

     /* We need to set current_picture_ptr before reading the header,
      * otherwise we cannot store anyting in there */
-    if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
+    if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
         int i= ff_find_unused_picture(s, 0);
         s->current_picture_ptr= &s->picture[i];
     }

@@ -581,8 +581,8 @@ retry:
     s->gob_index = ff_h263_get_gob_height(s);

     // for skipping the frame
-    s->current_picture.pict_type= s->pict_type;
-    s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture.f.pict_type = s->pict_type;
+    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

     /* skip B-frames if we don't have reference frames */
     if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)) return get_consumed_bytes(s, buf_size);
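The H.264 hunks below move reference bookkeeping behind the same embedded frame. As the masking in get_lowest_part_y() and the later `out->f.reference &= ~DELAYED_PIC_REF` line suggest, only the low two bits of f.reference encode the referenced field(s); higher bits carry flags such as DELAYED_PIC_REF. A hedged one-liner illustrating the comparison (the PICT_* values from avcodec.h are an assumption here, not part of the diff):

    /* Assumed: PICT_TOP_FIELD == 1, PICT_BOTTOM_FIELD == 2, PICT_FRAME == 3,
     * and picture_structure holds one of these values. Flag bits above the
     * low two (e.g. DELAYED_PIC_REF) are masked off before comparing. */
    static int ref_matches_structure(int reference, int picture_structure)
    {
        return (reference & 3) == picture_structure;
    }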
@ -261,8 +261,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int
|
|||||||
// Error resilience puts the current picture in the ref list.
|
// Error resilience puts the current picture in the ref list.
|
||||||
// Don't try to wait on these as it will cause a deadlock.
|
// Don't try to wait on these as it will cause a deadlock.
|
||||||
// Fields can wait on each other, though.
|
// Fields can wait on each other, though.
|
||||||
if(ref->thread_opaque != s->current_picture.thread_opaque ||
|
if (ref->f.thread_opaque != s->current_picture.f.thread_opaque ||
|
||||||
(ref->reference&3) != s->picture_structure) {
|
(ref->f.reference & 3) != s->picture_structure) {
|
||||||
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
|
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
|
||||||
if (refs[0][ref_n] < 0) nrefs[0] += 1;
|
if (refs[0][ref_n] < 0) nrefs[0] += 1;
|
||||||
refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
|
refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
|
||||||
@ -273,8 +273,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int
|
|||||||
int ref_n = h->ref_cache[1][ scan8[n] ];
|
int ref_n = h->ref_cache[1][ scan8[n] ];
|
||||||
Picture *ref= &h->ref_list[1][ref_n];
|
Picture *ref= &h->ref_list[1][ref_n];
|
||||||
|
|
||||||
if(ref->thread_opaque != s->current_picture.thread_opaque ||
|
if (ref->f.thread_opaque != s->current_picture.f.thread_opaque ||
|
||||||
(ref->reference&3) != s->picture_structure) {
|
(ref->f.reference & 3) != s->picture_structure) {
|
||||||
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
|
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
|
||||||
if (refs[1][ref_n] < 0) nrefs[1] += 1;
|
if (refs[1][ref_n] < 0) nrefs[1] += 1;
|
||||||
refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
|
refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
|
||||||
@ -290,7 +290,7 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int
|
|||||||
static void await_references(H264Context *h){
|
static void await_references(H264Context *h){
|
||||||
MpegEncContext * const s = &h->s;
|
MpegEncContext * const s = &h->s;
|
||||||
const int mb_xy= h->mb_xy;
|
const int mb_xy= h->mb_xy;
|
||||||
const int mb_type= s->current_picture.mb_type[mb_xy];
|
const int mb_type = s->current_picture.f.mb_type[mb_xy];
|
||||||
int refs[2][48];
|
int refs[2][48];
|
||||||
int nrefs[2] = {0};
|
int nrefs[2] = {0};
|
||||||
int ref, list;
|
int ref, list;
|
||||||
@ -350,7 +350,7 @@ static void await_references(H264Context *h){
|
|||||||
int row = refs[list][ref];
|
int row = refs[list][ref];
|
||||||
if(row >= 0){
|
if(row >= 0){
|
||||||
Picture *ref_pic = &h->ref_list[list][ref];
|
Picture *ref_pic = &h->ref_list[list][ref];
|
||||||
int ref_field = ref_pic->reference - 1;
|
int ref_field = ref_pic->f.reference - 1;
|
||||||
int ref_field_picture = ref_pic->field_picture;
|
int ref_field_picture = ref_pic->field_picture;
|
||||||
int pic_height = 16*s->mb_height >> ref_field_picture;
|
int pic_height = 16*s->mb_height >> ref_field_picture;
|
||||||
|
|
||||||
@ -448,7 +448,7 @@ static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square,
|
|||||||
int my= h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
|
int my= h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
|
||||||
const int luma_xy= (mx&3) + ((my&3)<<2);
|
const int luma_xy= (mx&3) + ((my&3)<<2);
|
||||||
int offset = ((mx>>2) << pixel_shift) + (my>>2)*h->mb_linesize;
|
int offset = ((mx>>2) << pixel_shift) + (my>>2)*h->mb_linesize;
|
||||||
uint8_t * src_y = pic->data[0] + offset;
|
uint8_t * src_y = pic->f.data[0] + offset;
|
||||||
uint8_t * src_cb, * src_cr;
|
uint8_t * src_cb, * src_cr;
|
||||||
int extra_width= h->emu_edge_width;
|
int extra_width= h->emu_edge_width;
|
||||||
int extra_height= h->emu_edge_height;
|
int extra_height= h->emu_edge_height;
|
||||||
@ -478,7 +478,7 @@ static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square,
|
|||||||
if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;
|
if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;
|
||||||
|
|
||||||
if(chroma444){
|
if(chroma444){
|
||||||
src_cb = pic->data[1] + offset;
|
src_cb = pic->f.data[1] + offset;
|
||||||
if(emu){
|
if(emu){
|
||||||
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src_cb - (2 << pixel_shift) - 2*h->mb_linesize, h->mb_linesize,
|
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src_cb - (2 << pixel_shift) - 2*h->mb_linesize, h->mb_linesize,
|
||||||
16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
|
16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
|
||||||
@ -489,7 +489,7 @@ static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square,
|
|||||||
qpix_op[luma_xy](dest_cb + delta, src_cb + delta, h->mb_linesize);
|
qpix_op[luma_xy](dest_cb + delta, src_cb + delta, h->mb_linesize);
|
||||||
}
|
}
|
||||||
|
|
||||||
src_cr = pic->data[2] + offset;
|
src_cr = pic->f.data[2] + offset;
|
||||||
if(emu){
|
if(emu){
|
||||||
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src_cr - (2 << pixel_shift) - 2*h->mb_linesize, h->mb_linesize,
|
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src_cr - (2 << pixel_shift) - 2*h->mb_linesize, h->mb_linesize,
|
||||||
16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
|
16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
|
||||||
@ -504,11 +504,11 @@ static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square,
|
|||||||
|
|
||||||
if(MB_FIELD){
|
if(MB_FIELD){
|
||||||
// chroma offset when predicting from a field of opposite parity
|
// chroma offset when predicting from a field of opposite parity
|
||||||
my += 2 * ((s->mb_y & 1) - (pic->reference - 1));
|
my += 2 * ((s->mb_y & 1) - (pic->f.reference - 1));
|
||||||
emu |= (my>>3) < 0 || (my>>3) + 8 >= (pic_height>>1);
|
emu |= (my>>3) < 0 || (my>>3) + 8 >= (pic_height>>1);
|
||||||
}
|
}
|
||||||
src_cb= pic->data[1] + ((mx>>3) << pixel_shift) + (my>>3)*h->mb_uvlinesize;
|
src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) + (my >> 3) * h->mb_uvlinesize;
|
||||||
src_cr= pic->data[2] + ((mx>>3) << pixel_shift) + (my>>3)*h->mb_uvlinesize;
|
src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) + (my >> 3) * h->mb_uvlinesize;
|
||||||
|
|
||||||
if(emu){
|
if(emu){
|
||||||
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src_cb, h->mb_uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
|
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src_cb, h->mb_uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
|
||||||
@ -664,7 +664,7 @@ static inline void prefetch_motion(H264Context *h, int list, int pixel_shift, in
|
|||||||
if(refn >= 0){
|
if(refn >= 0){
|
||||||
const int mx= (h->mv_cache[list][scan8[0]][0]>>2) + 16*s->mb_x + 8;
|
const int mx= (h->mv_cache[list][scan8[0]][0]>>2) + 16*s->mb_x + 8;
|
||||||
const int my= (h->mv_cache[list][scan8[0]][1]>>2) + 16*s->mb_y;
|
const int my= (h->mv_cache[list][scan8[0]][1]>>2) + 16*s->mb_y;
|
||||||
uint8_t **src= h->ref_list[list][refn].data;
|
uint8_t **src = h->ref_list[list][refn].f.data;
|
||||||
int off= (mx << pixel_shift) + (my + (s->mb_x&3)*4)*h->mb_linesize + (64 << pixel_shift);
|
int off= (mx << pixel_shift) + (my + (s->mb_x&3)*4)*h->mb_linesize + (64 << pixel_shift);
|
||||||
s->dsp.prefetch(src[0]+off, s->linesize, 4);
|
s->dsp.prefetch(src[0]+off, s->linesize, 4);
|
||||||
if(chroma444){
|
if(chroma444){
|
||||||
@ -684,7 +684,7 @@ static av_always_inline void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t
|
|||||||
int pixel_shift, int chroma444){
|
int pixel_shift, int chroma444){
|
||||||
MpegEncContext * const s = &h->s;
|
MpegEncContext * const s = &h->s;
|
||||||
const int mb_xy= h->mb_xy;
|
const int mb_xy= h->mb_xy;
|
||||||
const int mb_type= s->current_picture.mb_type[mb_xy];
|
const int mb_type = s->current_picture.f.mb_type[mb_xy];
|
||||||
|
|
||||||
assert(IS_INTER(mb_type));
|
assert(IS_INTER(mb_type));
|
||||||
|
|
||||||
@ -1220,7 +1220,7 @@ int ff_h264_frame_start(H264Context *h){
|
|||||||
* Zero here; IDR markings per slice in frame or fields are ORed in later.
|
* Zero here; IDR markings per slice in frame or fields are ORed in later.
|
||||||
* See decode_nal_units().
|
* See decode_nal_units().
|
||||||
*/
|
*/
|
||||||
s->current_picture_ptr->key_frame= 0;
|
s->current_picture_ptr->f.key_frame = 0;
|
||||||
s->current_picture_ptr->mmco_reset= 0;
|
s->current_picture_ptr->mmco_reset= 0;
|
||||||
|
|
||||||
assert(s->linesize && s->uvlinesize);
|
assert(s->linesize && s->uvlinesize);
|
||||||
@ -1245,7 +1245,7 @@ int ff_h264_frame_start(H264Context *h){
|
|||||||
/* some macroblocks can be accessed before they're available in case of lost slices, mbaff or threading*/
|
/* some macroblocks can be accessed before they're available in case of lost slices, mbaff or threading*/
|
||||||
memset(h->slice_table, -1, (s->mb_height*s->mb_stride-1) * sizeof(*h->slice_table));
|
memset(h->slice_table, -1, (s->mb_height*s->mb_stride-1) * sizeof(*h->slice_table));
|
||||||
|
|
||||||
// s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1;
|
// s->decode = (s->flags & CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.f.reference /*|| h->contains_intra*/ || 1;
|
||||||
|
|
||||||
// We mark the current picture as non-reference after allocating it, so
|
// We mark the current picture as non-reference after allocating it, so
|
||||||
// that if we break out due to an error it can be released automatically
|
// that if we break out due to an error it can be released automatically
|
||||||
@ -1254,7 +1254,7 @@ int ff_h264_frame_start(H264Context *h){
|
|||||||
// get released even with set reference, besides SVQ3 and others do not
|
// get released even with set reference, besides SVQ3 and others do not
|
||||||
// mark frames as reference later "naturally".
|
// mark frames as reference later "naturally".
|
||||||
if(s->codec_id != CODEC_ID_SVQ3)
|
if(s->codec_id != CODEC_ID_SVQ3)
|
||||||
s->current_picture_ptr->reference= 0;
|
s->current_picture_ptr->f.reference = 0;
|
||||||
|
|
||||||
s->current_picture_ptr->field_poc[0]=
|
s->current_picture_ptr->field_poc[0]=
|
||||||
s->current_picture_ptr->field_poc[1]= INT_MAX;
|
s->current_picture_ptr->field_poc[1]= INT_MAX;
|
||||||
@ -1280,8 +1280,8 @@ static void decode_postinit(H264Context *h, int setup_finished){
|
|||||||
Picture *cur = s->current_picture_ptr;
|
Picture *cur = s->current_picture_ptr;
|
||||||
int i, pics, out_of_order, out_idx;
|
int i, pics, out_of_order, out_idx;
|
||||||
|
|
||||||
s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264;
|
s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_H264;
|
||||||
s->current_picture_ptr->pict_type= s->pict_type;
|
s->current_picture_ptr->f.pict_type = s->pict_type;
|
||||||
|
|
||||||
if (h->next_output_pic) return;
|
if (h->next_output_pic) return;
|
||||||
|
|
||||||
@ -1294,8 +1294,8 @@ static void decode_postinit(H264Context *h, int setup_finished){
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
cur->interlaced_frame = 0;
|
cur->f.interlaced_frame = 0;
|
||||||
cur->repeat_pict = 0;
|
cur->f.repeat_pict = 0;
|
||||||
|
|
||||||
/* Signal interlacing information externally. */
|
/* Signal interlacing information externally. */
|
||||||
/* Prioritize picture timing SEI information over used decoding process if it exists. */
|
/* Prioritize picture timing SEI information over used decoding process if it exists. */
|
||||||
@ -1307,53 +1307,53 @@ static void decode_postinit(H264Context *h, int setup_finished){
|
|||||||
break;
|
break;
|
||||||
case SEI_PIC_STRUCT_TOP_FIELD:
|
case SEI_PIC_STRUCT_TOP_FIELD:
|
||||||
case SEI_PIC_STRUCT_BOTTOM_FIELD:
|
case SEI_PIC_STRUCT_BOTTOM_FIELD:
|
||||||
cur->interlaced_frame = 1;
|
cur->f.interlaced_frame = 1;
|
||||||
break;
|
break;
|
||||||
case SEI_PIC_STRUCT_TOP_BOTTOM:
|
case SEI_PIC_STRUCT_TOP_BOTTOM:
|
||||||
case SEI_PIC_STRUCT_BOTTOM_TOP:
|
case SEI_PIC_STRUCT_BOTTOM_TOP:
|
||||||
if (FIELD_OR_MBAFF_PICTURE)
|
if (FIELD_OR_MBAFF_PICTURE)
|
||||||
cur->interlaced_frame = 1;
|
cur->f.interlaced_frame = 1;
|
||||||
else
|
else
|
||||||
// try to flag soft telecine progressive
|
// try to flag soft telecine progressive
|
||||||
cur->interlaced_frame = h->prev_interlaced_frame;
|
cur->f.interlaced_frame = h->prev_interlaced_frame;
|
||||||
break;
|
break;
|
||||||
case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
|
case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
|
||||||
case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
|
case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
|
||||||
// Signal the possibility of telecined film externally (pic_struct 5,6)
|
// Signal the possibility of telecined film externally (pic_struct 5,6)
|
||||||
// From these hints, let the applications decide if they apply deinterlacing.
|
// From these hints, let the applications decide if they apply deinterlacing.
|
||||||
cur->repeat_pict = 1;
|
cur->f.repeat_pict = 1;
|
||||||
break;
|
break;
|
||||||
case SEI_PIC_STRUCT_FRAME_DOUBLING:
|
case SEI_PIC_STRUCT_FRAME_DOUBLING:
|
||||||
// Force progressive here, as doubling interlaced frame is a bad idea.
|
// Force progressive here, as doubling interlaced frame is a bad idea.
|
||||||
cur->repeat_pict = 2;
|
cur->f.repeat_pict = 2;
|
||||||
break;
|
break;
|
||||||
case SEI_PIC_STRUCT_FRAME_TRIPLING:
|
case SEI_PIC_STRUCT_FRAME_TRIPLING:
|
||||||
cur->repeat_pict = 4;
|
cur->f.repeat_pict = 4;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((h->sei_ct_type & 3) && h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
|
if ((h->sei_ct_type & 3) && h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
|
||||||
cur->interlaced_frame = (h->sei_ct_type & (1<<1)) != 0;
|
cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
|
||||||
}else{
|
}else{
|
||||||
/* Derive interlacing flag from used decoding process. */
|
/* Derive interlacing flag from used decoding process. */
|
||||||
cur->interlaced_frame = FIELD_OR_MBAFF_PICTURE;
|
cur->f.interlaced_frame = FIELD_OR_MBAFF_PICTURE;
|
||||||
}
|
}
|
||||||
h->prev_interlaced_frame = cur->interlaced_frame;
|
h->prev_interlaced_frame = cur->f.interlaced_frame;
|
||||||
|
|
||||||
if (cur->field_poc[0] != cur->field_poc[1]){
|
if (cur->field_poc[0] != cur->field_poc[1]){
|
||||||
/* Derive top_field_first from field pocs. */
|
/* Derive top_field_first from field pocs. */
|
||||||
cur->top_field_first = cur->field_poc[0] < cur->field_poc[1];
|
cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
|
||||||
}else{
|
}else{
|
||||||
if(cur->interlaced_frame || h->sps.pic_struct_present_flag){
|
if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) {
|
||||||
/* Use picture timing SEI information. Even if it is a information of a past frame, better than nothing. */
|
/* Use picture timing SEI information. Even if it is a information of a past frame, better than nothing. */
|
||||||
if(h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM
|
if(h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM
|
||||||
|| h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
|
|| h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
|
||||||
cur->top_field_first = 1;
|
cur->f.top_field_first = 1;
|
||||||
else
|
else
|
||||||
cur->top_field_first = 0;
|
cur->f.top_field_first = 0;
|
||||||
}else{
|
}else{
|
||||||
/* Most likely progressive */
|
/* Most likely progressive */
|
||||||
cur->top_field_first = 0;
|
cur->f.top_field_first = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1379,17 +1379,17 @@ static void decode_postinit(H264Context *h, int setup_finished){
|
|||||||
assert(pics <= MAX_DELAYED_PIC_COUNT);
|
assert(pics <= MAX_DELAYED_PIC_COUNT);
|
||||||
|
|
||||||
h->delayed_pic[pics++] = cur;
|
h->delayed_pic[pics++] = cur;
|
||||||
if(cur->reference == 0)
|
if (cur->f.reference == 0)
|
||||||
cur->reference = DELAYED_PIC_REF;
|
cur->f.reference = DELAYED_PIC_REF;
|
||||||
|
|
||||||
out = h->delayed_pic[0];
|
out = h->delayed_pic[0];
|
||||||
out_idx = 0;
|
out_idx = 0;
|
||||||
for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++)
|
for (i = 1; h->delayed_pic[i] && !h->delayed_pic[i]->f.key_frame && !h->delayed_pic[i]->mmco_reset; i++)
|
||||||
if(h->delayed_pic[i]->poc < out->poc){
|
if(h->delayed_pic[i]->poc < out->poc){
|
||||||
out = h->delayed_pic[i];
|
out = h->delayed_pic[i];
|
||||||
out_idx = i;
|
out_idx = i;
|
||||||
}
|
}
|
||||||
if(s->avctx->has_b_frames == 0 && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset))
|
if (s->avctx->has_b_frames == 0 && (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset))
|
||||||
h->next_outputed_poc= INT_MIN;
|
h->next_outputed_poc= INT_MIN;
|
||||||
out_of_order = out->poc < h->next_outputed_poc;
|
out_of_order = out->poc < h->next_outputed_poc;
|
||||||
|
|
||||||
@ -1398,14 +1398,14 @@ static void decode_postinit(H264Context *h, int setup_finished){
|
|||||||
else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT)
|
else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT)
|
||||||
|| (s->low_delay &&
|
|| (s->low_delay &&
|
||||||
((h->next_outputed_poc != INT_MIN && out->poc > h->next_outputed_poc + 2)
|
((h->next_outputed_poc != INT_MIN && out->poc > h->next_outputed_poc + 2)
|
||||||
|| cur->pict_type == AV_PICTURE_TYPE_B)))
|
|| cur->f.pict_type == AV_PICTURE_TYPE_B)))
|
||||||
{
|
{
|
||||||
s->low_delay = 0;
|
s->low_delay = 0;
|
||||||
s->avctx->has_b_frames++;
|
s->avctx->has_b_frames++;
|
||||||
}
|
}
|
||||||
|
|
||||||
if(out_of_order || pics > s->avctx->has_b_frames){
|
if(out_of_order || pics > s->avctx->has_b_frames){
|
||||||
out->reference &= ~DELAYED_PIC_REF;
|
out->f.reference &= ~DELAYED_PIC_REF;
|
||||||
out->owner2 = s; // for frame threading, the owner must be the second field's thread
|
out->owner2 = s; // for frame threading, the owner must be the second field's thread
|
||||||
// or else the first thread can release the picture and reuse it unsafely
|
// or else the first thread can release the picture and reuse it unsafely
|
||||||
for(i=out_idx; h->delayed_pic[i]; i++)
|
for(i=out_idx; h->delayed_pic[i]; i++)
|
||||||
@ -1413,7 +1413,7 @@ static void decode_postinit(H264Context *h, int setup_finished){
|
|||||||
}
|
}
|
||||||
if(!out_of_order && pics > s->avctx->has_b_frames){
|
if(!out_of_order && pics > s->avctx->has_b_frames){
|
||||||
h->next_output_pic = out;
|
h->next_output_pic = out;
|
||||||
if(out_idx==0 && h->delayed_pic[0] && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset)) {
|
if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset)) {
|
||||||
h->next_outputed_poc = INT_MIN;
|
h->next_outputed_poc = INT_MIN;
|
||||||
} else
|
} else
|
||||||
h->next_outputed_poc = out->poc;
|
h->next_outputed_poc = out->poc;
|
||||||
@ -1757,7 +1757,7 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
|
|||||||
const int mb_x= s->mb_x;
|
const int mb_x= s->mb_x;
|
||||||
const int mb_y= s->mb_y;
|
const int mb_y= s->mb_y;
|
||||||
const int mb_xy= h->mb_xy;
|
const int mb_xy= h->mb_xy;
|
||||||
const int mb_type= s->current_picture.mb_type[mb_xy];
|
const int mb_type = s->current_picture.f.mb_type[mb_xy];
|
||||||
uint8_t *dest_y, *dest_cb, *dest_cr;
|
uint8_t *dest_y, *dest_cb, *dest_cr;
|
||||||
int linesize, uvlinesize /*dct_offset*/;
|
int linesize, uvlinesize /*dct_offset*/;
|
||||||
int i, j;
|
int i, j;
|
||||||
@ -1767,9 +1767,9 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
|
|||||||
const int is_h264 = !CONFIG_SVQ3_DECODER || simple || s->codec_id == CODEC_ID_H264;
|
const int is_h264 = !CONFIG_SVQ3_DECODER || simple || s->codec_id == CODEC_ID_H264;
|
||||||
void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
|
void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
|
||||||
|
|
||||||
dest_y = s->current_picture.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16;
|
dest_y = s->current_picture.f.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16;
|
||||||
dest_cb = s->current_picture.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
|
dest_cb = s->current_picture.f.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
|
||||||
dest_cr = s->current_picture.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
|
dest_cr = s->current_picture.f.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
|
||||||
|
|
||||||
s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + (64 << pixel_shift), s->linesize, 4);
|
s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + (64 << pixel_shift), s->linesize, 4);
|
||||||
s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + (64 << pixel_shift), dest_cr - dest_cb, 2);
|
s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + (64 << pixel_shift), dest_cr - dest_cb, 2);
|
||||||
@ -1918,7 +1918,7 @@ static av_always_inline void hl_decode_mb_444_internal(H264Context *h, int simpl
|
|||||||
const int mb_x= s->mb_x;
|
const int mb_x= s->mb_x;
|
||||||
const int mb_y= s->mb_y;
|
const int mb_y= s->mb_y;
|
||||||
const int mb_xy= h->mb_xy;
|
const int mb_xy= h->mb_xy;
|
||||||
const int mb_type= s->current_picture.mb_type[mb_xy];
|
const int mb_type = s->current_picture.f.mb_type[mb_xy];
|
||||||
uint8_t *dest[3];
|
uint8_t *dest[3];
|
||||||
int linesize;
|
int linesize;
|
||||||
int i, j, p;
|
int i, j, p;
|
||||||
@ -1928,7 +1928,7 @@ static av_always_inline void hl_decode_mb_444_internal(H264Context *h, int simpl
|
|||||||
|
|
||||||
for (p = 0; p < plane_count; p++)
|
for (p = 0; p < plane_count; p++)
|
||||||
{
|
{
|
||||||
dest[p] = s->current_picture.data[p] + ((mb_x << pixel_shift) + mb_y * s->linesize) * 16;
|
dest[p] = s->current_picture.f.data[p] + ((mb_x << pixel_shift) + mb_y * s->linesize) * 16;
|
||||||
s->dsp.prefetch(dest[p] + (s->mb_x&3)*4*s->linesize + (64 << pixel_shift), s->linesize, 4);
|
s->dsp.prefetch(dest[p] + (s->mb_x&3)*4*s->linesize + (64 << pixel_shift), s->linesize, 4);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2037,7 +2037,7 @@ static void av_noinline hl_decode_mb_444_simple(H264Context *h){
|
|||||||
void ff_h264_hl_decode_mb(H264Context *h){
|
void ff_h264_hl_decode_mb(H264Context *h){
|
||||||
MpegEncContext * const s = &h->s;
|
MpegEncContext * const s = &h->s;
|
||||||
const int mb_xy= h->mb_xy;
|
const int mb_xy= h->mb_xy;
|
||||||
const int mb_type= s->current_picture.mb_type[mb_xy];
|
const int mb_type = s->current_picture.f.mb_type[mb_xy];
|
||||||
int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || s->qscale == 0;
|
int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || s->qscale == 0;
|
||||||
|
|
||||||
if (CHROMA444) {
|
if (CHROMA444) {
|
||||||
@ -2191,14 +2191,14 @@ static void flush_dpb(AVCodecContext *avctx){
|
|||||||
int i;
|
int i;
|
||||||
for(i=0; i<MAX_DELAYED_PIC_COUNT; i++) {
|
for(i=0; i<MAX_DELAYED_PIC_COUNT; i++) {
|
||||||
if(h->delayed_pic[i])
|
if(h->delayed_pic[i])
|
||||||
h->delayed_pic[i]->reference= 0;
|
h->delayed_pic[i]->f.reference = 0;
|
||||||
h->delayed_pic[i]= NULL;
|
h->delayed_pic[i]= NULL;
|
||||||
}
|
}
|
||||||
h->outputed_poc=h->next_outputed_poc= INT_MIN;
|
h->outputed_poc=h->next_outputed_poc= INT_MIN;
|
||||||
h->prev_interlaced_frame = 1;
|
h->prev_interlaced_frame = 1;
|
||||||
idr(h);
|
idr(h);
|
||||||
if(h->s.current_picture_ptr)
|
if(h->s.current_picture_ptr)
|
||||||
h->s.current_picture_ptr->reference= 0;
|
h->s.current_picture_ptr->f.reference = 0;
|
||||||
h->s.first_field= 0;
|
h->s.first_field= 0;
|
||||||
ff_h264_reset_sei(h);
|
ff_h264_reset_sei(h);
|
||||||
ff_mpeg_flush(avctx);
|
ff_mpeg_flush(avctx);
|
||||||
@ -2677,8 +2677,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
|||||||
* be fixed. */
|
* be fixed. */
|
||||||
if (h->short_ref_count) {
|
if (h->short_ref_count) {
|
||||||
if (prev) {
|
if (prev) {
|
||||||
av_image_copy(h->short_ref[0]->data, h->short_ref[0]->linesize,
|
av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize,
|
||||||
(const uint8_t**)prev->data, prev->linesize,
|
(const uint8_t**)prev->f.data, prev->f.linesize,
|
||||||
s->avctx->pix_fmt, s->mb_width*16, s->mb_height*16);
|
s->avctx->pix_fmt, s->mb_width*16, s->mb_height*16);
|
||||||
h->short_ref[0]->poc = prev->poc+2;
|
h->short_ref[0]->poc = prev->poc+2;
|
||||||
}
|
}
|
||||||
@ -2689,7 +2689,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
|||||||
/* See if we have a decoded first field looking for a pair... */
|
/* See if we have a decoded first field looking for a pair... */
|
||||||
if (s0->first_field) {
|
if (s0->first_field) {
|
||||||
assert(s0->current_picture_ptr);
|
assert(s0->current_picture_ptr);
|
||||||
assert(s0->current_picture_ptr->data[0]);
|
assert(s0->current_picture_ptr->f.data[0]);
|
||||||
assert(s0->current_picture_ptr->reference != DELAYED_PIC_REF);
|
assert(s0->current_picture_ptr->reference != DELAYED_PIC_REF);
|
||||||
|
|
||||||
/* figure out if we have a complementary field pair */
|
/* figure out if we have a complementary field pair */
|
||||||
@ -2703,7 +2703,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
|||||||
|
|
||||||
} else {
|
} else {
|
||||||
if (h->nal_ref_idc &&
|
if (h->nal_ref_idc &&
|
||||||
s0->current_picture_ptr->reference &&
|
s0->current_picture_ptr->f.reference &&
|
||||||
s0->current_picture_ptr->frame_num != h->frame_num) {
|
s0->current_picture_ptr->frame_num != h->frame_num) {
|
||||||
/*
|
/*
|
||||||
* This and previous field were reference, but had
|
* This and previous field were reference, but had
|
||||||
@ -2951,16 +2951,16 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
|||||||
int *ref2frm= h->ref2frm[h->slice_num&(MAX_SLICES-1)][j];
|
int *ref2frm= h->ref2frm[h->slice_num&(MAX_SLICES-1)][j];
|
||||||
for(i=0; i<16; i++){
|
for(i=0; i<16; i++){
|
||||||
id_list[i]= 60;
|
id_list[i]= 60;
|
||||||
if(h->ref_list[j][i].data[0]){
|
if (h->ref_list[j][i].f.data[0]) {
|
||||||
int k;
|
int k;
|
||||||
uint8_t *base= h->ref_list[j][i].base[0];
|
uint8_t *base = h->ref_list[j][i].f.base[0];
|
||||||
for(k=0; k<h->short_ref_count; k++)
|
for(k=0; k<h->short_ref_count; k++)
|
||||||
if(h->short_ref[k]->base[0] == base){
|
if (h->short_ref[k]->f.base[0] == base) {
|
||||||
id_list[i]= k;
|
id_list[i]= k;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
for(k=0; k<h->long_ref_count; k++)
|
for(k=0; k<h->long_ref_count; k++)
|
||||||
if(h->long_ref[k] && h->long_ref[k]->base[0] == base){
|
if (h->long_ref[k] && h->long_ref[k]->f.base[0] == base) {
|
||||||
id_list[i]= h->short_ref_count + k;
|
id_list[i]= h->short_ref_count + k;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -2971,12 +2971,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
|||||||
ref2frm[1]= -1;
|
ref2frm[1]= -1;
|
||||||
for(i=0; i<16; i++)
|
for(i=0; i<16; i++)
|
||||||
ref2frm[i+2]= 4*id_list[i]
|
ref2frm[i+2]= 4*id_list[i]
|
||||||
+(h->ref_list[j][i].reference&3);
|
+ (h->ref_list[j][i].f.reference & 3);
|
||||||
ref2frm[18+0]=
|
ref2frm[18+0]=
|
||||||
ref2frm[18+1]= -1;
|
ref2frm[18+1]= -1;
|
||||||
for(i=16; i<48; i++)
|
for(i=16; i<48; i++)
|
||||||
ref2frm[i+4]= 4*id_list[(i-16)>>1]
|
ref2frm[i+4]= 4*id_list[(i-16)>>1]
|
||||||
+(h->ref_list[j][i].reference&3);
|
+ (h->ref_list[j][i].f.reference & 3);
|
||||||
}
|
}
|
||||||
|
|
||||||
//FIXME: fix draw_edges+PAFF+frame threads
|
//FIXME: fix draw_edges+PAFF+frame threads
|
||||||
@ -3026,11 +3026,11 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h, MpegEncCon
|
|||||||
const int b_xy= h->mb2b_xy[top_xy] + 3*b_stride;
|
const int b_xy= h->mb2b_xy[top_xy] + 3*b_stride;
|
||||||
const int b8_xy= 4*top_xy + 2;
|
const int b8_xy= 4*top_xy + 2;
|
||||||
int (*ref2frm)[64] = h->ref2frm[ h->slice_table[top_xy]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
|
int (*ref2frm)[64] = h->ref2frm[ h->slice_table[top_xy]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
|
||||||
AV_COPY128(mv_dst - 1*8, s->current_picture.motion_val[list][b_xy + 0]);
|
AV_COPY128(mv_dst - 1*8, s->current_picture.f.motion_val[list][b_xy + 0]);
|
||||||
ref_cache[0 - 1*8]=
|
ref_cache[0 - 1*8]=
|
||||||
ref_cache[1 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 0]];
|
ref_cache[1 - 1*8]= ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 0]];
|
||||||
ref_cache[2 - 1*8]=
|
ref_cache[2 - 1*8]=
|
||||||
ref_cache[3 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 1]];
|
ref_cache[3 - 1*8]= ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 1]];
|
||||||
}else{
|
}else{
|
||||||
AV_ZERO128(mv_dst - 1*8);
|
AV_ZERO128(mv_dst - 1*8);
|
||||||
AV_WN32A(&ref_cache[0 - 1*8], ((LIST_NOT_USED)&0xFF)*0x01010101u);
|
AV_WN32A(&ref_cache[0 - 1*8], ((LIST_NOT_USED)&0xFF)*0x01010101u);
|
||||||
@ -3041,14 +3041,14 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h, MpegEncCon
|
|||||||
const int b_xy= h->mb2b_xy[left_xy[LTOP]] + 3;
|
const int b_xy= h->mb2b_xy[left_xy[LTOP]] + 3;
|
||||||
const int b8_xy= 4*left_xy[LTOP] + 1;
|
const int b8_xy= 4*left_xy[LTOP] + 1;
|
||||||
int (*ref2frm)[64] = h->ref2frm[ h->slice_table[left_xy[LTOP]]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
|
int (*ref2frm)[64] = h->ref2frm[ h->slice_table[left_xy[LTOP]]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
|
||||||
AV_COPY32(mv_dst - 1 + 0, s->current_picture.motion_val[list][b_xy + b_stride*0]);
|
AV_COPY32(mv_dst - 1 + 0, s->current_picture.f.motion_val[list][b_xy + b_stride*0]);
|
||||||
AV_COPY32(mv_dst - 1 + 8, s->current_picture.motion_val[list][b_xy + b_stride*1]);
|
AV_COPY32(mv_dst - 1 + 8, s->current_picture.f.motion_val[list][b_xy + b_stride*1]);
|
||||||
AV_COPY32(mv_dst - 1 +16, s->current_picture.motion_val[list][b_xy + b_stride*2]);
|
AV_COPY32(mv_dst - 1 + 16, s->current_picture.f.motion_val[list][b_xy + b_stride*2]);
|
||||||
AV_COPY32(mv_dst - 1 +24, s->current_picture.motion_val[list][b_xy + b_stride*3]);
|
AV_COPY32(mv_dst - 1 + 24, s->current_picture.f.motion_val[list][b_xy + b_stride*3]);
|
||||||
ref_cache[-1 + 0]=
|
ref_cache[-1 + 0]=
|
||||||
ref_cache[-1 + 8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 2*0]];
|
ref_cache[-1 + 8]= ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 2*0]];
|
||||||
ref_cache[-1 + 16]=
|
ref_cache[-1 + 16]=
|
||||||
ref_cache[-1 + 24]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 2*1]];
|
ref_cache[-1 + 24]= ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 2*1]];
|
||||||
}else{
|
}else{
|
||||||
AV_ZERO32(mv_dst - 1 + 0);
|
AV_ZERO32(mv_dst - 1 + 0);
|
||||||
AV_ZERO32(mv_dst - 1 + 8);
|
AV_ZERO32(mv_dst - 1 + 8);
|
||||||
@ -3072,7 +3072,7 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h, MpegEncCon
|
|||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
int8_t *ref = &s->current_picture.ref_index[list][4*mb_xy];
|
int8_t *ref = &s->current_picture.f.ref_index[list][4*mb_xy];
|
||||||
int (*ref2frm)[64] = h->ref2frm[ h->slice_num&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
|
int (*ref2frm)[64] = h->ref2frm[ h->slice_num&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
|
||||||
uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101;
|
uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101;
|
||||||
uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]],ref2frm[list][ref[3]])&0x00FF00FF)*0x0101;
|
uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]],ref2frm[list][ref[3]])&0x00FF00FF)*0x0101;
|
||||||
@ -3083,7 +3083,7 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h, MpegEncCon
|
|||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
int16_t (*mv_src)[2] = &s->current_picture.motion_val[list][4*s->mb_x + 4*s->mb_y*b_stride];
|
int16_t (*mv_src)[2] = &s->current_picture.f.motion_val[list][4*s->mb_x + 4*s->mb_y*b_stride];
|
||||||
AV_COPY128(mv_dst + 8*0, mv_src + 0*b_stride);
|
AV_COPY128(mv_dst + 8*0, mv_src + 0*b_stride);
|
||||||
AV_COPY128(mv_dst + 8*1, mv_src + 1*b_stride);
|
AV_COPY128(mv_dst + 8*1, mv_src + 1*b_stride);
|
||||||
AV_COPY128(mv_dst + 8*2, mv_src + 2*b_stride);
|
AV_COPY128(mv_dst + 8*2, mv_src + 2*b_stride);
|
||||||
@ -3110,7 +3110,7 @@ static int fill_filter_caches(H264Context *h, int mb_type){
|
|||||||
|
|
||||||
left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
|
left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
|
||||||
if(FRAME_MBAFF){
|
if(FRAME_MBAFF){
|
||||||
const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]);
|
const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
|
||||||
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
|
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
|
||||||
if(s->mb_y&1){
|
if(s->mb_y&1){
|
||||||
if (left_mb_field_flag != curr_mb_field_flag) {
|
if (left_mb_field_flag != curr_mb_field_flag) {
|
||||||
@ -3118,7 +3118,7 @@ static int fill_filter_caches(H264Context *h, int mb_type){
|
|||||||
}
|
}
|
||||||
}else{
|
}else{
|
||||||
if(curr_mb_field_flag){
|
if(curr_mb_field_flag){
|
||||||
top_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy ]>>7)&1)-1);
|
top_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy] >> 7) & 1) - 1);
|
||||||
}
|
}
|
||||||
if (left_mb_field_flag != curr_mb_field_flag) {
|
if (left_mb_field_flag != curr_mb_field_flag) {
|
||||||
left_xy[LBOT] += s->mb_stride;
|
left_xy[LBOT] += s->mb_stride;
|
||||||
@ -3133,21 +3133,21 @@ static int fill_filter_caches(H264Context *h, int mb_type){
|
|||||||
//for sufficiently low qp, filtering wouldn't do anything
|
//for sufficiently low qp, filtering wouldn't do anything
|
||||||
//this is a conservative estimate: could also check beta_offset and more accurate chroma_qp
|
//this is a conservative estimate: could also check beta_offset and more accurate chroma_qp
|
||||||
int qp_thresh = h->qp_thresh; //FIXME strictly we should store qp_thresh for each mb of a slice
|
int qp_thresh = h->qp_thresh; //FIXME strictly we should store qp_thresh for each mb of a slice
|
||||||
int qp = s->current_picture.qscale_table[mb_xy];
|
int qp = s->current_picture.f.qscale_table[mb_xy];
|
||||||
if(qp <= qp_thresh
|
if(qp <= qp_thresh
|
||||||
&& (left_xy[LTOP]<0 || ((qp + s->current_picture.qscale_table[left_xy[LTOP]] + 1)>>1) <= qp_thresh)
|
&& (left_xy[LTOP] < 0 || ((qp + s->current_picture.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh)
|
||||||
&& (top_xy <0 || ((qp + s->current_picture.qscale_table[top_xy ] + 1)>>1) <= qp_thresh)){
|
&& (top_xy < 0 || ((qp + s->current_picture.f.qscale_table[top_xy ] + 1) >> 1) <= qp_thresh)) {
|
||||||
if(!FRAME_MBAFF)
|
if(!FRAME_MBAFF)
|
||||||
return 1;
|
return 1;
|
||||||
if( (left_xy[LTOP]< 0 || ((qp + s->current_picture.qscale_table[left_xy[LBOT] ] + 1)>>1) <= qp_thresh)
|
if ((left_xy[LTOP] < 0 || ((qp + s->current_picture.f.qscale_table[left_xy[LBOT] ] + 1) >> 1) <= qp_thresh) &&
|
||||||
&& (top_xy < s->mb_stride || ((qp + s->current_picture.qscale_table[top_xy -s->mb_stride] + 1)>>1) <= qp_thresh))
|
(top_xy < s->mb_stride || ((qp + s->current_picture.f.qscale_table[top_xy - s->mb_stride] + 1) >> 1) <= qp_thresh))
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
top_type = s->current_picture.mb_type[top_xy];
|
top_type = s->current_picture.f.mb_type[top_xy];
|
||||||
left_type[LTOP] = s->current_picture.mb_type[left_xy[LTOP]];
|
left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
|
||||||
left_type[LBOT] = s->current_picture.mb_type[left_xy[LBOT]];
|
left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];
|
||||||
if(h->deblocking_filter == 2){
|
if(h->deblocking_filter == 2){
|
||||||
if(h->slice_table[top_xy ] != h->slice_num) top_type= 0;
|
if(h->slice_table[top_xy ] != h->slice_num) top_type= 0;
|
||||||
if(h->slice_table[left_xy[LBOT]] != h->slice_num) left_type[LTOP]= left_type[LBOT]= 0;
|
if(h->slice_table[left_xy[LBOT]] != h->slice_num) left_type[LTOP]= left_type[LBOT]= 0;
|
||||||
@ -3236,7 +3236,7 @@ static void loop_filter(H264Context *h, int start_x, int end_x){
|
|||||||
int mb_xy, mb_type;
|
int mb_xy, mb_type;
|
||||||
mb_xy = h->mb_xy = mb_x + mb_y*s->mb_stride;
|
mb_xy = h->mb_xy = mb_x + mb_y*s->mb_stride;
|
||||||
h->slice_num= h->slice_table[mb_xy];
|
h->slice_num= h->slice_table[mb_xy];
|
||||||
mb_type= s->current_picture.mb_type[mb_xy];
|
mb_type = s->current_picture.f.mb_type[mb_xy];
|
||||||
h->list_count= h->list_counts[mb_xy];
|
h->list_count= h->list_counts[mb_xy];
|
||||||
|
|
||||||
if(FRAME_MBAFF)
|
if(FRAME_MBAFF)
|
||||||
@ -3244,9 +3244,9 @@ static void loop_filter(H264Context *h, int start_x, int end_x){
|
|||||||
|
|
||||||
s->mb_x= mb_x;
|
s->mb_x= mb_x;
|
||||||
s->mb_y= mb_y;
|
s->mb_y= mb_y;
|
||||||
dest_y = s->current_picture.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16;
|
dest_y = s->current_picture.f.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16;
|
||||||
dest_cb = s->current_picture.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444);
|
dest_cb = s->current_picture.f.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444);
|
||||||
dest_cr = s->current_picture.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444);
|
dest_cr = s->current_picture.f.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444);
|
||||||
//FIXME simplify above
|
//FIXME simplify above
|
||||||
|
|
||||||
if (MB_FIELD) {
|
if (MB_FIELD) {
|
||||||
@ -3264,8 +3264,8 @@ static void loop_filter(H264Context *h, int start_x, int end_x){
|
|||||||
backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, CHROMA444, 0);
|
backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, CHROMA444, 0);
|
||||||
if(fill_filter_caches(h, mb_type))
|
if(fill_filter_caches(h, mb_type))
|
||||||
continue;
|
continue;
|
||||||
h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.qscale_table[mb_xy]);
|
h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mb_xy]);
|
||||||
h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.qscale_table[mb_xy]);
|
h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mb_xy]);
|
||||||
|
|
||||||
if (FRAME_MBAFF) {
|
if (FRAME_MBAFF) {
|
||||||
ff_h264_filter_mb (h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
|
ff_h264_filter_mb (h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
|
||||||
@ -3286,9 +3286,9 @@ static void predict_field_decoding_flag(H264Context *h){
|
|||||||
MpegEncContext * const s = &h->s;
|
MpegEncContext * const s = &h->s;
|
||||||
const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
|
const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
|
||||||
int mb_type = (h->slice_table[mb_xy-1] == h->slice_num)
|
int mb_type = (h->slice_table[mb_xy-1] == h->slice_num)
|
||||||
? s->current_picture.mb_type[mb_xy-1]
|
? s->current_picture.f.mb_type[mb_xy - 1]
|
||||||
: (h->slice_table[mb_xy-s->mb_stride] == h->slice_num)
|
: (h->slice_table[mb_xy-s->mb_stride] == h->slice_num)
|
||||||
? s->current_picture.mb_type[mb_xy-s->mb_stride]
|
? s->current_picture.f.mb_type[mb_xy - s->mb_stride]
|
||||||
: 0;
|
: 0;
|
||||||
h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
|
h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
|
||||||
}
|
}
|
||||||
@ -3667,7 +3667,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
|||||||
if((err = decode_slice_header(hx, h)))
|
if((err = decode_slice_header(hx, h)))
|
||||||
break;
|
break;
|
||||||
|
|
||||||
s->current_picture_ptr->key_frame |=
|
s->current_picture_ptr->f.key_frame |=
|
||||||
(hx->nal_unit_type == NAL_IDR_SLICE) ||
|
(hx->nal_unit_type == NAL_IDR_SLICE) ||
|
||||||
(h->sei_recovery_frame_cnt >= 0);
|
(h->sei_recovery_frame_cnt >= 0);
|
||||||
|
|
||||||
@ -3831,7 +3831,7 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
//FIXME factorize this with the output code below
|
//FIXME factorize this with the output code below
|
||||||
out = h->delayed_pic[0];
|
out = h->delayed_pic[0];
|
||||||
out_idx = 0;
|
out_idx = 0;
|
||||||
for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++)
|
for (i = 1; h->delayed_pic[i] && !h->delayed_pic[i]->f.key_frame && !h->delayed_pic[i]->mmco_reset; i++)
|
||||||
if(h->delayed_pic[i]->poc < out->poc){
|
if(h->delayed_pic[i]->poc < out->poc){
|
||||||
out = h->delayed_pic[i];
|
out = h->delayed_pic[i];
|
||||||
out_idx = i;
|
out_idx = i;
|
||||||
|
@ -795,8 +795,8 @@ static void fill_decode_neighbors(H264Context *h, int mb_type){
|
|||||||
left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
|
left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
|
||||||
h->left_block = left_block_options[0];
|
h->left_block = left_block_options[0];
|
||||||
if(FRAME_MBAFF){
|
if(FRAME_MBAFF){
|
||||||
const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]);
|
const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
|
||||||
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
|
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
|
||||||
if(s->mb_y&1){
|
if(s->mb_y&1){
|
||||||
if (left_mb_field_flag != curr_mb_field_flag) {
|
if (left_mb_field_flag != curr_mb_field_flag) {
|
||||||
 left_xy[LBOT] = left_xy[LTOP] = mb_xy - s->mb_stride - 1;
@@ -812,9 +812,9 @@ static void fill_decode_neighbors(H264Context *h, int mb_type){
 }
 }else{
 if(curr_mb_field_flag){
-topleft_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy - 1]>>7)&1)-1);
-topright_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy + 1]>>7)&1)-1);
-top_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy ]>>7)&1)-1);
+topleft_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy - 1] >> 7) & 1) - 1);
+topright_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy + 1] >> 7) & 1) - 1);
+top_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy ] >> 7) & 1) - 1);
 }
 if (left_mb_field_flag != curr_mb_field_flag) {
 if (curr_mb_field_flag) {
@@ -834,11 +834,11 @@ static void fill_decode_neighbors(H264Context *h, int mb_type){
 h->left_mb_xy[LBOT] = left_xy[LBOT];
 //FIXME do we need all in the context?

-h->topleft_type = s->current_picture.mb_type[topleft_xy] ;
-h->top_type = s->current_picture.mb_type[top_xy] ;
-h->topright_type= s->current_picture.mb_type[topright_xy];
-h->left_type[LTOP] = s->current_picture.mb_type[left_xy[LTOP]] ;
-h->left_type[LBOT] = s->current_picture.mb_type[left_xy[LBOT]] ;
+h->topleft_type = s->current_picture.f.mb_type[topleft_xy];
+h->top_type = s->current_picture.f.mb_type[top_xy];
+h->topright_type = s->current_picture.f.mb_type[topright_xy];
+h->left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
+h->left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];

 if(FMO){
 if(h->slice_table[topleft_xy ] != h->slice_num) h->topleft_type = 0;
@@ -898,7 +898,7 @@ static void fill_decode_caches(H264Context *h, int mb_type){
 h->left_samples_available&= 0xFF5F;
 }
 }else{
-int left_typei = s->current_picture.mb_type[left_xy[LTOP] + s->mb_stride];
+int left_typei = s->current_picture.f.mb_type[left_xy[LTOP] + s->mb_stride];

 assert(left_xy[LTOP] == left_xy[LBOT]);
 if(!((left_typei & type_mask) && (left_type[LTOP] & type_mask))){
@@ -1016,9 +1016,9 @@ static void fill_decode_caches(H264Context *h, int mb_type){
 int b_stride = h->b_stride;
 for(list=0; list<h->list_count; list++){
 int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
-int8_t *ref = s->current_picture.ref_index[list];
+int8_t *ref = s->current_picture.f.ref_index[list];
 int16_t (*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
-int16_t (*mv)[2] = s->current_picture.motion_val[list];
+int16_t (*mv)[2] = s->current_picture.f.motion_val[list];
 if(!USES_LIST(mb_type, list)){
 continue;
 }
@@ -1240,7 +1240,7 @@ static av_always_inline void write_back_non_zero_count(H264Context *h){
 static av_always_inline void write_back_motion_list(H264Context *h, MpegEncContext * const s, int b_stride,
 int b_xy, int b8_xy, int mb_type, int list )
 {
-int16_t (*mv_dst)[2] = &s->current_picture.motion_val[list][b_xy];
+int16_t (*mv_dst)[2] = &s->current_picture.f.motion_val[list][b_xy];
 int16_t (*mv_src)[2] = &h->mv_cache[list][scan8[0]];
 AV_COPY128(mv_dst + 0*b_stride, mv_src + 8*0);
 AV_COPY128(mv_dst + 1*b_stride, mv_src + 8*1);
@@ -1260,7 +1260,7 @@ static av_always_inline void write_back_motion_list(H264Context *h, MpegEncConte
 }

 {
-int8_t *ref_index = &s->current_picture.ref_index[list][b8_xy];
+int8_t *ref_index = &s->current_picture.f.ref_index[list][b8_xy];
 int8_t *ref_cache = h->ref_cache[list];
 ref_index[0+0*2]= ref_cache[scan8[0]];
 ref_index[1+0*2]= ref_cache[scan8[4]];
@@ -1278,7 +1278,8 @@ static av_always_inline void write_back_motion(H264Context *h, int mb_type){
 if(USES_LIST(mb_type, 0)){
 write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 0);
 }else{
-fill_rectangle(&s->current_picture.ref_index[0][b8_xy], 2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
+fill_rectangle(&s->current_picture.f.ref_index[0][b8_xy],
+               2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
 }
 if(USES_LIST(mb_type, 1)){
 write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 1);
@@ -1334,8 +1335,8 @@ static void av_unused decode_mb_skip(H264Context *h){
 }

 write_back_motion(h, mb_type);
-s->current_picture.mb_type[mb_xy]= mb_type;
-s->current_picture.qscale_table[mb_xy]= s->qscale;
+s->current_picture.f.mb_type[mb_xy] = mb_type;
+s->current_picture.f.qscale_table[mb_xy] = s->qscale;
 h->slice_table[ mb_xy ]= h->slice_num;
 h->prev_mb_skipped= 1;
 }
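An aside on the neighbour arithmetic rewritten (but not changed) in the fill_decode_neighbors hunk above: s->mb_stride & (((mb_type >> 7) & 1) - 1) is a branchless conditional add. A minimal standalone sketch of just that trick; the function and test values are hypothetical, not libavcodec API:

    #include <assert.h>
    #include <stdint.h>

    /* ((x >> 7) & 1) - 1 is 0 when bit 7 of the neighbour's mb_type is set
     * and ~0 (all ones) when it is clear, so ANDing with mb_stride adds the
     * stride only in the bit-clear case -- no branch needed. */
    static int neighbor_step(uint32_t nb_mb_type, int mb_stride)
    {
        return mb_stride & ((int)((nb_mb_type >> 7) & 1) - 1);
    }

    int main(void)
    {
        assert(neighbor_step(1u << 7, 13) == 0);  /* bit set: no extra row  */
        assert(neighbor_step(0,       13) == 13); /* bit clear: add one row */
        return 0;
    }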
@@ -1284,8 +1284,8 @@ static int decode_cabac_field_decoding_flag(H264Context *h) {

 unsigned long ctx = 0;

-ctx += h->mb_field_decoding_flag & !!s->mb_x; //for FMO:(s->current_picture.mb_type[mba_xy]>>7)&(h->slice_table[mba_xy] == h->slice_num);
-ctx += (s->current_picture.mb_type[mbb_xy]>>7)&(h->slice_table[mbb_xy] == h->slice_num);
+ctx += h->mb_field_decoding_flag & !!s->mb_x; //for FMO:(s->current_picture.f.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
+ctx += (s->current_picture.f.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num);

 return get_cabac_noinline( &h->cabac, &(h->cabac_state+70)[ctx] );
 }
@@ -1330,13 +1330,13 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
 mba_xy = mb_xy - 1;
 if( (mb_y&1)
 && h->slice_table[mba_xy] == h->slice_num
-&& MB_FIELD == !!IS_INTERLACED( s->current_picture.mb_type[mba_xy] ) )
+&& MB_FIELD == !!IS_INTERLACED( s->current_picture.f.mb_type[mba_xy] ) )
 mba_xy += s->mb_stride;
 if( MB_FIELD ){
 mbb_xy = mb_xy - s->mb_stride;
 if( !(mb_y&1)
 && h->slice_table[mbb_xy] == h->slice_num
-&& IS_INTERLACED( s->current_picture.mb_type[mbb_xy] ) )
+&& IS_INTERLACED( s->current_picture.f.mb_type[mbb_xy] ) )
 mbb_xy -= s->mb_stride;
 }else
 mbb_xy = mb_x + (mb_y-1)*s->mb_stride;
@@ -1346,9 +1346,9 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
 mbb_xy = mb_xy - (s->mb_stride << FIELD_PICTURE);
 }

-if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mba_xy] ))
+if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.f.mb_type[mba_xy] ))
 ctx++;
-if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] ))
+if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.f.mb_type[mbb_xy] ))
 ctx++;

 if( h->slice_type_nos == AV_PICTURE_TYPE_B )
@@ -1849,7 +1849,7 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
 /* read skip flags */
 if( skip ) {
 if( FRAME_MBAFF && (s->mb_y&1)==0 ){
-s->current_picture.mb_type[mb_xy] = MB_TYPE_SKIP;
+s->current_picture.f.mb_type[mb_xy] = MB_TYPE_SKIP;
 h->next_mb_skipped = decode_cabac_mb_skip( h, s->mb_x, s->mb_y+1 );
 if(!h->next_mb_skipped)
 h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
@@ -1965,10 +1965,10 @@ decode_intra_mb:
 h->cbp_table[mb_xy] = 0xf7ef;
 h->chroma_pred_mode_table[mb_xy] = 0;
 // In deblocking, the quantizer is 0
-s->current_picture.qscale_table[mb_xy]= 0;
+s->current_picture.f.qscale_table[mb_xy] = 0;
 // All coeffs are present
 memset(h->non_zero_count[mb_xy], 16, 48);
-s->current_picture.mb_type[mb_xy]= mb_type;
+s->current_picture.f.mb_type[mb_xy] = mb_type;
 h->last_qscale_diff = 0;
 return 0;
 }
@@ -2265,7 +2265,7 @@ decode_intra_mb:
 AV_WN32A(&nnz_cache[4+8*10], top_empty);
 }
 }
-s->current_picture.mb_type[mb_xy]= mb_type;
+s->current_picture.f.mb_type[mb_xy] = mb_type;

 if( cbp || IS_INTRA16x16( mb_type ) ) {
 const uint8_t *scan, *scan8x8;
@@ -2344,7 +2344,7 @@ decode_intra_mb:
 h->last_qscale_diff = 0;
 }

-s->current_picture.qscale_table[mb_xy]= s->qscale;
+s->current_picture.f.qscale_table[mb_xy] = s->qscale;
 write_back_non_zero_count(h);

 if(MB_MBAFF){
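The decode_cabac_mb_skip hunks above keep the same context derivation while retargeting the mb_type reads: each of the two neighbours bumps the context index by one when it lies in the same slice and was not itself skipped, so ctx ends up in {0, 1, 2}. A hedged sketch of that selection logic only, with hypothetical stand-in types rather than the real decoder state:

    #include <stdio.h>

    /* Hypothetical neighbour summary: "left" (A) and "top" (B). */
    struct nb { int same_slice; int skipped; };

    static int skip_ctx(struct nb a, struct nb b)
    {
        int ctx = 0;
        ctx += a.same_slice && !a.skipped; /* available, non-skipped left */
        ctx += b.same_slice && !b.skipped; /* available, non-skipped top  */
        return ctx;
    }

    int main(void)
    {
        struct nb a = { 1, 0 }, b = { 1, 1 };
        printf("ctx = %d\n", skip_ctx(a, b)); /* prints ctx = 1 */
        return 0;
    }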
@@ -689,11 +689,11 @@ decode_intra_mb:
 }

 // In deblocking, the quantizer is 0
-s->current_picture.qscale_table[mb_xy]= 0;
+s->current_picture.f.qscale_table[mb_xy] = 0;
 // All coeffs are present
 memset(h->non_zero_count[mb_xy], 16, 48);

-s->current_picture.mb_type[mb_xy]= mb_type;
+s->current_picture.f.mb_type[mb_xy] = mb_type;
 return 0;
 }

@@ -990,7 +990,7 @@ decode_intra_mb:
 }
 h->cbp=
 h->cbp_table[mb_xy]= cbp;
-s->current_picture.mb_type[mb_xy]= mb_type;
+s->current_picture.f.mb_type[mb_xy] = mb_type;

 if(cbp || IS_INTRA16x16(mb_type)){
 int i4x4, chroma_idx;
@@ -1063,7 +1063,7 @@ decode_intra_mb:
 fill_rectangle(&h->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
 fill_rectangle(&h->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
 }
-s->current_picture.qscale_table[mb_xy]= s->qscale;
+s->current_picture.f.qscale_table[mb_xy] = s->qscale;
 write_back_non_zero_count(h);

 if(MB_MBAFF){
@@ -87,7 +87,7 @@ static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field,
 poc= (poc&~3) + rfield + 1;

 for(j=start; j<end; j++){
-if(4*h->ref_list[0][j].frame_num + (h->ref_list[0][j].reference&3) == poc){
+if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) {
 int cur_ref= mbafi ? (j-16)^field : j;
 map[list][2*old_ref + (rfield^field) + 16] = cur_ref;
 if(rfield == field || !interl)
@@ -105,12 +105,12 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
 Picture * const cur = s->current_picture_ptr;
 int list, j, field;
 int sidx= (s->picture_structure&1)^1;
-int ref1sidx= (ref1->reference&1)^1;
+int ref1sidx = (ref1->f.reference&1)^1;

 for(list=0; list<2; list++){
 cur->ref_count[sidx][list] = h->ref_count[list];
 for(j=0; j<h->ref_count[list]; j++)
-cur->ref_poc[sidx][list][j] = 4*h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference&3);
+cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3);
 }

 if(s->picture_structure == PICT_FRAME){
@@ -126,11 +126,11 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
 int *col_poc = h->ref_list[1]->field_poc;
 h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc));
 ref1sidx=sidx= h->col_parity;
-}else if(!(s->picture_structure & h->ref_list[1][0].reference) && !h->ref_list[1][0].mbaff){ // FL -> FL & differ parity
-h->col_fieldoff= 2*(h->ref_list[1][0].reference) - 3;
+} else if (!(s->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
+h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3;
 }

-if(cur->pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
+if (cur->f.pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
 return;

 for(list=0; list<2; list++){
@@ -143,7 +143,7 @@ void ff_h264_direct_ref_list_init(H264Context * const h){

 static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
 {
-int ref_field = ref->reference - 1;
+int ref_field = ref->f.reference - 1;
 int ref_field_picture = ref->field_picture;
 int ref_height = 16*h->s.mb_height >> ref_field_picture;

@@ -234,8 +234,8 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
 return;
 }

-if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL
-if(!IS_INTERLACED(*mb_type)){ // AFR/FR -> AFL/FL
+if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
+if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
 mb_y = (s->mb_y&~1) + h->col_parity;
 mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
 b8_stride = 0;
@@ -248,8 +248,8 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
 if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
 mb_y = s->mb_y&~1;
 mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
-mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
-mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride];
+mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
+mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
 b8_stride = 2+4*s->mb_stride;
 b4_stride *= 6;

@@ -264,7 +264,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
 }else{ // AFR/FR -> AFR/FR
 single_col:
 mb_type_col[0] =
-mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
+mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];

 sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
 if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
@@ -284,10 +284,10 @@ single_col:

 await_reference_mb_row(h, &h->ref_list[1][0], mb_y);

-l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
-l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
-l1ref0 = &h->ref_list[1][0].ref_index [0][4*mb_xy];
-l1ref1 = &h->ref_list[1][0].ref_index [1][4*mb_xy];
+l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
+l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
+l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
+l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
 if(!b8_stride){
 if(s->mb_y&1){
 l1ref0 += 2;
@@ -420,8 +420,8 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){

 await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type));

-if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL
-if(!IS_INTERLACED(*mb_type)){ // AFR/FR -> AFL/FL
+if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
+if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
 mb_y = (s->mb_y&~1) + h->col_parity;
 mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
 b8_stride = 0;
@@ -434,8 +434,8 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
 if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
 mb_y = s->mb_y&~1;
 mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
-mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
-mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride];
+mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
+mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
 b8_stride = 2+4*s->mb_stride;
 b4_stride *= 6;

@@ -451,7 +451,7 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
 }else{ // AFR/FR -> AFR/FR
 single_col:
 mb_type_col[0] =
-mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
+mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];

 sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
 if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
@@ -471,10 +471,10 @@ single_col:

 await_reference_mb_row(h, &h->ref_list[1][0], mb_y);

-l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
-l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
-l1ref0 = &h->ref_list[1][0].ref_index [0][4*mb_xy];
-l1ref1 = &h->ref_list[1][0].ref_index [1][4*mb_xy];
+l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
+l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
+l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
+l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
 if(!b8_stride){
 if(s->mb_y&1){
 l1ref0 += 2;
@@ -230,10 +230,10 @@ void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y,
 left_type= h->left_type[LTOP];
 top_type= h->top_type;

-mb_type = s->current_picture.mb_type[mb_xy];
-qp = s->current_picture.qscale_table[mb_xy];
-qp0 = s->current_picture.qscale_table[mb_xy-1];
-qp1 = s->current_picture.qscale_table[h->top_mb_xy];
+mb_type = s->current_picture.f.mb_type[mb_xy];
+qp = s->current_picture.f.qscale_table[mb_xy];
+qp0 = s->current_picture.f.qscale_table[mb_xy - 1];
+qp1 = s->current_picture.f.qscale_table[h->top_mb_xy];
 qpc = get_chroma_qp( h, 0, qp );
 qpc0 = get_chroma_qp( h, 0, qp0 );
 qpc1 = get_chroma_qp( h, 0, qp1 );
@@ -435,10 +435,10 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
 for(j=0; j<2; j++, mbn_xy += s->mb_stride){
 DECLARE_ALIGNED(8, int16_t, bS)[4];
 int qp;
-if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) {
+if (IS_INTRA(mb_type | s->current_picture.f.mb_type[mbn_xy])) {
 AV_WN64A(bS, 0x0003000300030003ULL);
 } else {
-if(!CABAC && IS_8x8DCT(s->current_picture.mb_type[mbn_xy])){
+if (!CABAC && IS_8x8DCT(s->current_picture.f.mb_type[mbn_xy])) {
 bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]);
 bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]);
 bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]);
@@ -453,12 +453,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
 }
 // Do not use s->qscale as luma quantizer because it has not the same
 // value in IPCM macroblocks.
-qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1;
+qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbn_xy] + 1) >> 1;
 tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
 { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
 filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, h );
-chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
-chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
+chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1;
+chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1;
 if (chroma) {
 if (chroma444) {
 filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], h);
@@ -518,12 +518,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
 // Do not use s->qscale as luma quantizer because it has not the same
 // value in IPCM macroblocks.
 if(bS[0]+bS[1]+bS[2]+bS[3]){
-qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbm_xy] + 1 ) >> 1;
+qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbm_xy] + 1) >> 1;
 //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
 tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
 //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
-chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1;
-chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1;
+chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1;
+chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1;
 if( dir == 0 ) {
 filter_mb_edgev( &img_y[0], linesize, bS, qp, h );
 if (chroma) {
@@ -602,7 +602,7 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
 /* Filter edge */
 // Do not use s->qscale as luma quantizer because it has not the same
 // value in IPCM macroblocks.
-qp = s->current_picture.qscale_table[mb_xy];
+qp = s->current_picture.f.qscale_table[mb_xy];
 //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
 tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
 //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
@@ -635,7 +635,7 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
 void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
 MpegEncContext * const s = &h->s;
 const int mb_xy= mb_x + mb_y*s->mb_stride;
-const int mb_type = s->current_picture.mb_type[mb_xy];
+const int mb_type = s->current_picture.f.mb_type[mb_xy];
 const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
 int first_vertical_edge_done = 0;
 av_unused int dir;
@@ -688,9 +688,9 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
 }
 }

-mb_qp = s->current_picture.qscale_table[mb_xy];
-mbn0_qp = s->current_picture.qscale_table[h->left_mb_xy[0]];
-mbn1_qp = s->current_picture.qscale_table[h->left_mb_xy[1]];
+mb_qp = s->current_picture.f.qscale_table[mb_xy];
+mbn0_qp = s->current_picture.f.qscale_table[h->left_mb_xy[0]];
+mbn1_qp = s->current_picture.f.qscale_table[h->left_mb_xy[1]];
 qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
 bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
 get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;
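The quantizer averaging that these loop-filter hunks move onto the new access path is a round-to-nearest halving, (a + b + 1) >> 1, taken across the two macroblocks that share the filtered edge; plain (a + b) >> 1 would bias the result downward. A tiny sketch, assuming only non-negative QP inputs:

    #include <assert.h>

    /* Rounded average of the quantizers on either side of an edge. */
    static int qp_avg(int qp_a, int qp_b)
    {
        return (qp_a + qp_b + 1) >> 1;
    }

    int main(void)
    {
        assert(qp_avg(26, 27) == 27); /* .5 rounds up   */
        assert(qp_avg(26, 26) == 26); /* exact average  */
        return 0;
    }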
@@ -48,15 +48,15 @@ static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C,
 const int mb_type = mb_types[xy+(y4>>2)*s->mb_stride];\
 if(!USES_LIST(mb_type,list))\
 return LIST_NOT_USED;\
-mv = s->current_picture_ptr->motion_val[list][h->mb2b_xy[xy]+3 + y4*h->b_stride];\
+mv = s->current_picture_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4*h->b_stride];\
 h->mv_cache[list][scan8[0]-2][0] = mv[0];\
 h->mv_cache[list][scan8[0]-2][1] = mv[1] MV_OP;\
-return s->current_picture_ptr->ref_index[list][4*xy+1 + (y4&~1)] REF_OP;
+return s->current_picture_ptr->f.ref_index[list][4*xy + 1 + (y4 & ~1)] REF_OP;

 if(topright_ref == PART_NOT_AVAILABLE
 && i >= scan8[0]+8 && (i&7)==4
 && h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){
-const uint32_t *mb_types = s->current_picture_ptr->mb_type;
+const uint32_t *mb_types = s->current_picture_ptr->f.mb_type;
 const int16_t *mv;
 AV_ZERO32(h->mv_cache[list][scan8[0]-2]);
 *C = h->mv_cache[list][scan8[0]-2];
@@ -236,8 +236,8 @@ static av_always_inline void pred_pskip_motion(H264Context * const h){
 DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = {0};
 DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
 MpegEncContext * const s = &h->s;
-int8_t *ref = s->current_picture.ref_index[0];
-int16_t (*mv)[2] = s->current_picture.motion_val[0];
+int8_t *ref = s->current_picture.f.ref_index[0];
+int16_t (*mv)[2] = s->current_picture.f.motion_val[0];
 int top_ref, left_ref, diagonal_ref, match_count, mx, my;
 const int16_t *A, *B, *C;
 int b_stride = h->b_stride;
@@ -39,16 +39,16 @@ static void pic_as_field(Picture *pic, const int parity){
 int i;
 for (i = 0; i < 4; ++i) {
 if (parity == PICT_BOTTOM_FIELD)
-pic->data[i] += pic->linesize[i];
-pic->reference = parity;
-pic->linesize[i] *= 2;
+pic->f.data[i] += pic->f.linesize[i];
+pic->f.reference = parity;
+pic->f.linesize[i] *= 2;
 }
 pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD];
 }

 static int split_field_copy(Picture *dest, Picture *src,
 int parity, int id_add){
-int match = !!(src->reference & parity);
+int match = !!(src->f.reference & parity);

 if (match) {
 *dest = *src;
@@ -67,9 +67,9 @@ static int build_def_list(Picture *def, Picture **in, int len, int is_long, int
 int index=0;

 while(i[0]<len || i[1]<len){
-while(i[0]<len && !(in[ i[0] ] && (in[ i[0] ]->reference & sel)))
+while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->f.reference & sel)))
 i[0]++;
-while(i[1]<len && !(in[ i[1] ] && (in[ i[1] ]->reference & (sel^3))))
+while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3))))
 i[1]++;
 if(i[0] < len){
 in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
@@ -133,7 +133,7 @@ int ff_h264_fill_default_ref_list(H264Context *h){
 }

 if(lens[0] == lens[1] && lens[1] > 1){
-for(i=0; h->default_ref_list[0][i].data[0] == h->default_ref_list[1][i].data[0] && i<lens[0]; i++);
+for (i = 0; h->default_ref_list[0][i].f.data[0] == h->default_ref_list[1][i].f.data[0] && i < lens[0]; i++);
 if(i == lens[0])
 FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
 }
@@ -229,11 +229,11 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){

 for(i= h->short_ref_count-1; i>=0; i--){
 ref = h->short_ref[i];
-assert(ref->reference);
+assert(ref->f.reference);
 assert(!ref->long_ref);
 if(
 ref->frame_num == frame_num &&
-(ref->reference & pic_structure)
+(ref->f.reference & pic_structure)
 )
 break;
 }
@@ -250,8 +250,8 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
 return -1;
 }
 ref = h->long_ref[long_idx];
-assert(!(ref && !ref->reference));
-if(ref && (ref->reference & pic_structure)){
+assert(!(ref && !ref->f.reference));
+if (ref && (ref->f.reference & pic_structure)) {
 ref->pic_id= pic_id;
 assert(ref->long_ref);
 i=0;
@@ -285,9 +285,9 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
 }
 for(list=0; list<h->list_count; list++){
 for(index= 0; index < h->ref_count[list]; index++){
-if(!h->ref_list[list][index].data[0]){
+if (!h->ref_list[list][index].f.data[0]) {
 av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture\n");
-if(h->default_ref_list[list][0].data[0])
+if (h->default_ref_list[list][0].f.data[0])
 h->ref_list[list][index]= h->default_ref_list[list][0];
 else
 return -1;
@@ -306,13 +306,13 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){
 Picture *field = &h->ref_list[list][16+2*i];
 field[0] = *frame;
 for(j=0; j<3; j++)
-field[0].linesize[j] <<= 1;
-field[0].reference = PICT_TOP_FIELD;
+field[0].f.linesize[j] <<= 1;
+field[0].f.reference = PICT_TOP_FIELD;
 field[0].poc= field[0].field_poc[0];
 field[1] = field[0];
 for(j=0; j<3; j++)
-field[1].data[j] += frame->linesize[j];
-field[1].reference = PICT_BOTTOM_FIELD;
+field[1].f.data[j] += frame->f.linesize[j];
+field[1].f.reference = PICT_BOTTOM_FIELD;
 field[1].poc= field[1].field_poc[1];

 h->luma_weight[16+2*i][list][0] = h->luma_weight[16+2*i+1][list][0] = h->luma_weight[i][list][0];
@@ -338,12 +338,12 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){
 */
 static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){
 int i;
-if (pic->reference &= refmask) {
+if (pic->f.reference &= refmask) {
 return 0;
 } else {
 for(i = 0; h->delayed_pic[i]; i++)
 if(pic == h->delayed_pic[i]){
-pic->reference=DELAYED_PIC_REF;
+pic->f.reference = DELAYED_PIC_REF;
 break;
 }
 return 1;
@@ -453,7 +453,8 @@ static void print_short_term(H264Context *h) {
 av_log(h->s.avctx, AV_LOG_DEBUG, "short term list:\n");
 for(i=0; i<h->short_ref_count; i++){
 Picture *pic= h->short_ref[i];
-av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
+av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n",
+       i, pic->frame_num, pic->poc, pic->f.data[0]);
 }
 }
 }
@@ -468,7 +469,8 @@ static void print_long_term(H264Context *h) {
 for(i = 0; i < 16; i++){
 Picture *pic= h->long_ref[i];
 if (pic) {
-av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
+av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n",
+       i, pic->frame_num, pic->poc, pic->f.data[0]);
 }
 }
 }
@@ -480,7 +482,7 @@ void ff_generate_sliding_window_mmcos(H264Context *h) {

 h->mmco_index= 0;
 if(h->short_ref_count && h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
-!(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->reference)) {
+!(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->f.reference)) {
 h->mmco[0].opcode= MMCO_SHORT2UNUSED;
 h->mmco[0].short_pic_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num;
 h->mmco_index= 1;
@@ -561,7 +563,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
 h->long_ref_count++;
 }

-s->current_picture_ptr->reference |= s->picture_structure;
+s->current_picture_ptr->f.reference |= s->picture_structure;
 current_ref_assigned=1;
 break;
 case MMCO_SET_MAX_LONG:
@@ -600,7 +602,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
 */
 if (h->short_ref_count && h->short_ref[0] == s->current_picture_ptr) {
 /* Just mark the second field valid */
-s->current_picture_ptr->reference = PICT_FRAME;
+s->current_picture_ptr->f.reference = PICT_FRAME;
 } else if (s->current_picture_ptr->long_ref) {
 av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term reference "
 "assignment for second field "
@@ -617,7 +619,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){

 h->short_ref[0]= s->current_picture_ptr;
 h->short_ref_count++;
-s->current_picture_ptr->reference |= s->picture_structure;
+s->current_picture_ptr->f.reference |= s->picture_structure;
 }
 }

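The pic_as_field hunk above shows the standard trick for viewing one field of an interleaved frame: advance the plane pointer by one line for the bottom field, then double the stride so each step skips the opposite field's lines. A self-contained sketch over a single hypothetical plane:

    #include <stdint.h>
    #include <stdio.h>

    /* Turn a frame-plane view into a single-field view. */
    static void plane_as_field(uint8_t **data, int *linesize, int bottom)
    {
        if (bottom)
            *data += *linesize; /* start on the second (bottom-field) line */
        *linesize *= 2;         /* step over the interleaved other field   */
    }

    int main(void)
    {
        uint8_t frame[8 * 16];
        uint8_t *p = frame;
        int stride = 16;
        plane_as_field(&p, &stride, 1);
        printf("offset=%ld stride=%d\n", (long)(p - frame), stride); /* 16, 32 */
        return 0;
    }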
@@ -304,7 +304,7 @@ static int x8_setup_spatial_predictor(IntraX8Context * const w, const int chroma
 int quant;

 s->dsp.x8_setup_spatial_compensation(s->dest[chroma], s->edge_emu_buffer,
-s->current_picture.linesize[chroma>0],
+s->current_picture.f.linesize[chroma>0],
 &range, &sum, w->edges);
 if(chroma){
 w->orient=w->chroma_orient;
@@ -613,7 +613,7 @@ static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){
 dc_level+= (w->predicted_dc*divide_quant + (1<<12) )>>13;

 dsp_x8_put_solidcolor( av_clip_uint8((dc_level*dc_quant+4)>>3),
-s->dest[chroma], s->current_picture.linesize[!!chroma]);
+s->dest[chroma], s->current_picture.f.linesize[!!chroma]);

 goto block_placed;
 }
@@ -637,15 +637,15 @@ static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){
 }

 if(w->flat_dc){
-dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.linesize[!!chroma]);
+dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.f.linesize[!!chroma]);
 }else{
 s->dsp.x8_spatial_compensation[w->orient]( s->edge_emu_buffer,
 s->dest[chroma],
-s->current_picture.linesize[!!chroma] );
+s->current_picture.f.linesize[!!chroma] );
 }
 if(!zeros_only)
 s->dsp.idct_add ( s->dest[chroma],
-s->current_picture.linesize[!!chroma],
+s->current_picture.f.linesize[!!chroma],
 s->block[0] );

 block_placed:
@@ -656,7 +656,7 @@ block_placed:

 if(s->loop_filter){
 uint8_t* ptr = s->dest[chroma];
-int linesize = s->current_picture.linesize[!!chroma];
+int linesize = s->current_picture.f.linesize[!!chroma];

 if(!( (w->edges&2) || ( zeros_only && (w->orient|4)==4 ) )){
 s->dsp.x8_h_loop_filter(ptr, linesize, w->quant);
@@ -671,12 +671,12 @@ block_placed:
 static void x8_init_block_index(MpegEncContext *s){ //FIXME maybe merge with ff_*
 //not s->linesize as this would be wrong for field pics
 //not that IntraX8 has interlacing support ;)
-const int linesize = s->current_picture.linesize[0];
-const int uvlinesize= s->current_picture.linesize[1];
+const int linesize = s->current_picture.f.linesize[0];
+const int uvlinesize = s->current_picture.f.linesize[1];

-s->dest[0] = s->current_picture.data[0];
-s->dest[1] = s->current_picture.data[1];
-s->dest[2] = s->current_picture.data[2];
+s->dest[0] = s->current_picture.f.data[0];
+s->dest[1] = s->current_picture.f.data[1];
+s->dest[2] = s->current_picture.f.data[2];

 s->dest[0] += s->mb_y * linesize << 3;
 s->dest[1] += ( s->mb_y&(~1) ) * uvlinesize << 2;//chroma blocks are on add rows
@@ -771,7 +771,7 @@ int ff_intrax8_decode_picture(IntraX8Context * const w, int dquant, int quant_of
 /*emulate MB info in the relevant tables*/
 s->mbskip_table [mb_xy]=0;
 s->mbintra_table[mb_xy]=1;
-s->current_picture.qscale_table[mb_xy]=w->quant;
+s->current_picture.f.qscale_table[mb_xy] = w->quant;
 mb_xy++;
 }
 s->dest[0]+= 8;
@@ -353,20 +353,20 @@ static void preview_obmc(MpegEncContext *s){
 do{
 if (get_bits1(&s->gb)) {
 /* skip mb */
-mot_val = s->current_picture.motion_val[0][ s->block_index[0] ];
+mot_val = s->current_picture.f.motion_val[0][s->block_index[0]];
 mot_val[0 ]= mot_val[2 ]=
 mot_val[0+stride]= mot_val[2+stride]= 0;
 mot_val[1 ]= mot_val[3 ]=
 mot_val[1+stride]= mot_val[3+stride]= 0;

-s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
 goto end;
 }
 cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
 }while(cbpc == 20);

 if(cbpc & 4){
-s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
 }else{
 get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
 if (cbpc & 8) {
@@ -378,7 +378,7 @@ static void preview_obmc(MpegEncContext *s){
 }

 if ((cbpc & 16) == 0) {
-s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
 /* 16x16 motion prediction */
 mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
 if (s->umvplus)
@@ -396,7 +396,7 @@ static void preview_obmc(MpegEncContext *s){
 mot_val[1 ]= mot_val[3 ]=
 mot_val[1+stride]= mot_val[3+stride]= my;
 } else {
-s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
+s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
 for(i=0;i<4;i++) {
 mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
 if (s->umvplus)
@@ -618,7 +618,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
 s->block_last_index[i] = -1;
 s->mv_dir = MV_DIR_FORWARD;
 s->mv_type = MV_TYPE_16X16;
-s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
 s->mv[0][0][0] = 0;
 s->mv[0][0][1] = 0;
 s->mb_skipped = !(s->obmc | s->loop_filter);
@@ -651,7 +651,7 @@ int ff_h263_decode_mb(MpegEncContext *s,

 s->mv_dir = MV_DIR_FORWARD;
 if ((cbpc & 16) == 0) {
-s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
 /* 16x16 motion prediction */
 s->mv_type = MV_TYPE_16X16;
 h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
@@ -676,7 +676,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
 if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
 skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
 } else {
-s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
+s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
 s->mv_type = MV_TYPE_8X8;
 for(i=0;i<4;i++) {
 mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
@@ -704,8 +704,8 @@ int ff_h263_decode_mb(MpegEncContext *s,
 } else if(s->pict_type==AV_PICTURE_TYPE_B) {
 int mb_type;
 const int stride= s->b8_stride;
-int16_t *mot_val0 = s->current_picture.motion_val[0][ 2*(s->mb_x + s->mb_y*stride) ];
-int16_t *mot_val1 = s->current_picture.motion_val[1][ 2*(s->mb_x + s->mb_y*stride) ];
+int16_t *mot_val0 = s->current_picture.f.motion_val[0][2 * (s->mb_x + s->mb_y * stride)];
+int16_t *mot_val1 = s->current_picture.f.motion_val[1][2 * (s->mb_x + s->mb_y * stride)];
 // const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride;

 //FIXME ugly
@@ -788,7 +788,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
 }
 }

-s->current_picture.mb_type[xy]= mb_type;
+s->current_picture.f.mb_type[xy] = mb_type;
 } else { /* I-Frame */
 do{
 cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
@@ -803,11 +803,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
 dquant = cbpc & 4;
 s->mb_intra = 1;
 intra:
-s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
 if (s->h263_aic) {
 s->ac_pred = get_bits1(&s->gb);
 if(s->ac_pred){
-s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED;
+s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;

 s->h263_aic_dir = get_bits1(&s->gb);
 }
@@ -889,7 +889,7 @@ int h263_decode_picture_header(MpegEncContext *s)
 i = get_bits(&s->gb, 8); /* picture timestamp */
 if( (s->picture_number&~0xFF)+i < s->picture_number)
 i+= 256;
-s->current_picture_ptr->pts=
+s->current_picture_ptr->f.pts =
 s->picture_number= (s->picture_number&~0xFF) + i;

 /* PTYPE starts here */
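The picture-header hunk above extends an 8-bit timestamp into the running picture number: the high bits are carried over, and when the 8-bit value has wrapped below the previous count an extra period of 256 is added so the sequence stays monotonic. A small sketch of just that extension, with a hypothetical helper name:

    #include <assert.h>

    /* Extend an 8-bit timestamp ts8 against the previous picture number. */
    static int extend_ts(int picture_number, int ts8)
    {
        int i = ts8;
        if ((picture_number & ~0xFF) + i < picture_number)
            i += 256; /* 8-bit field wrapped: move to the next period */
        return (picture_number & ~0xFF) + i;
    }

    int main(void)
    {
        assert(extend_ts(250, 5) == 261);   /* wrapped: 256 + 5 */
        assert(extend_ts(250, 251) == 251); /* same period      */
        return 0;
    }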
@@ -275,7 +275,7 @@ void h263_encode_gob_header(MpegEncContext * s, int mb_line)
 */
 void ff_clean_h263_qscales(MpegEncContext *s){
 int i;
-int8_t * const qscale_table= s->current_picture.qscale_table;
+int8_t * const qscale_table = s->current_picture.f.qscale_table;

 ff_init_qscale_tab(s);

@@ -529,8 +529,8 @@ void h263_encode_mb(MpegEncContext * s,
 /* motion vectors: 8x8 mode*/
 h263_pred_motion(s, i, 0, &pred_x, &pred_y);

-motion_x= s->current_picture.motion_val[0][ s->block_index[i] ][0];
-motion_y= s->current_picture.motion_val[0][ s->block_index[i] ][1];
+motion_x = s->current_picture.f.motion_val[0][s->block_index[i]][0];
+motion_y = s->current_picture.f.motion_val[0][s->block_index[i]][1];
 if (!s->umvplus) {
 ff_h263_encode_motion_vector(s, motion_x - pred_x,
 motion_y - pred_y, 1);
@@ -533,16 +533,16 @@ static inline void set_p_mv_tables(MpegEncContext * s, int mx, int my, int mv4)
 if(mv4){
 int mot_xy= s->block_index[0];

-s->current_picture.motion_val[0][mot_xy ][0]= mx;
-s->current_picture.motion_val[0][mot_xy ][1]= my;
-s->current_picture.motion_val[0][mot_xy+1][0]= mx;
-s->current_picture.motion_val[0][mot_xy+1][1]= my;
+s->current_picture.f.motion_val[0][mot_xy ][0] = mx;
+s->current_picture.f.motion_val[0][mot_xy ][1] = my;
+s->current_picture.f.motion_val[0][mot_xy + 1][0] = mx;
+s->current_picture.f.motion_val[0][mot_xy + 1][1] = my;

 mot_xy += s->b8_stride;
-s->current_picture.motion_val[0][mot_xy ][0]= mx;
-s->current_picture.motion_val[0][mot_xy ][1]= my;
-s->current_picture.motion_val[0][mot_xy+1][0]= mx;
-s->current_picture.motion_val[0][mot_xy+1][1]= my;
+s->current_picture.f.motion_val[0][mot_xy ][0] = mx;
+s->current_picture.f.motion_val[0][mot_xy ][1] = my;
+s->current_picture.f.motion_val[0][mot_xy + 1][0] = mx;
+s->current_picture.f.motion_val[0][mot_xy + 1][1] = my;
 }
 }

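The set_p_mv_tables hunk above fans one 16x16 motion vector out to all four 8x8 cells of the macroblock in the per-block motion_val grid: two adjacent entries on one row, then the same pair one b8 row down. A minimal sketch under an assumed grid size; names here are illustrative, not the libavcodec layout:

    #include <assert.h>
    #include <stdint.h>

    enum { B8_STRIDE = 8 }; /* hypothetical width of the 8x8-block grid */

    /* Replicate one MV into the macroblock's four 8x8 grid cells. */
    static void set_mb_mv(int16_t (*motion_val)[2], int mot_xy, int mx, int my)
    {
        int row;
        for (row = 0; row < 2; row++) {
            motion_val[mot_xy    ][0] = (int16_t)mx;
            motion_val[mot_xy    ][1] = (int16_t)my;
            motion_val[mot_xy + 1][0] = (int16_t)mx;
            motion_val[mot_xy + 1][1] = (int16_t)my;
            mot_xy += B8_STRIDE; /* step to the next 8x8 row */
        }
    }

    int main(void)
    {
        int16_t grid[64][2] = {{0}};
        set_mb_mv(grid, 10, 3, -7);
        assert(grid[10][0] == 3 && grid[11][1] == -7);
        assert(grid[10 + B8_STRIDE][0] == 3);
        return 0;
    }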
@@ -615,8 +615,8 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
 const int mot_stride = s->b8_stride;
 const int mot_xy = s->block_index[block];

-P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0];
-P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1];
+P_LEFT[0] = s->current_picture.f.motion_val[0][mot_xy - 1][0];
+P_LEFT[1] = s->current_picture.f.motion_val[0][mot_xy - 1][1];

 if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);

@@ -625,10 +625,10 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
 c->pred_x= pred_x4= P_LEFT[0];
 c->pred_y= pred_y4= P_LEFT[1];
 } else {
-P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0];
-P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1];
-P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][0];
-P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][1];
+P_TOP[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][0];
+P_TOP[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][1];
+P_TOPRIGHT[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + off[block]][0];
+P_TOPRIGHT[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + off[block]][1];
 if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
 if(P_TOPRIGHT[0] < (c->xmin<<shift)) P_TOPRIGHT[0]= (c->xmin<<shift);
 if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift);
@@ -680,8 +680,8 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
 my4_sum+= my4;
 }

-s->current_picture.motion_val[0][ s->block_index[block] ][0]= mx4;
-s->current_picture.motion_val[0][ s->block_index[block] ][1]= my4;
+s->current_picture.f.motion_val[0][s->block_index[block]][0] = mx4;
+s->current_picture.f.motion_val[0][s->block_index[block]][1] = my4;

 if(mx4 != mx || my4 != my) same=0;
 }
@@ -690,7 +690,7 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
 return INT_MAX;

 if(s->dsp.me_sub_cmp[0] != s->dsp.mb_cmp[0]){
-dmin_sum += s->dsp.mb_cmp[0](s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*16*stride, c->scratchpad, stride, 16);
+dmin_sum += s->dsp.mb_cmp[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*16*stride, c->scratchpad, stride, 16);
 }

 if(c->avctx->mb_cmp&FF_CMP_CHROMA){
@@ -705,15 +705,15 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
 offset= (s->mb_x*8 + (mx>>1)) + (s->mb_y*8 + (my>>1))*s->uvlinesize;

 if(s->no_rounding){
-s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad , s->last_picture.data[1] + offset, s->uvlinesize, 8);
-s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad+8 , s->last_picture.data[2] + offset, s->uvlinesize, 8);
+s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad , s->last_picture.f.data[1] + offset, s->uvlinesize, 8);
+s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8);
 }else{
-s->dsp.put_pixels_tab [1][dxy](c->scratchpad , s->last_picture.data[1] + offset, s->uvlinesize, 8);
-s->dsp.put_pixels_tab [1][dxy](c->scratchpad+8 , s->last_picture.data[2] + offset, s->uvlinesize, 8);
+s->dsp.put_pixels_tab [1][dxy](c->scratchpad , s->last_picture.f.data[1] + offset, s->uvlinesize, 8);
+s->dsp.put_pixels_tab [1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8);
 }

-dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad , s->uvlinesize, 8);
-dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad+8, s->uvlinesize, 8);
+dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad , s->uvlinesize, 8);
+dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad+8, s->uvlinesize, 8);
 }

 c->pred_x= mx;
@@ -879,7 +879,7 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int
 Picture *p= s->current_picture_ptr;
 int mb_xy= mb_x + mb_y*s->mb_stride;
 int xy= 2*mb_x + 2*mb_y*s->b8_stride;
-int mb_type= s->current_picture.mb_type[mb_xy];
+int mb_type= s->current_picture.f.mb_type[mb_xy];
 int flags= c->flags;
 int shift= (flags&FLAG_QPEL) + 1;
 int mask= (1<<shift)-1;
@@ -896,8 +896,8 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int
|
|||||||
|
|
||||||
for(i=0; i<4; i++){
|
for(i=0; i<4; i++){
|
||||||
int xy= s->block_index[i];
|
int xy= s->block_index[i];
|
||||||
clip_input_mv(s, p->motion_val[0][xy], !!IS_INTERLACED(mb_type));
|
clip_input_mv(s, p->f.motion_val[0][xy], !!IS_INTERLACED(mb_type));
|
||||||
clip_input_mv(s, p->motion_val[1][xy], !!IS_INTERLACED(mb_type));
|
clip_input_mv(s, p->f.motion_val[1][xy], !!IS_INTERLACED(mb_type));
|
||||||
}
|
}
|
||||||
|
|
||||||
if(IS_INTERLACED(mb_type)){
|
if(IS_INTERLACED(mb_type)){
|
||||||
@ -912,8 +912,8 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int
|
|||||||
}
|
}
|
||||||
|
|
||||||
if(USES_LIST(mb_type, 0)){
|
if(USES_LIST(mb_type, 0)){
|
||||||
int field_select0= p->ref_index[0][4*mb_xy ];
|
int field_select0= p->f.ref_index[0][4*mb_xy ];
|
||||||
int field_select1= p->ref_index[0][4*mb_xy+2];
|
int field_select1= p->f.ref_index[0][4*mb_xy+2];
|
||||||
assert(field_select0==0 ||field_select0==1);
|
assert(field_select0==0 ||field_select0==1);
|
||||||
assert(field_select1==0 ||field_select1==1);
|
assert(field_select1==0 ||field_select1==1);
|
||||||
init_interlaced_ref(s, 0);
|
init_interlaced_ref(s, 0);
|
||||||
@ -921,46 +921,46 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int
|
|||||||
if(p_type){
|
if(p_type){
|
||||||
s->p_field_select_table[0][mb_xy]= field_select0;
|
s->p_field_select_table[0][mb_xy]= field_select0;
|
||||||
s->p_field_select_table[1][mb_xy]= field_select1;
|
s->p_field_select_table[1][mb_xy]= field_select1;
|
||||||
*(uint32_t*)s->p_field_mv_table[0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[0][xy ];
|
*(uint32_t*)s->p_field_mv_table[0][field_select0][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy ];
|
||||||
*(uint32_t*)s->p_field_mv_table[1][field_select1][mb_xy]= *(uint32_t*)p->motion_val[0][xy2];
|
*(uint32_t*)s->p_field_mv_table[1][field_select1][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy2];
|
||||||
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER_I;
|
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER_I;
|
||||||
}else{
|
}else{
|
||||||
s->b_field_select_table[0][0][mb_xy]= field_select0;
|
s->b_field_select_table[0][0][mb_xy]= field_select0;
|
||||||
s->b_field_select_table[0][1][mb_xy]= field_select1;
|
s->b_field_select_table[0][1][mb_xy]= field_select1;
|
||||||
*(uint32_t*)s->b_field_mv_table[0][0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[0][xy ];
|
*(uint32_t*)s->b_field_mv_table[0][0][field_select0][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy ];
|
||||||
*(uint32_t*)s->b_field_mv_table[0][1][field_select1][mb_xy]= *(uint32_t*)p->motion_val[0][xy2];
|
*(uint32_t*)s->b_field_mv_table[0][1][field_select1][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy2];
|
||||||
s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_FORWARD_I;
|
s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_FORWARD_I;
|
||||||
}
|
}
|
||||||
|
|
||||||
x= p->motion_val[0][xy ][0];
|
x = p->f.motion_val[0][xy ][0];
|
||||||
y= p->motion_val[0][xy ][1];
|
y = p->f.motion_val[0][xy ][1];
|
||||||
d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select0, 0, cmpf, chroma_cmpf, flags);
|
d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select0, 0, cmpf, chroma_cmpf, flags);
|
||||||
x= p->motion_val[0][xy2][0];
|
x = p->f.motion_val[0][xy2][0];
|
||||||
y= p->motion_val[0][xy2][1];
|
y = p->f.motion_val[0][xy2][1];
|
||||||
d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select1, 1, cmpf, chroma_cmpf, flags);
|
d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select1, 1, cmpf, chroma_cmpf, flags);
|
||||||
}
|
}
|
||||||
if(USES_LIST(mb_type, 1)){
|
if(USES_LIST(mb_type, 1)){
|
||||||
int field_select0= p->ref_index[1][4*mb_xy ];
|
int field_select0 = p->f.ref_index[1][4 * mb_xy ];
|
||||||
int field_select1= p->ref_index[1][4*mb_xy+2];
|
int field_select1 = p->f.ref_index[1][4 * mb_xy + 2];
|
||||||
assert(field_select0==0 ||field_select0==1);
|
assert(field_select0==0 ||field_select0==1);
|
||||||
assert(field_select1==0 ||field_select1==1);
|
assert(field_select1==0 ||field_select1==1);
|
||||||
init_interlaced_ref(s, 2);
|
init_interlaced_ref(s, 2);
|
||||||
|
|
||||||
s->b_field_select_table[1][0][mb_xy]= field_select0;
|
s->b_field_select_table[1][0][mb_xy]= field_select0;
|
||||||
s->b_field_select_table[1][1][mb_xy]= field_select1;
|
s->b_field_select_table[1][1][mb_xy]= field_select1;
|
||||||
*(uint32_t*)s->b_field_mv_table[1][0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[1][xy ];
|
*(uint32_t*)s->b_field_mv_table[1][0][field_select0][mb_xy] = *(uint32_t*)p->f.motion_val[1][xy ];
|
||||||
*(uint32_t*)s->b_field_mv_table[1][1][field_select1][mb_xy]= *(uint32_t*)p->motion_val[1][xy2];
|
*(uint32_t*)s->b_field_mv_table[1][1][field_select1][mb_xy] = *(uint32_t*)p->f.motion_val[1][xy2];
|
||||||
if(USES_LIST(mb_type, 0)){
|
if(USES_LIST(mb_type, 0)){
|
||||||
s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_BIDIR_I;
|
s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_BIDIR_I;
|
||||||
}else{
|
}else{
|
||||||
s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_BACKWARD_I;
|
s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_BACKWARD_I;
|
||||||
}
|
}
|
||||||
|
|
||||||
x= p->motion_val[1][xy ][0];
|
x = p->f.motion_val[1][xy ][0];
|
||||||
y= p->motion_val[1][xy ][1];
|
y = p->f.motion_val[1][xy ][1];
|
||||||
d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select0+2, 0, cmpf, chroma_cmpf, flags);
|
d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select0+2, 0, cmpf, chroma_cmpf, flags);
|
||||||
x= p->motion_val[1][xy2][0];
|
x = p->f.motion_val[1][xy2][0];
|
||||||
y= p->motion_val[1][xy2][1];
|
y = p->f.motion_val[1][xy2][1];
|
||||||
d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select1+2, 1, cmpf, chroma_cmpf, flags);
|
d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select1+2, 1, cmpf, chroma_cmpf, flags);
|
||||||
//FIXME bidir scores
|
//FIXME bidir scores
|
||||||
}
|
}
|
||||||
@ -976,33 +976,33 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int
|
|||||||
init_mv4_ref(c);
|
init_mv4_ref(c);
|
||||||
for(i=0; i<4; i++){
|
for(i=0; i<4; i++){
|
||||||
xy= s->block_index[i];
|
xy= s->block_index[i];
|
||||||
x= p->motion_val[0][xy][0];
|
x= p->f.motion_val[0][xy][0];
|
||||||
y= p->motion_val[0][xy][1];
|
y= p->f.motion_val[0][xy][1];
|
||||||
d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 1, 8, i, i, cmpf, chroma_cmpf, flags);
|
d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 1, 8, i, i, cmpf, chroma_cmpf, flags);
|
||||||
}
|
}
|
||||||
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER4V;
|
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER4V;
|
||||||
}else{
|
}else{
|
||||||
if(USES_LIST(mb_type, 0)){
|
if(USES_LIST(mb_type, 0)){
|
||||||
if(p_type){
|
if(p_type){
|
||||||
*(uint32_t*)s->p_mv_table[mb_xy]= *(uint32_t*)p->motion_val[0][xy];
|
*(uint32_t*)s->p_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[0][xy];
|
||||||
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER;
|
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER;
|
||||||
}else if(USES_LIST(mb_type, 1)){
|
}else if(USES_LIST(mb_type, 1)){
|
||||||
*(uint32_t*)s->b_bidir_forw_mv_table[mb_xy]= *(uint32_t*)p->motion_val[0][xy];
|
*(uint32_t*)s->b_bidir_forw_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[0][xy];
|
||||||
*(uint32_t*)s->b_bidir_back_mv_table[mb_xy]= *(uint32_t*)p->motion_val[1][xy];
|
*(uint32_t*)s->b_bidir_back_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[1][xy];
|
||||||
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_BIDIR;
|
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_BIDIR;
|
||||||
}else{
|
}else{
|
||||||
*(uint32_t*)s->b_forw_mv_table[mb_xy]= *(uint32_t*)p->motion_val[0][xy];
|
*(uint32_t*)s->b_forw_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[0][xy];
|
||||||
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_FORWARD;
|
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_FORWARD;
|
||||||
}
|
}
|
||||||
x= p->motion_val[0][xy][0];
|
x = p->f.motion_val[0][xy][0];
|
||||||
y= p->motion_val[0][xy][1];
|
y = p->f.motion_val[0][xy][1];
|
||||||
d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 16, 0, 0, cmpf, chroma_cmpf, flags);
|
d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 16, 0, 0, cmpf, chroma_cmpf, flags);
|
||||||
}else if(USES_LIST(mb_type, 1)){
|
}else if(USES_LIST(mb_type, 1)){
|
||||||
*(uint32_t*)s->b_back_mv_table[mb_xy]= *(uint32_t*)p->motion_val[1][xy];
|
*(uint32_t*)s->b_back_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[1][xy];
|
||||||
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_BACKWARD;
|
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_BACKWARD;
|
||||||
|
|
||||||
x= p->motion_val[1][xy][0];
|
x = p->f.motion_val[1][xy][0];
|
||||||
y= p->motion_val[1][xy][1];
|
y = p->f.motion_val[1][xy][1];
|
||||||
d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 16, 2, 0, cmpf, chroma_cmpf, flags);
|
d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 16, 2, 0, cmpf, chroma_cmpf, flags);
|
||||||
}else
|
}else
|
||||||
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTRA;
|
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTRA;
|
||||||
@ -1023,7 +1023,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
|
|||||||
int mb_type=0;
|
int mb_type=0;
|
||||||
Picture * const pic= &s->current_picture;
|
Picture * const pic= &s->current_picture;
|
||||||
|
|
||||||
init_ref(c, s->new_picture.data, s->last_picture.data, NULL, 16*mb_x, 16*mb_y, 0);
|
init_ref(c, s->new_picture.f.data, s->last_picture.f.data, NULL, 16*mb_x, 16*mb_y, 0);
|
||||||
|
|
||||||
assert(s->quarter_sample==0 || s->quarter_sample==1);
|
assert(s->quarter_sample==0 || s->quarter_sample==1);
|
||||||
assert(s->linesize == c->stride);
|
assert(s->linesize == c->stride);
|
||||||
@ -1075,16 +1075,16 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
|
|||||||
const int mot_stride = s->b8_stride;
|
const int mot_stride = s->b8_stride;
|
||||||
const int mot_xy = s->block_index[0];
|
const int mot_xy = s->block_index[0];
|
||||||
|
|
||||||
P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0];
|
P_LEFT[0] = s->current_picture.f.motion_val[0][mot_xy - 1][0];
|
||||||
P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1];
|
P_LEFT[1] = s->current_picture.f.motion_val[0][mot_xy - 1][1];
|
||||||
|
|
||||||
if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
|
if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
|
||||||
|
|
||||||
if(!s->first_slice_line) {
|
if(!s->first_slice_line) {
|
||||||
P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0];
|
P_TOP[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][0];
|
||||||
P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1];
|
P_TOP[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][1];
|
||||||
P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][0];
|
P_TOPRIGHT[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + 2][0];
|
||||||
P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][1];
|
P_TOPRIGHT[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + 2][1];
|
||||||
if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
|
if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
|
||||||
if(P_TOPRIGHT[0] < (c->xmin<<shift)) P_TOPRIGHT[0]= (c->xmin<<shift);
|
if(P_TOPRIGHT[0] < (c->xmin<<shift)) P_TOPRIGHT[0]= (c->xmin<<shift);
|
||||||
if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift);
|
if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift);
|
||||||
@ -1226,7 +1226,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
|
|||||||
}else{
|
}else{
|
||||||
mean= (s->last_dc[i] + 4)>>3;
|
mean= (s->last_dc[i] + 4)>>3;
|
||||||
}
|
}
|
||||||
dest_c = s->new_picture.data[i] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
|
dest_c = s->new_picture.f.data[i] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
|
||||||
|
|
||||||
mean*= 0x01010101;
|
mean*= 0x01010101;
|
||||||
for(i=0; i<8; i++){
|
for(i=0; i<8; i++){
|
||||||
@ -1242,9 +1242,9 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
|
|||||||
|
|
||||||
if(intra_score < dmin){
|
if(intra_score < dmin){
|
||||||
mb_type= CANDIDATE_MB_TYPE_INTRA;
|
mb_type= CANDIDATE_MB_TYPE_INTRA;
|
||||||
s->current_picture.mb_type[mb_y*s->mb_stride + mb_x]= CANDIDATE_MB_TYPE_INTRA; //FIXME cleanup
|
s->current_picture.f.mb_type[mb_y*s->mb_stride + mb_x] = CANDIDATE_MB_TYPE_INTRA; //FIXME cleanup
|
||||||
}else
|
}else
|
||||||
s->current_picture.mb_type[mb_y*s->mb_stride + mb_x]= 0;
|
s->current_picture.f.mb_type[mb_y*s->mb_stride + mb_x] = 0;
|
||||||
|
|
||||||
{
|
{
|
||||||
int p_score= FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100);
|
int p_score= FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100);
|
||||||
@ -1264,7 +1264,7 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s,
|
|||||||
int P[10][2];
|
int P[10][2];
|
||||||
const int shift= 1+s->quarter_sample;
|
const int shift= 1+s->quarter_sample;
|
||||||
const int xy= mb_x + mb_y*s->mb_stride;
|
const int xy= mb_x + mb_y*s->mb_stride;
|
||||||
init_ref(c, s->new_picture.data, s->last_picture.data, NULL, 16*mb_x, 16*mb_y, 0);
|
init_ref(c, s->new_picture.f.data, s->last_picture.f.data, NULL, 16*mb_x, 16*mb_y, 0);
|
||||||
|
|
||||||
assert(s->quarter_sample==0 || s->quarter_sample==1);
|
assert(s->quarter_sample==0 || s->quarter_sample==1);
|
||||||
|
|
||||||
@ -1615,7 +1615,7 @@ static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y)
|
|||||||
ymin= xmin=(-32)>>shift;
|
ymin= xmin=(-32)>>shift;
|
||||||
ymax= xmax= 31>>shift;
|
ymax= xmax= 31>>shift;
|
||||||
|
|
||||||
if(IS_8X8(s->next_picture.mb_type[mot_xy])){
|
if (IS_8X8(s->next_picture.f.mb_type[mot_xy])) {
|
||||||
s->mv_type= MV_TYPE_8X8;
|
s->mv_type= MV_TYPE_8X8;
|
||||||
}else{
|
}else{
|
||||||
s->mv_type= MV_TYPE_16X16;
|
s->mv_type= MV_TYPE_16X16;
|
||||||
@ -1625,8 +1625,8 @@ static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y)
|
|||||||
int index= s->block_index[i];
|
int index= s->block_index[i];
|
||||||
int min, max;
|
int min, max;
|
||||||
|
|
||||||
c->co_located_mv[i][0]= s->next_picture.motion_val[0][index][0];
|
c->co_located_mv[i][0] = s->next_picture.f.motion_val[0][index][0];
|
||||||
c->co_located_mv[i][1]= s->next_picture.motion_val[0][index][1];
|
c->co_located_mv[i][1] = s->next_picture.f.motion_val[0][index][1];
|
||||||
c->direct_basis_mv[i][0]= c->co_located_mv[i][0]*time_pb/time_pp + ((i& 1)<<(shift+3));
|
c->direct_basis_mv[i][0]= c->co_located_mv[i][0]*time_pb/time_pp + ((i& 1)<<(shift+3));
|
||||||
c->direct_basis_mv[i][1]= c->co_located_mv[i][1]*time_pb/time_pp + ((i>>1)<<(shift+3));
|
c->direct_basis_mv[i][1]= c->co_located_mv[i][1]*time_pb/time_pp + ((i>>1)<<(shift+3));
|
||||||
// c->direct_basis_mv[1][i][0]= c->co_located_mv[i][0]*(time_pb - time_pp)/time_pp + ((i &1)<<(shift+3);
|
// c->direct_basis_mv[1][i][0]= c->co_located_mv[i][0]*(time_pb - time_pp)/time_pp + ((i &1)<<(shift+3);
|
||||||
@ -1708,13 +1708,14 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
|
|||||||
int fmin, bmin, dmin, fbmin, bimin, fimin;
|
int fmin, bmin, dmin, fbmin, bimin, fimin;
|
||||||
int type=0;
|
int type=0;
|
||||||
const int xy = mb_y*s->mb_stride + mb_x;
|
const int xy = mb_y*s->mb_stride + mb_x;
|
||||||
init_ref(c, s->new_picture.data, s->last_picture.data, s->next_picture.data, 16*mb_x, 16*mb_y, 2);
|
init_ref(c, s->new_picture.f.data, s->last_picture.f.data,
|
||||||
|
s->next_picture.f.data, 16 * mb_x, 16 * mb_y, 2);
|
||||||
|
|
||||||
get_limits(s, 16*mb_x, 16*mb_y);
|
get_limits(s, 16*mb_x, 16*mb_y);
|
||||||
|
|
||||||
c->skip=0;
|
c->skip=0;
|
||||||
|
|
||||||
if(s->codec_id == CODEC_ID_MPEG4 && s->next_picture.mbskip_table[xy]){
|
if (s->codec_id == CODEC_ID_MPEG4 && s->next_picture.f.mbskip_table[xy]) {
|
||||||
int score= direct_search(s, mb_x, mb_y); //FIXME just check 0,0
|
int score= direct_search(s, mb_x, mb_y); //FIXME just check 0,0
|
||||||
|
|
||||||
score= ((unsigned)(score*score + 128*256))>>16;
|
score= ((unsigned)(score*score + 128*256))>>16;
|
||||||
@ -1947,14 +1948,14 @@ void ff_fix_long_p_mvs(MpegEncContext * s)
|
|||||||
int block;
|
int block;
|
||||||
for(block=0; block<4; block++){
|
for(block=0; block<4; block++){
|
||||||
int off= (block& 1) + (block>>1)*wrap;
|
int off= (block& 1) + (block>>1)*wrap;
|
||||||
int mx= s->current_picture.motion_val[0][ xy + off ][0];
|
int mx = s->current_picture.f.motion_val[0][ xy + off ][0];
|
||||||
int my= s->current_picture.motion_val[0][ xy + off ][1];
|
int my = s->current_picture.f.motion_val[0][ xy + off ][1];
|
||||||
|
|
||||||
if( mx >=range || mx <-range
|
if( mx >=range || mx <-range
|
||||||
|| my >=range || my <-range){
|
|| my >=range || my <-range){
|
||||||
s->mb_type[i] &= ~CANDIDATE_MB_TYPE_INTER4V;
|
s->mb_type[i] &= ~CANDIDATE_MB_TYPE_INTER4V;
|
||||||
s->mb_type[i] |= CANDIDATE_MB_TYPE_INTRA;
|
s->mb_type[i] |= CANDIDATE_MB_TYPE_INTRA;
|
||||||
s->current_picture.mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
|
s->current_picture.f.mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
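Every hunk above follows the same mechanical rewrite: fields that used to sit directly in struct Picture are now reached through an embedded AVFrame member named f. A minimal sketch of the resulting layout, inferred from the p->f.* accesses in this diff and not the full libavcodec definition:

    /* Sketch only: inferred from the accesses above; the real struct
     * keeps many more codec-private fields after 'f'. */
    typedef struct Picture {
        struct AVFrame f;   /* data[], linesize[], motion_val, mb_type,
                             * qscale_table, ref_index, mbskip_table, ... */
        /* codec-private motion-estimation and threading state stays
         * as direct members of Picture */
    } Picture;

    /* The rename is purely syntactic at each call site:
     *   before: s->current_picture.motion_val[0][xy][0]
     *   after:  s->current_picture.f.motion_val[0][xy][0]
     */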
@@ -221,20 +221,20 @@ static int mpeg_decode_mb(MpegEncContext *s,
     if (s->mb_skip_run-- != 0) {
         if (s->pict_type == AV_PICTURE_TYPE_P) {
             s->mb_skipped = 1;
-            s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
+            s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
         } else {
             int mb_type;

             if(s->mb_x)
-                mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1];
+                mb_type = s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
             else
-                mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
+                mb_type = s->current_picture.f.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
             if(IS_INTRA(mb_type))
                 return -1;

-            s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]=
+            s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
                 mb_type | MB_TYPE_SKIP;
-// assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
+// assert(s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));

             if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0)
                 s->mb_skipped = 1;

@@ -577,7 +577,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
         }
     }

-    s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type;
+    s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;

     return 0;
 }

@@ -1385,8 +1385,8 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
         s->mpeg_f_code[1][0] = f_code;
         s->mpeg_f_code[1][1] = f_code;
     }
-    s->current_picture.pict_type= s->pict_type;
-    s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture.f.pict_type = s->pict_type;
+    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

     if(avctx->debug & FF_DEBUG_PICT_INFO)
         av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);

@@ -1539,8 +1539,8 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
             s->pict_type= AV_PICTURE_TYPE_P;
         }else
             s->pict_type= AV_PICTURE_TYPE_B;
-        s->current_picture.pict_type= s->pict_type;
-        s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+        s->current_picture.f.pict_type = s->pict_type;
+        s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
     }
     s->intra_dc_precision = get_bits(&s->gb, 2);
     s->picture_structure = get_bits(&s->gb, 2);

@@ -1618,19 +1618,19 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
         ff_er_frame_start(s);

         /* first check if we must repeat the frame */
-        s->current_picture_ptr->repeat_pict = 0;
+        s->current_picture_ptr->f.repeat_pict = 0;
         if (s->repeat_first_field) {
             if (s->progressive_sequence) {
                 if (s->top_field_first)
-                    s->current_picture_ptr->repeat_pict = 4;
+                    s->current_picture_ptr->f.repeat_pict = 4;
                 else
-                    s->current_picture_ptr->repeat_pict = 2;
+                    s->current_picture_ptr->f.repeat_pict = 2;
             } else if (s->progressive_frame) {
-                s->current_picture_ptr->repeat_pict = 1;
+                s->current_picture_ptr->f.repeat_pict = 1;
             }
         }

-        *s->current_picture_ptr->pan_scan= s1->pan_scan;
+        *s->current_picture_ptr->f.pan_scan = s1->pan_scan;

         if (HAVE_PTHREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
             ff_thread_finish_setup(avctx);

@@ -1643,9 +1643,9 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
         }

         for(i=0; i<4; i++){
-            s->current_picture.data[i] = s->current_picture_ptr->data[i];
+            s->current_picture.f.data[i] = s->current_picture_ptr->f.data[i];
             if(s->picture_structure == PICT_BOTTOM_FIELD){
-                s->current_picture.data[i] += s->current_picture_ptr->linesize[i];
+                s->current_picture.f.data[i] += s->current_picture_ptr->f.linesize[i];
             }
         }
     }

@@ -1767,7 +1767,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
         if(mpeg_decode_mb(s, s->block) < 0)
             return -1;

-        if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs
+        if (s->current_picture.f.motion_val[0] && !s->encoding) { //note motion_val is normally NULL unless we want to extract the MVs
             const int wrap = s->b8_stride;
             int xy = s->mb_x*2 + s->mb_y*2*wrap;
             int b8_xy= 4*(s->mb_x + s->mb_y*s->mb_stride);

@@ -1785,12 +1785,12 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
                     motion_y = s->mv[dir][i][1];
                 }

-                s->current_picture.motion_val[dir][xy ][0] = motion_x;
-                s->current_picture.motion_val[dir][xy ][1] = motion_y;
-                s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
-                s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
-                s->current_picture.ref_index [dir][b8_xy ]=
-                s->current_picture.ref_index [dir][b8_xy + 1]= s->field_select[dir][i];
+                s->current_picture.f.motion_val[dir][xy ][0] = motion_x;
+                s->current_picture.f.motion_val[dir][xy ][1] = motion_y;
+                s->current_picture.f.motion_val[dir][xy + 1][0] = motion_x;
+                s->current_picture.f.motion_val[dir][xy + 1][1] = motion_y;
+                s->current_picture.f.ref_index [dir][b8_xy ] =
+                s->current_picture.f.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
                 assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1);
             }
             xy += wrap;

@@ -1954,7 +1954,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
     if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) {
         /* end of image */

-        s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2;
+        s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_MPEG2;

         ff_er_frame_end(s);

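The mpeg_field_start() hunk above now stores repeat_pict through the embedded frame (0, 1, 2 or 4 extra fields). For reference, the value feeds the documented repeat-field delay formula, extra_delay = repeat_pict / (2*fps); a hedged sketch of how a caller might turn it into a display duration (names illustrative, not library code):

    /* Sketch, assuming a decoded AVFrame 'pic' and a known output rate. */
    double frame_duration_sec(const AVFrame *pic, double fps)
    {
        /* repeat_pict counts extra *fields*: 1 -> 3 fields (telecine),
         * 2 -> frame doubling, 4 -> frame tripling. */
        return (1.0 + pic->repeat_pict / 2.0) / fps;
    }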
@@ -200,7 +200,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)

         if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA)

-        if (s->current_picture.key_frame) {
+        if (s->current_picture.f.key_frame) {
             AVRational framerate= ff_frame_rate_tab[s->frame_rate_index];

             /* mpeg1 header repeated every gop */

@@ -287,9 +287,9 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
             /* time code : we must convert from the real frame rate to a
                fake mpeg frame rate in case of low frame rate */
             fps = (framerate.num + framerate.den/2)/ framerate.den;
-            time_code = s->current_picture_ptr->coded_picture_number + s->avctx->timecode_frame_start;
+            time_code = s->current_picture_ptr->f.coded_picture_number + s->avctx->timecode_frame_start;

-            s->gop_picture_number = s->current_picture_ptr->coded_picture_number;
+            s->gop_picture_number = s->current_picture_ptr->f.coded_picture_number;
             if (s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) {
                 /* only works for NTSC 29.97 */
                 int d = time_code / 17982;

@@ -396,7 +396,7 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
     if (s->progressive_sequence) {
         put_bits(&s->pb, 1, 0); /* no repeat */
     } else {
-        put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first);
+        put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
     }
     /* XXX: optimize the generation of this flag with entropy
        measures */
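The 17982 in the sequence-header hunk is the NTSC 29.97 drop-frame constant: every minute except each tenth one skips two timecode numbers, so a 10-minute block spans 10*60*30 - 9*2 = 17982 frames. A hedged sketch of the full adjustment (equivalent arithmetic, not the encoder's literal code):

    /* Convert a frame count to a drop-frame timecode number (29.97 only). */
    static int drop_frame_time_code(int frame_num)
    {
        int d = frame_num / 17982;   /* complete 10-minute blocks  */
        int m = frame_num % 17982;   /* remainder within the block */
        /* the first minute of a block keeps 1800 numbers, the rest 1798 */
        return frame_num + 18 * d + 2 * ((m < 2) ? 0 : (m - 2) / 1798);
    }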
@@ -89,7 +89,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my,
     uint16_t time_pb= s->pb_time;
     int p_mx, p_my;

-    p_mx= s->next_picture.motion_val[0][xy][0];
+    p_mx = s->next_picture.f.motion_val[0][xy][0];
     if((unsigned)(p_mx + tab_bias) < tab_size){
         s->mv[0][i][0] = s->direct_scale_mv[0][p_mx + tab_bias] + mx;
         s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx

@@ -99,7 +99,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my,
         s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
                             : p_mx*(time_pb - time_pp)/time_pp;
     }
-    p_my= s->next_picture.motion_val[0][xy][1];
+    p_my = s->next_picture.f.motion_val[0][xy][1];
     if((unsigned)(p_my + tab_bias) < tab_size){
         s->mv[0][i][1] = s->direct_scale_mv[0][p_my + tab_bias] + my;
         s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my

@@ -120,7 +120,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my,
  */
 int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
     const int mb_index= s->mb_x + s->mb_y*s->mb_stride;
-    const int colocated_mb_type= s->next_picture.mb_type[mb_index];
+    const int colocated_mb_type = s->next_picture.f.mb_type[mb_index];
     uint16_t time_pp;
     uint16_t time_pb;
     int i;

@@ -137,7 +137,7 @@ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
     } else if(IS_INTERLACED(colocated_mb_type)){
         s->mv_type = MV_TYPE_FIELD;
         for(i=0; i<2; i++){
-            int field_select= s->next_picture.ref_index[0][4*mb_index + 2*i];
+            int field_select = s->next_picture.f.ref_index[0][4 * mb_index + 2 * i];
             s->field_select[0][i]= field_select;
             s->field_select[1][i]= i;
             if(s->top_field_first){
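The two ff_mpeg4_set_one_direct_mv() hunks read the co-located vector from next_picture and scale it; the direct_scale_mv lookup is just a precomputed form of the same expression that the fallback branch spells out. A compact restatement of that fallback, as a self-contained helper for one MV component (illustrative only):

    /* time_pp: P-to-P distance, time_pb: P-to-B distance,
     * p_mx: co-located MV component, mx: transmitted delta. */
    static void direct_mv_component(int p_mx, int mx, int time_pp,
                                    int time_pb, int *fwd, int *bwd)
    {
        *fwd = p_mx * time_pb / time_pp + mx;
        *bwd = mx ? *fwd - p_mx
                  : p_mx * (time_pb - time_pp) / time_pp;
    }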
@@ -55,7 +55,7 @@ void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
 {
     int i;
     int16_t *ac_val, *ac_val1;
-    int8_t * const qscale_table= s->current_picture.qscale_table;
+    int8_t * const qscale_table = s->current_picture.f.qscale_table;

     /* find prediction */
     ac_val = s->ac_val[0][0] + s->block_index[n] * 16;

@@ -376,7 +376,7 @@ int mpeg4_decode_video_packet_header(MpegEncContext *s)
     if(s->pict_type == AV_PICTURE_TYPE_B){
         int mb_x = 0, mb_y = 0;

-        while(s->next_picture.mbskip_table[ s->mb_index2xy[ mb_num ] ]) {
+        while (s->next_picture.f.mbskip_table[s->mb_index2xy[mb_num]]) {
             if (!mb_x) ff_thread_await_progress((AVFrame*)s->next_picture_ptr, mb_y++, 0);
             mb_num++;
             if (++mb_x == s->mb_width) mb_x = 0;

@@ -570,13 +570,13 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
             }while(cbpc == 8);

             s->cbp_table[xy]= cbpc & 3;
-            s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+            s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
             s->mb_intra = 1;

             if(cbpc & 4) {
                 ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
             }
-            s->current_picture.qscale_table[xy]= s->qscale;
+            s->current_picture.f.qscale_table[xy]= s->qscale;

             s->mbintra_table[xy]= 1;
             for(i=0; i<6; i++){

@@ -592,7 +592,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
             s->pred_dir_table[xy]= dir;
         }else{ /* P/S_TYPE */
             int mx, my, pred_x, pred_y, bits;
-            int16_t * const mot_val= s->current_picture.motion_val[0][s->block_index[0]];
+            int16_t * const mot_val = s->current_picture.f.motion_val[0][s->block_index[0]];
             const int stride= s->b8_stride*2;

 try_again:

@@ -604,11 +604,11 @@ try_again:
             if(bits&0x10000){
                 /* skip mb */
                 if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
-                    s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
+                    s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
                     mx= get_amv(s, 0);
                     my= get_amv(s, 1);
                 }else{
-                    s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+                    s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
                     mx=my=0;
                 }
                 mot_val[0 ]= mot_val[2 ]=

@@ -634,7 +634,7 @@ try_again:
             s->mb_intra = ((cbpc & 4) != 0);

             if(s->mb_intra){
-                s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+                s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
                 s->mbintra_table[xy]= 1;
                 mot_val[0 ]= mot_val[2 ]=
                 mot_val[0+stride]= mot_val[2+stride]= 0;

@@ -660,11 +660,11 @@ try_again:
                     my = h263_decode_motion(s, pred_y, s->f_code);
                     if (my >= 0xffff)
                         return -1;
-                    s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+                    s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
                 } else {
                     mx = get_amv(s, 0);
                     my = get_amv(s, 1);
-                    s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
+                    s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
                 }

                 mot_val[0 ]= mot_val[2 ] =

@@ -673,7 +673,7 @@ try_again:
                 mot_val[1+stride]= mot_val[3+stride]= my;
             } else {
                 int i;
-                s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
+                s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
                 for(i=0;i<4;i++) {
                     int16_t *mot_val= h263_pred_motion(s, i, 0, &pred_x, &pred_y);
                     mx = h263_decode_motion(s, pred_x, s->f_code);

@@ -725,9 +725,9 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
             }

             s->cbp_table[xy]|= cbpy<<2;
-            s->current_picture.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
+            s->current_picture.f.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
         }else{ /* P || S_TYPE */
-            if(IS_INTRA(s->current_picture.mb_type[xy])){
+            if (IS_INTRA(s->current_picture.f.mb_type[xy])) {
                 int dir=0,i;
                 int ac_pred = get_bits1(&s->gb);
                 int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);

@@ -740,7 +740,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
                 if(s->cbp_table[xy] & 8) {
                     ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
                 }
-                s->current_picture.qscale_table[xy]= s->qscale;
+                s->current_picture.f.qscale_table[xy] = s->qscale;

                 for(i=0; i<6; i++){
                     int dc_pred_dir;

@@ -754,10 +754,10 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
                 }
                 s->cbp_table[xy]&= 3; //remove dquant
                 s->cbp_table[xy]|= cbpy<<2;
-                s->current_picture.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
+                s->current_picture.f.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
                 s->pred_dir_table[xy]= dir;
-            }else if(IS_SKIP(s->current_picture.mb_type[xy])){
-                s->current_picture.qscale_table[xy]= s->qscale;
+            } else if (IS_SKIP(s->current_picture.f.mb_type[xy])) {
+                s->current_picture.f.qscale_table[xy] = s->qscale;
                 s->cbp_table[xy]= 0;
             }else{
                 int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);

@@ -770,7 +770,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
                 if(s->cbp_table[xy] & 8) {
                     ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
                 }
-                s->current_picture.qscale_table[xy]= s->qscale;
+                s->current_picture.f.qscale_table[xy] = s->qscale;

                 s->cbp_table[xy]&= 3; //remove dquant
                 s->cbp_table[xy]|= (cbpy^0xf)<<2;

@@ -1091,20 +1091,20 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
     int cbp, mb_type;
     const int xy= s->mb_x + s->mb_y*s->mb_stride;

-    mb_type= s->current_picture.mb_type[xy];
+    mb_type = s->current_picture.f.mb_type[xy];
     cbp = s->cbp_table[xy];

     s->use_intra_dc_vlc= s->qscale < s->intra_dc_threshold;

-    if(s->current_picture.qscale_table[xy] != s->qscale){
-        ff_set_qscale(s, s->current_picture.qscale_table[xy] );
+    if (s->current_picture.f.qscale_table[xy] != s->qscale) {
+        ff_set_qscale(s, s->current_picture.f.qscale_table[xy]);
     }

     if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
         int i;
         for(i=0; i<4; i++){
-            s->mv[0][i][0] = s->current_picture.motion_val[0][ s->block_index[i] ][0];
-            s->mv[0][i][1] = s->current_picture.motion_val[0][ s->block_index[i] ][1];
+            s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
+            s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
         }
         s->mb_intra = IS_INTRA(mb_type);

@@ -1122,7 +1122,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
             s->mb_skipped = 1;
         }
     }else if(s->mb_intra){
-        s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
+        s->ac_pred = IS_ACPRED(s->current_picture.f.mb_type[xy]);
     }else if(!s->mb_intra){
 // s->mcsel= 0; //FIXME do we need to init that

@@ -1135,7 +1135,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
         }
     } else { /* I-Frame */
         s->mb_intra = 1;
-        s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
+        s->ac_pred = IS_ACPRED(s->current_picture.f.mb_type[xy]);
     }

     if (!IS_SKIP(mb_type)) {

@@ -1188,14 +1188,14 @@ static int mpeg4_decode_mb(MpegEncContext *s,
             s->mv_dir = MV_DIR_FORWARD;
             s->mv_type = MV_TYPE_16X16;
             if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
-                s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
+                s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
                 s->mcsel=1;
                 s->mv[0][0][0]= get_amv(s, 0);
                 s->mv[0][0][1]= get_amv(s, 1);

                 s->mb_skipped = 0;
             }else{
-                s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+                s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
                 s->mcsel=0;
                 s->mv[0][0][0] = 0;
                 s->mv[0][0][1] = 0;

@@ -1230,7 +1230,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
         s->mv_dir = MV_DIR_FORWARD;
         if ((cbpc & 16) == 0) {
             if(s->mcsel){
-                s->current_picture.mb_type[xy]= MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
+                s->current_picture.f.mb_type[xy] = MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
                 /* 16x16 global motion prediction */
                 s->mv_type = MV_TYPE_16X16;
                 mx= get_amv(s, 0);

@@ -1238,7 +1238,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
                 s->mv[0][0][0] = mx;
                 s->mv[0][0][1] = my;
             }else if((!s->progressive_sequence) && get_bits1(&s->gb)){
-                s->current_picture.mb_type[xy]= MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED;
+                s->current_picture.f.mb_type[xy] = MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED;
                 /* 16x8 field motion prediction */
                 s->mv_type= MV_TYPE_FIELD;

@@ -1260,7 +1260,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
                     s->mv[0][i][1] = my;
                 }
             }else{
-                s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+                s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
                 /* 16x16 motion prediction */
                 s->mv_type = MV_TYPE_16X16;
                 h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

@@ -1277,7 +1277,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
                 s->mv[0][0][1] = my;
             }
         } else {
-            s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
+            s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
             s->mv_type = MV_TYPE_8X8;
             for(i=0;i<4;i++) {
                 mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);

@@ -1314,7 +1314,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
         }

         /* if we skipped it in the future P Frame than skip it now too */
-        s->mb_skipped= s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
+        s->mb_skipped = s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC

         if(s->mb_skipped){
             /* skip mb */

@@ -1327,7 +1327,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
             s->mv[0][0][1] = 0;
             s->mv[1][0][0] = 0;
             s->mv[1][0][1] = 0;
-            s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+            s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
             goto end;
         }

@@ -1433,7 +1433,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
             s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
             mb_type |= ff_mpeg4_set_direct_mv(s, mx, my);
         }
-        s->current_picture.mb_type[xy]= mb_type;
+        s->current_picture.f.mb_type[xy] = mb_type;
     } else { /* I-Frame */
         do{
             cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);

@@ -1448,9 +1448,9 @@ static int mpeg4_decode_mb(MpegEncContext *s,
 intra:
         s->ac_pred = get_bits1(&s->gb);
         if(s->ac_pred)
-            s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED;
+            s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
         else
-            s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+            s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;

         cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
         if(cbpy<0){

@@ -1491,12 +1491,12 @@ end:
     if(mpeg4_is_resync(s)){
         const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;

-        if(s->pict_type==AV_PICTURE_TYPE_B && s->next_picture.mbskip_table[xy + delta]){
+        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture.f.mbskip_table[xy + delta]) {
             ff_thread_await_progress((AVFrame*)s->next_picture_ptr,
                                      (s->mb_x + delta >= s->mb_width) ? FFMIN(s->mb_y+1, s->mb_height-1) : s->mb_y, 0);
         }

-        if(s->pict_type==AV_PICTURE_TYPE_B && s->next_picture.mbskip_table[xy + delta])
+        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture.f.mbskip_table[xy + delta])
             return SLICE_OK;
         return SLICE_END;
     }

@@ -1961,11 +1961,12 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
     }

     if(s->avctx->time_base.num)
-        s->current_picture_ptr->pts= (s->time + s->avctx->time_base.num/2) / s->avctx->time_base.num;
+        s->current_picture_ptr->f.pts = (s->time + s->avctx->time_base.num / 2) / s->avctx->time_base.num;
     else
-        s->current_picture_ptr->pts= AV_NOPTS_VALUE;
+        s->current_picture_ptr->f.pts = AV_NOPTS_VALUE;
     if(s->avctx->debug&FF_DEBUG_PTS)
-        av_log(s->avctx, AV_LOG_DEBUG, "MPEG4 PTS: %"PRId64"\n", s->current_picture_ptr->pts);
+        av_log(s->avctx, AV_LOG_DEBUG, "MPEG4 PTS: %"PRId64"\n",
+               s->current_picture_ptr->f.pts);

     check_marker(gb, "before vop_coded");

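Note how the decoder hunks above pass Picture pointers straight to the frame-threading helpers with a cast, e.g. ff_thread_await_progress((AVFrame*)s->next_picture_ptr, ...). That cast is only valid if the embedded frame is Picture's first member, so the two pointers alias. A sketch of the invariant this diff relies on (assuming the Picture layout sketched earlier):

    #include <assert.h>
    #include <stddef.h>

    static void check_picture_frame_alias(Picture *pic)
    {
        /* (AVFrame*)pic and &pic->f must be the same address. */
        assert(offsetof(Picture, f) == 0);
        assert((void *)pic == (void *)&pic->f);
    }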
@@ -124,7 +124,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], const
 {
     int score= 0;
     int i, n;
-    int8_t * const qscale_table= s->current_picture.qscale_table;
+    int8_t * const qscale_table = s->current_picture.f.qscale_table;

     memcpy(zigzag_last_index, s->block_last_index, sizeof(int)*6);

@@ -201,7 +201,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], const
  */
 void ff_clean_mpeg4_qscales(MpegEncContext *s){
     int i;
-    int8_t * const qscale_table= s->current_picture.qscale_table;
+    int8_t * const qscale_table = s->current_picture.f.qscale_table;

     ff_clean_h263_qscales(s);

@@ -457,7 +457,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
     assert(mb_type>=0);

     /* nothing to do if this MB was skipped in the next P Frame */
-    if(s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]){ //FIXME avoid DCT & ...
+    if (s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { //FIXME avoid DCT & ...
         s->skip_count++;
         s->mv[0][0][0]=
         s->mv[0][0][1]=

@@ -589,7 +589,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
             if(y+16 > s->height) y= s->height-16;

             offset= x + y*s->linesize;
-            p_pic= s->new_picture.data[0] + offset;
+            p_pic = s->new_picture.f.data[0] + offset;

             s->mb_skipped=1;
             for(i=0; i<s->max_b_frames; i++){

@@ -597,10 +597,11 @@ void mpeg4_encode_mb(MpegEncContext * s,
                 int diff;
                 Picture *pic= s->reordered_input_picture[i+1];

-                if(pic==NULL || pic->pict_type!=AV_PICTURE_TYPE_B) break;
+                if (pic == NULL || pic->f.pict_type != AV_PICTURE_TYPE_B)
+                    break;

-                b_pic= pic->data[0] + offset;
-                if(pic->type != FF_BUFFER_TYPE_SHARED)
+                b_pic = pic->f.data[0] + offset;
+                if (pic->f.type != FF_BUFFER_TYPE_SHARED)
                     b_pic+= INPLACE_OFFSET;
                 diff= s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
                 if(diff>s->qscale*70){ //FIXME check that 70 is optimal

@@ -704,8 +705,8 @@ void mpeg4_encode_mb(MpegEncContext * s,
                 /* motion vectors: 8x8 mode*/
                 h263_pred_motion(s, i, 0, &pred_x, &pred_y);

-                ff_h263_encode_motion_vector(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x,
-                                                s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
+                ff_h263_encode_motion_vector(s, s->current_picture.f.motion_val[0][ s->block_index[i] ][0] - pred_x,
+                                                s->current_picture.f.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
             }
         }

@@ -814,9 +815,9 @@ static void mpeg4_encode_gop_header(MpegEncContext * s){
     put_bits(&s->pb, 16, 0);
     put_bits(&s->pb, 16, GOP_STARTCODE);

-    time= s->current_picture_ptr->pts;
+    time = s->current_picture_ptr->f.pts;
     if(s->reordered_input_picture[1])
-        time= FFMIN(time, s->reordered_input_picture[1]->pts);
+        time = FFMIN(time, s->reordered_input_picture[1]->f.pts);
     time= time*s->avctx->time_base.num;

     seconds= time/s->avctx->time_base.den;

@@ -1026,7 +1027,7 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
     }
     put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
     if(!s->progressive_sequence){
-        put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first);
+        put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
         put_bits(&s->pb, 1, s->alternate_scan);
     }
     //FIXME sprite stuff
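The mpeg4_encode_gop_header() hunk converts the picture's pts (counted in time_base units) to wall-clock time for the GOP header: pts * time_base.num / time_base.den gives seconds, which the surrounding code (outside this excerpt) splits into hours, minutes and seconds. A hedged sketch of the equivalent computation:

    /* Illustrative only; mirrors the arithmetic around the hunk above. */
    static void gop_clock(int64_t pts, AVRational time_base,
                          int *h, int *m, int *s)
    {
        int64_t seconds = pts * time_base.num / time_base.den;
        *s = seconds % 60; seconds /= 60;
        *m = seconds % 60; seconds /= 60;
        *h = seconds % 24;
    }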
@@ -198,7 +198,7 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
 
 void ff_copy_picture(Picture *dst, Picture *src){
     *dst = *src;
-    dst->type= FF_BUFFER_TYPE_COPY;
+    dst->f.type = FF_BUFFER_TYPE_COPY;
 }
 
 /**
@@ -207,7 +207,7 @@ void ff_copy_picture(Picture *dst, Picture *src){
 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
 {
     ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
-    av_freep(&pic->hwaccel_picture_private);
+    av_freep(&pic->f.hwaccel_picture_private);
 }
 
 /**
@@ -220,8 +220,8 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
     if (s->avctx->hwaccel) {
         assert(!pic->hwaccel_picture_private);
         if (s->avctx->hwaccel->priv_data_size) {
-            pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
-            if (!pic->hwaccel_picture_private) {
+            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
+            if (!pic->f.hwaccel_picture_private) {
                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                 return -1;
             }
@@ -230,19 +230,20 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
 
         r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
 
-    if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
-        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
-        av_freep(&pic->hwaccel_picture_private);
+    if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
+        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
+               r, pic->f.age, pic->f.type, pic->f.data[0]);
+        av_freep(&pic->f.hwaccel_picture_private);
         return -1;
     }
 
-    if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
+    if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) {
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
         free_frame_buffer(s, pic);
         return -1;
     }
 
-    if (pic->linesize[1] != pic->linesize[2]) {
+    if (pic->f.linesize[1] != pic->f.linesize[2]) {
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
         free_frame_buffer(s, pic);
         return -1;
@@ -264,59 +265,59 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
     int r= -1;
 
     if(shared){
-        assert(pic->data[0]);
+        assert(pic->f.data[0]);
         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
-        pic->type= FF_BUFFER_TYPE_SHARED;
+        pic->f.type = FF_BUFFER_TYPE_SHARED;
     }else{
-        assert(!pic->data[0]);
+        assert(!pic->f.data[0]);
 
         if (alloc_frame_buffer(s, pic) < 0)
             return -1;
 
-        s->linesize = pic->linesize[0];
-        s->uvlinesize= pic->linesize[1];
+        s->linesize = pic->f.linesize[0];
+        s->uvlinesize = pic->f.linesize[1];
     }
 
-    if(pic->qscale_table==NULL){
+    if (pic->f.qscale_table == NULL) {
         if (s->encoding) {
             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
        }
 
-        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
+        FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
-        pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
-        pic->qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
+        pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1;
+        pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
        if(s->out_format == FMT_H264){
            for(i=0; i<2; i++){
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
-                pic->motion_val[i]= pic->motion_val_base[i]+4;
-                FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
+                pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
+                FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
            }
-            pic->motion_subsample_log2= 2;
+            pic->f.motion_subsample_log2 = 2;
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
            for(i=0; i<2; i++){
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
-                pic->motion_val[i]= pic->motion_val_base[i]+4;
-                FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
+                pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
+                FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
            }
-            pic->motion_subsample_log2= 3;
+            pic->f.motion_subsample_log2 = 3;
        }
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
-            FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
+            FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
        }
-        pic->qstride= s->mb_stride;
-        FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
+        pic->f.qstride = s->mb_stride;
+        FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail)
     }
 
     /* It might be nicer if the application would keep track of these
     * but it would require an API change. */
     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
     s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
-    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
-        pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
+    if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
+        pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
     pic->owner2 = NULL;
 
     return 0;
@@ -332,30 +333,30 @@ fail: //for the FF_ALLOCZ_OR_GOTO macro
 static void free_picture(MpegEncContext *s, Picture *pic){
     int i;
 
-    if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
+    if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
         free_frame_buffer(s, pic);
     }
 
     av_freep(&pic->mb_var);
     av_freep(&pic->mc_mb_var);
     av_freep(&pic->mb_mean);
-    av_freep(&pic->mbskip_table);
+    av_freep(&pic->f.mbskip_table);
     av_freep(&pic->qscale_table_base);
     av_freep(&pic->mb_type_base);
-    av_freep(&pic->dct_coeff);
-    av_freep(&pic->pan_scan);
-    pic->mb_type= NULL;
+    av_freep(&pic->f.dct_coeff);
+    av_freep(&pic->f.pan_scan);
+    pic->f.mb_type = NULL;
     for(i=0; i<2; i++){
         av_freep(&pic->motion_val_base[i]);
-        av_freep(&pic->ref_index[i]);
+        av_freep(&pic->f.ref_index[i]);
     }
 
-    if(pic->type == FF_BUFFER_TYPE_SHARED){
+    if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
        for(i=0; i<4; i++){
-            pic->base[i]=
-            pic->data[i]= NULL;
+            pic->f.base[i] =
+            pic->f.data[i] = NULL;
        }
-        pic->type= 0;
+        pic->f.type = 0;
    }
 }
 
@@ -526,7 +527,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src
 
     if(!s1->first_field){
         s->last_pict_type= s1->pict_type;
-        if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;
+        if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
 
         if(s1->pict_type!=FF_B_TYPE){
             s->last_non_b_pict_type= s1->pict_type;
@@ -965,7 +966,7 @@ void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
 
     /* release non reference frames */
     for(i=0; i<s->picture_count; i++){
-        if(s->picture[i].data[0] && !s->picture[i].reference
+        if (s->picture[i].f.data[0] && !s->picture[i].f.reference
            && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
            && (remove_current || &s->picture[i] != s->current_picture_ptr)
            /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
@@ -979,14 +980,17 @@ int ff_find_unused_picture(MpegEncContext *s, int shared){
 
     if(shared){
         for(i=s->picture_range_start; i<s->picture_range_end; i++){
-            if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
+            if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
+                return i;
         }
     }else{
         for(i=s->picture_range_start; i<s->picture_range_end; i++){
-            if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
+            if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
+                return i; //FIXME
        }
        for(i=s->picture_range_start; i<s->picture_range_end; i++){
-            if(s->picture[i].data[0]==NULL) return i;
+            if (s->picture[i].f.data[0] == NULL)
+                return i;
        }
    }
 
@@ -1035,7 +1039,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
 
     /* mark&release old frames */
-    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
+    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
         if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
             free_frame_buffer(s, s->last_picture_ptr);
 
@@ -1043,7 +1047,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             /* if(mpeg124/h263) */
             if(!s->encoding){
                 for(i=0; i<s->picture_count; i++){
-                    if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
+                    if (s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
                         av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                         free_frame_buffer(s, &s->picture[i]);
                     }
@@ -1055,41 +1059,41 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     if(!s->encoding){
         ff_release_unused_pictures(s, 1);
 
-        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
+        if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
             pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
         else{
             i= ff_find_unused_picture(s, 0);
             pic= &s->picture[i];
         }
 
-        pic->reference= 0;
+        pic->f.reference = 0;
         if (!s->dropable){
             if (s->codec_id == CODEC_ID_H264)
-                pic->reference = s->picture_structure;
+                pic->f.reference = s->picture_structure;
             else if (s->pict_type != AV_PICTURE_TYPE_B)
-                pic->reference = 3;
+                pic->f.reference = 3;
         }
 
-        pic->coded_picture_number= s->coded_picture_number++;
+        pic->f.coded_picture_number = s->coded_picture_number++;
 
         if(ff_alloc_picture(s, pic, 0) < 0)
             return -1;
 
        s->current_picture_ptr= pic;
        //FIXME use only the vars from current_pic
-        s->current_picture_ptr->top_field_first= s->top_field_first;
+        s->current_picture_ptr->f.top_field_first = s->top_field_first;
        if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
            if(s->picture_structure != PICT_FRAME)
-                s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
+                s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
-        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
-        s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME;
+        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
+        s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
    }
 
-    s->current_picture_ptr->pict_type= s->pict_type;
+    s->current_picture_ptr->f.pict_type = s->pict_type;
     // if(s->flags && CODEC_FLAG_QSCALE)
     //     s->current_picture_ptr->quality= s->new_picture_ptr->quality;
-    s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
 
     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
 
@@ -1099,13 +1103,13 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         s->next_picture_ptr= s->current_picture_ptr;
     }
 /*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
-        s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
-        s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
-        s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
+        s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
+        s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
+        s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
         s->pict_type, s->dropable);*/
 
     if(s->codec_id != CODEC_ID_H264){
-        if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) &&
+        if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
            (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
             if (s->pict_type != AV_PICTURE_TYPE_I)
                 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
@@ -1120,7 +1124,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
             ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
         }
-        if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
+        if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
             /* Allocate a dummy frame */
             i= ff_find_unused_picture(s, 0);
             s->next_picture_ptr= &s->picture[i];
@@ -1134,17 +1138,17 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
     if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
 
-    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
+    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
 
     if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
         int i;
         for(i=0; i<4; i++){
             if(s->picture_structure == PICT_BOTTOM_FIELD){
-                s->current_picture.data[i] += s->current_picture.linesize[i];
+                s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
             }
-            s->current_picture.linesize[i] *= 2;
-            s->last_picture.linesize[i] *=2;
-            s->next_picture.linesize[i] *=2;
+            s->current_picture.f.linesize[i] *= 2;
+            s->last_picture.f.linesize[i] *= 2;
+            s->next_picture.f.linesize[i] *= 2;
         }
     }
 
@@ -1187,18 +1191,18 @@ void MPV_frame_end(MpegEncContext *s)
        && !s->avctx->hwaccel
        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
        && s->unrestricted_mv
-       && s->current_picture.reference
+       && s->current_picture.f.reference
        && !s->intra_only
        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
-        s->dsp.draw_edges(s->current_picture.data[0], s->linesize ,
+        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                           s->h_edge_pos , s->v_edge_pos,
                           EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
-        s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
+        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                           s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                           EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
-        s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
+        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                           s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                           EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
     }
@@ -1206,14 +1210,14 @@ void MPV_frame_end(MpegEncContext *s)
     emms_c();
 
     s->last_pict_type = s->pict_type;
-    s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
+    s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
     if(s->pict_type!=AV_PICTURE_TYPE_B){
         s->last_non_b_pict_type= s->pict_type;
     }
 #if 0
     /* copy back current_picture variables */
     for(i=0; i<MAX_PICTURE_COUNT; i++){
-        if(s->picture[i].data[0] == s->current_picture.data[0]){
+        if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
             s->picture[i]= s->current_picture;
             break;
         }
@@ -1224,7 +1228,7 @@ void MPV_frame_end(MpegEncContext *s)
     if(s->encoding){
         /* release non-reference frames */
         for(i=0; i<s->picture_count; i++){
-            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
+            if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
                 free_frame_buffer(s, &s->picture[i]);
             }
         }
@@ -1237,7 +1241,7 @@ void MPV_frame_end(MpegEncContext *s)
 #endif
     s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
 
-    if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) {
+    if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
     }
 }
@@ -1644,8 +1648,8 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
     const int s_mask= (2<<lowres)-1;
     const int h_edge_pos = s->h_edge_pos >> lowres;
     const int v_edge_pos = s->v_edge_pos >> lowres;
-    linesize = s->current_picture.linesize[0] << field_based;
-    uvlinesize = s->current_picture.linesize[1] << field_based;
+    linesize = s->current_picture.f.linesize[0] << field_based;
+    uvlinesize = s->current_picture.f.linesize[1] << field_based;
 
     if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
         motion_x/=2;
@@ -1702,7 +1706,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
         }
     }
 
-    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
+    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
         dest_y += s->linesize;
         dest_cb+= s->uvlinesize;
         dest_cr+= s->uvlinesize;
@@ -1842,7 +1846,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
                                s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
         } else {
             if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
-                ref_picture= s->current_picture_ptr->data;
+                ref_picture = s->current_picture_ptr->f.data;
             }
 
             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
@@ -1858,7 +1862,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
                 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
                     ref2picture= ref_picture;
                 }else{
-                    ref2picture= s->current_picture_ptr->data;
+                    ref2picture = s->current_picture_ptr->f.data;
                 }
 
                 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
@@ -1895,7 +1899,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
 
                 //opposite parity is always in the same frame if this is second field
                 if(!s->first_field){
-                    ref_picture = s->current_picture_ptr->data;
+                    ref_picture = s->current_picture_ptr->f.data;
                 }
             }
         }
@@ -2024,7 +2028,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
         /* save DCT coefficients */
         int i,j;
-        DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
+        DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
         av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
         for(i=0; i<6; i++){
             for(j=0; j<64; j++){
@@ -2035,7 +2039,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
         }
     }
 
-    s->current_picture.qscale_table[mb_xy]= s->qscale;
+    s->current_picture.f.qscale_table[mb_xy] = s->qscale;
 
     /* update DC predictors for P macroblocks */
     if (!s->mb_intra) {
@@ -2056,8 +2060,8 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
         int dct_linesize, dct_offset;
         op_pixels_func (*op_pix)[4];
         qpel_mc_func (*op_qpix)[16];
-        const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
-        const int uvlinesize= s->current_picture.linesize[1];
+        const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
+        const int uvlinesize = s->current_picture.f.linesize[1];
         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
 
@@ -2065,7 +2069,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
     /* skip only during decoding as we might trash the buffers during encoding a bit */
     if(!s->encoding){
         uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
-        const int age= s->current_picture.age;
+        const int age = s->current_picture.f.age;
 
         assert(age);
 
@@ -2077,10 +2081,10 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
             if(*mbskip_ptr >99) *mbskip_ptr= 99;
 
             /* if previous was skipped too, then nothing to do ! */
-            if (*mbskip_ptr >= age && s->current_picture.reference){
+            if (*mbskip_ptr >= age && s->current_picture.f.reference){
                 return;
             }
-        } else if(!s->current_picture.reference){
+        } else if(!s->current_picture.f.reference) {
             (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
             if(*mbskip_ptr >99) *mbskip_ptr= 99;
         } else{
@@ -2119,11 +2123,11 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
 
                 if (s->mv_dir & MV_DIR_FORWARD) {
-                    MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
+                    MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
                     op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                 }
                 if (s->mv_dir & MV_DIR_BACKWARD) {
-                    MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
+                    MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
                 }
             }else{
                 op_qpix= s->me.qpel_put;
@@ -2133,12 +2137,12 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                     op_pix = s->dsp.put_no_rnd_pixels_tab;
                 }
                 if (s->mv_dir & MV_DIR_FORWARD) {
-                    MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
+                    MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                     op_pix = s->dsp.avg_pixels_tab;
                     op_qpix= s->me.qpel_avg;
                 }
                 if (s->mv_dir & MV_DIR_BACKWARD) {
-                    MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
+                    MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                 }
             }
         }
@@ -2287,7 +2291,7 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
     if (!s->avctx->hwaccel
        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
        && s->unrestricted_mv
-       && s->current_picture.reference
+       && s->current_picture.f.reference
        && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
         int sides = 0, edge_h;
@@ -2298,11 +2302,11 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
 
         edge_h= FFMIN(h, s->v_edge_pos - y);
 
-        s->dsp.draw_edges(s->current_picture_ptr->data[0] + y *s->linesize , s->linesize,
+        s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize , s->linesize,
                           s->h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides);
-        s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
+        s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
                           s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
-        s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
+        s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
                           s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
     }
 
@@ -2341,8 +2345,8 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
 }
 
 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
-    const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
-    const int uvlinesize= s->current_picture.linesize[1];
+    const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
+    const int uvlinesize = s->current_picture.f.linesize[1];
     const int mb_size= 4 - s->avctx->lowres;
 
     s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
@@ -2353,9 +2357,9 @@ void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
     //block_index is not used by mpeg2, so it is not affected by chroma_format
 
-    s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
-    s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
-    s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
+    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
+    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
+    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
 
     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
     {
@@ -2380,8 +2384,9 @@ void ff_mpeg_flush(AVCodecContext *avctx){
         return;
 
     for(i=0; i<s->picture_count; i++){
-        if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
-                                   || s->picture[i].type == FF_BUFFER_TYPE_USER))
+        if (s->picture[i].f.data[0] &&
+            (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
+             s->picture[i].f.type == FF_BUFFER_TYPE_USER))
             free_frame_buffer(s, &s->picture[i]);
     }
     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;

@@ -28,6 +28,7 @@
 #ifndef AVCODEC_MPEGVIDEO_H
 #define AVCODEC_MPEGVIDEO_H
 
+#include "avcodec.h"
 #include "dsputil.h"
 #include "get_bits.h"
 #include "put_bits.h"
@@ -82,7 +83,7 @@ struct MpegEncContext;
  * Picture.
  */
 typedef struct Picture{
-    FF_COMMON_FRAME
+    struct AVFrame f;
 
     /**
     * halfpel luma planes.

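The hunk above is the pivot of the whole diff: the fields that FF_COMMON_FRAME used to paste into Picture now live in an embedded AVFrame named `f`, placed as the first member. Because a pointer to a struct, suitably converted, points to its first member, the `(AVFrame*)pic` casts seen throughout the diff (ff_thread_release_buffer, ff_thread_get_buffer, coded_frame, ff_thread_report_progress) keep working unchanged, while direct field access becomes `pic->f.xxx`. The following stand-alone C sketch illustrates that pattern only; the Frame/Pic names and fields are hypothetical stand-ins for AVFrame/Picture, not FFmpeg code:

#include <stdio.h>

typedef struct Frame {               /* stand-in for AVFrame */
    unsigned char *data[4];
    int key_frame;
} Frame;

typedef struct Pic {                 /* stand-in for Picture */
    Frame f;                         /* embedded frame; must stay the first member */
    int mb_var_sum;                  /* codec-private fields follow */
} Pic;

/* Takes the inner type, the way the buffer/threading helpers take AVFrame*. */
static void mark_key(Frame *frame)
{
    frame->key_frame = 1;
}

int main(void)
{
    Pic pic = { 0 };
    mark_key((Frame *)&pic);         /* same idea as the (AVFrame*)pic casts in the diff */
    printf("%d\n", pic.f.key_frame); /* prints 1: both views alias the same storage */
    return 0;
}

This is why the commit can drop the macro without touching the buffer-management API: the cast-based aliasing stays valid, and only the field-access sites need the mechanical `.f.` rewrite shown in the hunks before and after this point.
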
@@ -255,8 +255,8 @@ if(s->quarter_sample)
 #endif
 
     v_edge_pos = s->v_edge_pos >> field_based;
-    linesize = s->current_picture.linesize[0] << field_based;
-    uvlinesize = s->current_picture.linesize[1] << field_based;
+    linesize = s->current_picture.f.linesize[0] << field_based;
+    uvlinesize = s->current_picture.f.linesize[1] << field_based;
 
     dxy = ((motion_y & 1) << 1) | (motion_x & 1);
     src_x = s->mb_x* 16 + (motion_x >> 1);
@@ -657,30 +657,30 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
 
         assert(!s->mb_skipped);
 
-        memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4);
-        memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
-        memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
+        memcpy(mv_cache[1][1], s->current_picture.f.motion_val[0][mot_xy ], sizeof(int16_t) * 4);
+        memcpy(mv_cache[2][1], s->current_picture.f.motion_val[0][mot_xy + mot_stride], sizeof(int16_t) * 4);
+        memcpy(mv_cache[3][1], s->current_picture.f.motion_val[0][mot_xy + mot_stride], sizeof(int16_t) * 4);
 
-        if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
+        if (mb_y == 0 || IS_INTRA(s->current_picture.f.mb_type[xy - s->mb_stride])) {
            memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
        }else{
-            memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
+            memcpy(mv_cache[0][1], s->current_picture.f.motion_val[0][mot_xy - mot_stride], sizeof(int16_t) * 4);
        }
 
-        if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
+        if (mb_x == 0 || IS_INTRA(s->current_picture.f.mb_type[xy - 1])) {
            AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
            AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
        }else{
-            AV_COPY32(mv_cache[1][0], s->current_picture.motion_val[0][mot_xy-1]);
-            AV_COPY32(mv_cache[2][0], s->current_picture.motion_val[0][mot_xy-1+mot_stride]);
+            AV_COPY32(mv_cache[1][0], s->current_picture.f.motion_val[0][mot_xy - 1]);
+            AV_COPY32(mv_cache[2][0], s->current_picture.f.motion_val[0][mot_xy - 1 + mot_stride]);
        }
 
-        if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
+        if (mb_x + 1 >= s->mb_width || IS_INTRA(s->current_picture.f.mb_type[xy + 1])) {
            AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
            AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
        }else{
-            AV_COPY32(mv_cache[1][3], s->current_picture.motion_val[0][mot_xy+2]);
-            AV_COPY32(mv_cache[2][3], s->current_picture.motion_val[0][mot_xy+2+mot_stride]);
+            AV_COPY32(mv_cache[1][3], s->current_picture.f.motion_val[0][mot_xy + 2]);
+            AV_COPY32(mv_cache[2][3], s->current_picture.f.motion_val[0][mot_xy + 2 + mot_stride]);
        }
 
        mx = 0;
@@ -817,7 +817,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
             }
         } else {
             if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
-                ref_picture= s->current_picture_ptr->data;
+                ref_picture = s->current_picture_ptr->f.data;
             }
 
             mpeg_motion(s, dest_y, dest_cb, dest_cr,
@@ -834,7 +834,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
                    || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
                     ref2picture= ref_picture;
                 }else{
-                    ref2picture= s->current_picture_ptr->data;
+                    ref2picture = s->current_picture_ptr->f.data;
                 }
 
                 mpeg_motion(s, dest_y, dest_cb, dest_cr,
@@ -871,7 +871,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
 
                 //opposite parity is always in the same frame if this is second field
                 if(!s->first_field){
-                    ref_picture = s->current_picture_ptr->data;
+                    ref_picture = s->current_picture_ptr->f.data;
                 }
             }
         }

@ -158,7 +158,7 @@ void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix){
|
|||||||
* init s->current_picture.qscale_table from s->lambda_table
|
* init s->current_picture.qscale_table from s->lambda_table
|
||||||
*/
|
*/
|
||||||
void ff_init_qscale_tab(MpegEncContext *s){
|
void ff_init_qscale_tab(MpegEncContext *s){
|
||||||
int8_t * const qscale_table= s->current_picture.qscale_table;
|
int8_t * const qscale_table = s->current_picture.f.qscale_table;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for(i=0; i<s->mb_num; i++){
|
for(i=0; i<s->mb_num; i++){
|
||||||
@ -915,12 +915,12 @@ static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){
|
|||||||
int64_t score64=0;
|
int64_t score64=0;
|
||||||
|
|
||||||
for(plane=0; plane<3; plane++){
|
for(plane=0; plane<3; plane++){
|
||||||
const int stride= p->linesize[plane];
|
const int stride = p->f.linesize[plane];
|
||||||
const int bw= plane ? 1 : 2;
|
const int bw= plane ? 1 : 2;
|
||||||
for(y=0; y<s->mb_height*bw; y++){
|
for(y=0; y<s->mb_height*bw; y++){
|
||||||
for(x=0; x<s->mb_width*bw; x++){
|
for(x=0; x<s->mb_width*bw; x++){
|
||||||
int off= p->type == FF_BUFFER_TYPE_SHARED ? 0: 16;
|
int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0: 16;
|
||||||
int v= s->dsp.frame_skip_cmp[1](s, p->data[plane] + 8*(x + y*stride)+off, ref->data[plane] + 8*(x + y*stride), stride, 8);
|
int v = s->dsp.frame_skip_cmp[1](s, p->f.data[plane] + 8*(x + y*stride)+off, ref->f.data[plane] + 8*(x + y*stride), stride, 8);
|
||||||
|
|
||||||
switch(s->avctx->frame_skip_exp){
|
switch(s->avctx->frame_skip_exp){
|
||||||
case 0: score= FFMAX(score, v); break;
|
case 0: score= FFMAX(score, v); break;
|
||||||
@ -992,15 +992,15 @@ static int estimate_best_b_count(MpegEncContext *s){
|
|||||||
if(pre_input_ptr && (!i || s->input_picture[i-1])) {
|
if(pre_input_ptr && (!i || s->input_picture[i-1])) {
|
||||||
pre_input= *pre_input_ptr;
|
pre_input= *pre_input_ptr;
|
||||||
|
|
||||||
if(pre_input.type != FF_BUFFER_TYPE_SHARED && i) {
|
if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
|
||||||
pre_input.data[0]+=INPLACE_OFFSET;
|
pre_input.f.data[0] += INPLACE_OFFSET;
|
||||||
pre_input.data[1]+=INPLACE_OFFSET;
|
pre_input.f.data[1] += INPLACE_OFFSET;
|
||||||
pre_input.data[2]+=INPLACE_OFFSET;
|
pre_input.f.data[2] += INPLACE_OFFSET;
|
||||||
}
|
}
|
||||||
|
|
||||||
s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.data[0], pre_input.linesize[0], c->width, c->height);
|
s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.f.data[0], pre_input.f.linesize[0], c->width, c->height);
|
||||||
s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.data[1], pre_input.linesize[1], c->width>>1, c->height>>1);
|
s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.f.data[1], pre_input.f.linesize[1], c->width >> 1, c->height >> 1);
|
||||||
s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.data[2], pre_input.linesize[2], c->width>>1, c->height>>1);
|
s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.f.data[2], pre_input.f.linesize[2], c->width >> 1, c->height >> 1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1062,20 +1062,20 @@ static int select_input_picture(MpegEncContext *s){
|
|||||||
if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
|
if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
|
||||||
if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
|
if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
|
||||||
s->reordered_input_picture[0]= s->input_picture[0];
|
s->reordered_input_picture[0]= s->input_picture[0];
|
||||||
s->reordered_input_picture[0]->pict_type= AV_PICTURE_TYPE_I;
|
s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
|
||||||
s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
|
s->reordered_input_picture[0]->f.coded_picture_number = s->coded_picture_number++;
|
||||||
}else{
|
}else{
|
||||||
int b_frames;
|
int b_frames;
|
||||||
|
|
||||||
if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){
|
if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){
|
||||||
if(s->picture_in_gop_number < s->gop_size && skip_check(s, s->input_picture[0], s->next_picture_ptr)){
|
if(s->picture_in_gop_number < s->gop_size && skip_check(s, s->input_picture[0], s->next_picture_ptr)){
|
||||||
//FIXME check that te gop check above is +-1 correct
|
//FIXME check that te gop check above is +-1 correct
|
||||||
//av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n", s->input_picture[0]->data[0], s->input_picture[0]->pts);
|
//av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n", s->input_picture[0]->f.data[0], s->input_picture[0]->pts);
|
||||||
|
|
||||||
if(s->input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
|
if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
|
||||||
for(i=0; i<4; i++)
|
for(i=0; i<4; i++)
|
||||||
s->input_picture[0]->data[i]= NULL;
|
s->input_picture[0]->f.data[i] = NULL;
|
||||||
s->input_picture[0]->type= 0;
|
s->input_picture[0]->f.type = 0;
|
||||||
}else{
|
}else{
|
||||||
assert( s->input_picture[0]->type==FF_BUFFER_TYPE_USER
|
assert( s->input_picture[0]->type==FF_BUFFER_TYPE_USER
|
||||||
|| s->input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
|
|| s->input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
|
||||||
@ -1092,7 +1092,7 @@ static int select_input_picture(MpegEncContext *s){
|
|||||||
|
|
||||||
if(s->flags&CODEC_FLAG_PASS2){
|
if(s->flags&CODEC_FLAG_PASS2){
|
||||||
for(i=0; i<s->max_b_frames+1; i++){
|
for(i=0; i<s->max_b_frames+1; i++){
|
||||||
int pict_num= s->input_picture[0]->display_picture_number + i;
|
int pict_num = s->input_picture[0]->f.display_picture_number + i;
|
||||||
|
|
||||||
if(pict_num >= s->rc_context.num_entries)
|
if(pict_num >= s->rc_context.num_entries)
|
||||||
break;
|
break;
|
||||||
@ -1101,7 +1101,7 @@ static int select_input_picture(MpegEncContext *s){
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
s->input_picture[i]->pict_type=
|
s->input_picture[i]->f.pict_type =
|
||||||
s->rc_context.entry[pict_num].new_pict_type;
|
s->rc_context.entry[pict_num].new_pict_type;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1113,8 +1113,8 @@ static int select_input_picture(MpegEncContext *s){
|
|||||||
for(i=1; i<s->max_b_frames+1; i++){
|
for(i=1; i<s->max_b_frames+1; i++){
|
||||||
if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
|
if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
|
||||||
s->input_picture[i]->b_frame_score=
|
s->input_picture[i]->b_frame_score=
|
||||||
get_intra_count(s, s->input_picture[i ]->data[0],
|
get_intra_count(s, s->input_picture[i ]->f.data[0],
|
||||||
s->input_picture[i-1]->data[0], s->linesize) + 1;
|
s->input_picture[i-1]->f.data[0], s->linesize) + 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for(i=0; i<s->max_b_frames+1; i++){
|
for(i=0; i<s->max_b_frames+1; i++){
|
||||||
@ -1140,11 +1140,11 @@ static int select_input_picture(MpegEncContext *s){
|
|||||||
//av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
|
//av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
|
||||||
|
|
||||||
for(i= b_frames - 1; i>=0; i--){
|
for(i= b_frames - 1; i>=0; i--){
|
||||||
int type= s->input_picture[i]->pict_type;
|
int type = s->input_picture[i]->f.pict_type;
|
||||||
if(type && type != AV_PICTURE_TYPE_B)
|
if(type && type != AV_PICTURE_TYPE_B)
|
||||||
b_frames= i;
|
b_frames= i;
|
||||||
}
|
}
|
||||||
if(s->input_picture[b_frames]->pict_type == AV_PICTURE_TYPE_B && b_frames == s->max_b_frames){
|
if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B && b_frames == s->max_b_frames){
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
|
 av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
 }

@@ -1154,49 +1154,49 @@ static int select_input_picture(MpegEncContext *s){
 }else{
 if(s->flags & CODEC_FLAG_CLOSED_GOP)
 b_frames=0;
-s->input_picture[b_frames]->pict_type= AV_PICTURE_TYPE_I;
+s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
 }
 }

 if( (s->flags & CODEC_FLAG_CLOSED_GOP)
 && b_frames
-&& s->input_picture[b_frames]->pict_type== AV_PICTURE_TYPE_I)
+&& s->input_picture[b_frames]->f.pict_type== AV_PICTURE_TYPE_I)
 b_frames--;

 s->reordered_input_picture[0]= s->input_picture[b_frames];
-if(s->reordered_input_picture[0]->pict_type != AV_PICTURE_TYPE_I)
-s->reordered_input_picture[0]->pict_type= AV_PICTURE_TYPE_P;
-s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
+if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
+s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
+s->reordered_input_picture[0]->f.coded_picture_number = s->coded_picture_number++;
 for(i=0; i<b_frames; i++){
-s->reordered_input_picture[i+1]= s->input_picture[i];
-s->reordered_input_picture[i+1]->pict_type= AV_PICTURE_TYPE_B;
-s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
+s->reordered_input_picture[i + 1] = s->input_picture[i];
+s->reordered_input_picture[i + 1]->f.pict_type = AV_PICTURE_TYPE_B;
+s->reordered_input_picture[i + 1]->f.coded_picture_number = s->coded_picture_number++;
 }
 }
 }
 no_output_pic:
 if(s->reordered_input_picture[0]){
-s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=AV_PICTURE_TYPE_B ? 3 : 0;
+s->reordered_input_picture[0]->f.reference = s->reordered_input_picture[0]->f.pict_type!=AV_PICTURE_TYPE_B ? 3 : 0;

 ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);

-if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size){
+if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size) {
 // input is a shared pix, so we can't modifiy it -> alloc a new one & ensure that the shared one is reuseable

 int i= ff_find_unused_picture(s, 0);
 Picture *pic= &s->picture[i];

-pic->reference = s->reordered_input_picture[0]->reference;
+pic->f.reference = s->reordered_input_picture[0]->f.reference;
 if(ff_alloc_picture(s, pic, 0) < 0){
 return -1;
 }

 /* mark us unused / free shared pic */
-if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_INTERNAL)
+if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
 s->avctx->release_buffer(s->avctx, (AVFrame*)s->reordered_input_picture[0]);
 for(i=0; i<4; i++)
-s->reordered_input_picture[0]->data[i]= NULL;
-s->reordered_input_picture[0]->type= 0;
+s->reordered_input_picture[0]->f.data[i] = NULL;
+s->reordered_input_picture[0]->f.type = 0;

 copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);

@@ -1209,12 +1209,12 @@ no_output_pic:

 s->current_picture_ptr= s->reordered_input_picture[0];
 for(i=0; i<4; i++){
-s->new_picture.data[i]+= INPLACE_OFFSET;
+s->new_picture.f.data[i] += INPLACE_OFFSET;
 }
 }
 ff_copy_picture(&s->current_picture, s->current_picture_ptr);

-s->picture_number= s->new_picture.display_picture_number;
+s->picture_number = s->new_picture.f.display_picture_number;
 //printf("dpn:%d\n", s->picture_number);
 }else{
 memset(&s->new_picture, 0, sizeof(Picture));
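Every change in the hunks above follows one mechanical pattern: fields that used to sit directly in struct Picture are now reached through an embedded AVFrame member named f. A minimal sketch of that layout, assuming only a cut-down Picture (the `extra` member is a hypothetical stand-in for the many real fields in mpegvideo.h):

    /* Minimal sketch only -- not the real header. */
    #include <libavcodec/avcodec.h>

    typedef struct Picture {
        AVFrame f;   /* embedded frame: pic->pict_type becomes pic->f.pict_type */
        int extra;   /* Picture-specific state stays a direct member */
    } Picture;

    static enum AVPictureType picture_type(const Picture *pic)
    {
        return pic->f.pict_type;  /* old flat access: pic->pict_type */
    }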
@@ -1249,8 +1249,8 @@ int MPV_encode_picture(AVCodecContext *avctx,
 }

 /* output? */
-if(s->new_picture.data[0]){
-s->pict_type= s->new_picture.pict_type;
+if (s->new_picture.f.data[0]) {
+s->pict_type = s->new_picture.f.pict_type;
 //emms_c();
 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
 MPV_frame_start(s, avctx);
@@ -1307,8 +1307,8 @@ vbv_retry:
 ff_write_pass1_stats(s);

 for(i=0; i<4; i++){
-s->current_picture_ptr->error[i]= s->current_picture.error[i];
-avctx->error[i] += s->current_picture_ptr->error[i];
+s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
+avctx->error[i] += s->current_picture_ptr->f.error[i];
 }

 if(s->flags&CODEC_FLAG_PASS1)
@@ -1508,7 +1508,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
 update_qscale(s);

 if(!(s->flags&CODEC_FLAG_QP_RD)){
-s->qscale= s->current_picture_ptr->qscale_table[mb_xy];
+s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
 s->dquant= s->qscale - last_qp;

 if(s->out_format==FMT_H263){
@@ -1532,9 +1532,9 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,

 wrap_y = s->linesize;
 wrap_c = s->uvlinesize;
-ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
-ptr_cb = s->new_picture.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
-ptr_cr = s->new_picture.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+ptr_y = s->new_picture.f.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
+ptr_cb = s->new_picture.f.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+ptr_cr = s->new_picture.f.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;

 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
 uint8_t *ebuf= s->edge_emu_buffer + 32;
@@ -1602,12 +1602,12 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
 }

 if (s->mv_dir & MV_DIR_FORWARD) {
-MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
+MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
 op_pix = s->dsp.avg_pixels_tab;
 op_qpix= s->dsp.avg_qpel_pixels_tab;
 }
 if (s->mv_dir & MV_DIR_BACKWARD) {
-MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
+MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
 }

 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
@@ -1933,18 +1933,18 @@ static int sse_mb(MpegEncContext *s){

 if(w==16 && h==16)
 if(s->avctx->mb_cmp == FF_CMP_NSSE){
-return s->dsp.nsse[0](s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
-+s->dsp.nsse[1](s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
-+s->dsp.nsse[1](s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
+return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
++s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
++s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
 }else{
-return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
-+s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
-+s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
+return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
++s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
++s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
 }
 else
-return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
-+sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
-+sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
+return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
++sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
++sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
 }

 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
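For orientation, the sse()/nsse calls above address each plane at base + mb_x*16 + mb_y*16*linesize for luma (8 and uvlinesize for chroma) and sum squared pixel differences over the block. A self-contained sketch of that per-plane computation (names are illustrative, not the dsputil implementation):

    #include <stdint.h>

    /* Sum of squared differences between a w x h source block and its
     * reconstruction; `stride` is the byte distance between rows. */
    static int block_sse(const uint8_t *src, const uint8_t *rec,
                         int w, int h, int stride)
    {
        int sum = 0;
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                int d = src[x] - rec[x];
                sum += d * d;
            }
            src += stride;
            rec += stride;
        }
        return sum;
    }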
@@ -2003,7 +2003,7 @@ static int mb_var_thread(AVCodecContext *c, void *arg){
 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
 int xx = mb_x * 16;
 int yy = mb_y * 16;
-uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
+uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
 int varc;
 int sum = s->dsp.pix_sum(pix, s->linesize);

@@ -2070,7 +2070,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
 /* note: quant matrix value (8) is implied here */
 s->last_dc[i] = 128 << s->intra_dc_precision;

-s->current_picture.error[i] = 0;
+s->current_picture.f.error[i] = 0;
 }
 s->mb_skip_run = 0;
 memset(s->last_mv, 0, sizeof(s->last_mv));
@@ -2271,8 +2271,8 @@ static int encode_thread(AVCodecContext *c, void *arg){
 s->mv_type = MV_TYPE_8X8;
 s->mb_intra= 0;
 for(i=0; i<4; i++){
-s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
-s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
+s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
+s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
 }
 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
 &dmin, &next_block, 0, 0);
@@ -2458,7 +2458,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
 }
 }

-s->current_picture.qscale_table[xy]= best_s.qscale;
+s->current_picture.f.qscale_table[xy] = best_s.qscale;

 copy_context_after_encode(s, &best_s, -1);

@@ -2525,8 +2525,8 @@ static int encode_thread(AVCodecContext *c, void *arg){
 s->mv_type = MV_TYPE_8X8;
 s->mb_intra= 0;
 for(i=0; i<4; i++){
-s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
-s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
+s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
+s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
 }
 break;
 case CANDIDATE_MB_TYPE_DIRECT:
@@ -2627,14 +2627,14 @@ static int encode_thread(AVCodecContext *c, void *arg){
 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;

-s->current_picture.error[0] += sse(
-s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
+s->current_picture.f.error[0] += sse(
+s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
 s->dest[0], w, h, s->linesize);
-s->current_picture.error[1] += sse(
-s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
+s->current_picture.f.error[1] += sse(
+s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
-s->current_picture.error[2] += sse(
-s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
+s->current_picture.f.error[2] += sse(
+s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
 }
 if(s->loop_filter){
@@ -2685,9 +2685,9 @@ static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
 MERGE(misc_bits);
 MERGE(error_count);
 MERGE(padding_bug_score);
-MERGE(current_picture.error[0]);
-MERGE(current_picture.error[1]);
-MERGE(current_picture.error[2]);
+MERGE(current_picture.f.error[0]);
+MERGE(current_picture.f.error[1]);
+MERGE(current_picture.f.error[2]);

 if(dst->avctx->noise_reduction){
 for(i=0; i<64; i++){
@@ -2704,13 +2704,13 @@ static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)

 static int estimate_qp(MpegEncContext *s, int dry_run){
 if (s->next_lambda){
-s->current_picture_ptr->quality=
-s->current_picture.quality = s->next_lambda;
+s->current_picture_ptr->f.quality =
+s->current_picture.f.quality = s->next_lambda;
 if(!dry_run) s->next_lambda= 0;
 } else if (!s->fixed_qscale) {
-s->current_picture_ptr->quality=
-s->current_picture.quality = ff_rate_estimate_qscale(s, dry_run);
-if (s->current_picture.quality < 0)
+s->current_picture_ptr->f.quality =
+s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
+if (s->current_picture.f.quality < 0)
 return -1;
 }

@@ -2733,7 +2733,7 @@ static int estimate_qp(MpegEncContext *s, int dry_run){
 s->lambda= s->lambda_table[0];
 //FIXME broken
 }else
-s->lambda= s->current_picture.quality;
+s->lambda = s->current_picture.f.quality;
 //printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
 update_qscale(s);
 return 0;
@@ -2742,7 +2742,7 @@ static int estimate_qp(MpegEncContext *s, int dry_run){
 /* must be called before writing the header */
 static void set_frame_distances(MpegEncContext * s){
 assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE);
-s->time= s->current_picture_ptr->pts*s->avctx->time_base.num;
+s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;

 if(s->pict_type==AV_PICTURE_TYPE_B){
 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
@@ -2916,12 +2916,12 @@ static int encode_picture(MpegEncContext *s, int picture_number)
 }

 //FIXME var duplication
-s->current_picture_ptr->key_frame=
-s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
-s->current_picture_ptr->pict_type=
-s->current_picture.pict_type= s->pict_type;
+s->current_picture_ptr->f.key_frame =
+s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
+s->current_picture_ptr->f.pict_type =
+s->current_picture.f.pict_type = s->pict_type;

-if(s->current_picture.key_frame)
+if (s->current_picture.f.key_frame)
 s->picture_in_gop_number=0;

 s->last_bits= put_bits_count(&s->pb);

@@ -780,10 +780,10 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
 }else{
 if(n<4){
 wrap= s->linesize;
-dest= s->current_picture.data[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8;
+dest= s->current_picture.f.data[0] + (((n >> 1) + 2*s->mb_y) * 8* wrap ) + ((n & 1) + 2*s->mb_x) * 8;
 }else{
 wrap= s->uvlinesize;
-dest= s->current_picture.data[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
+dest= s->current_picture.f.data[n - 3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
 }
 if(s->mb_x==0) a= (1024 + (scale>>1))/scale;
 else a= get_dc(dest-8, wrap, scale*8);
@@ -1172,7 +1172,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
 {
 int cbp, code, i;
 uint8_t *coded_val;
-uint32_t * const mb_type_ptr= &s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ];
+uint32_t * const mb_type_ptr = &s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride];

 if (s->pict_type == AV_PICTURE_TYPE_P) {
 if (s->use_skip_mb_code) {

@@ -44,9 +44,9 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f

 void ff_write_pass1_stats(MpegEncContext *s){
 snprintf(s->avctx->stats_out, 256, "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d;\n",
-s->current_picture_ptr->display_picture_number, s->current_picture_ptr->coded_picture_number, s->pict_type,
-s->current_picture.quality, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits,
+s->current_picture_ptr->f.display_picture_number, s->current_picture_ptr->f.coded_picture_number, s->pict_type,
+s->current_picture.f.quality, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits,
 s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count, s->skip_count, s->header_bits);
 }

 static inline double qp2bits(RateControlEntry *rce, double qp){
@@ -707,10 +707,10 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
 //if(dts_pic)
 // av_log(NULL, AV_LOG_ERROR, "%Ld %Ld %Ld %d\n", s->current_picture_ptr->pts, s->user_specified_pts, dts_pic->pts, picture_number);

-if(!dts_pic || dts_pic->pts == AV_NOPTS_VALUE)
+if (!dts_pic || dts_pic->f.pts == AV_NOPTS_VALUE)
 wanted_bits= (uint64_t)(s->bit_rate*(double)picture_number/fps);
 else
-wanted_bits= (uint64_t)(s->bit_rate*(double)dts_pic->pts/fps);
+wanted_bits = (uint64_t)(s->bit_rate*(double)dts_pic->f.pts / fps);
 }

 diff= s->total_bits - wanted_bits;
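The wanted_bits lines above implement a linear bit budget: the target is bit_rate times elapsed time (pts/fps, or picture_number/fps when no DTS picture is available), and diff measures how far the encoder has overshot it. As a sketch (hypothetical helper, not the ratecontrol.c API):

    #include <stdint.h>

    /* Positive result: more bits spent so far than the target allows. */
    static int64_t bit_budget_overshoot(int64_t total_bits, int bit_rate,
                                        double elapsed, double fps)
    {
        uint64_t wanted_bits = (uint64_t)(bit_rate * elapsed / fps);
        return total_bits - (int64_t)wanted_bits;
    }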
@@ -142,7 +142,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)

 mb_pos = row * s->mb_stride;
 for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
-int mbtype = s->current_picture_ptr->mb_type[mb_pos];
+int mbtype = s->current_picture_ptr->f.mb_type[mb_pos];
 if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
 r->deblock_coefs[mb_pos] = 0xFFFF;
 if(IS_INTRA(mbtype))
@@ -154,11 +154,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
 */
 mb_pos = row * s->mb_stride;
 for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
-cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
+cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]];
 if(mb_x)
-left_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - 1]];
+left_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - 1]];
 for(j = 0; j < 16; j += 4){
-Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
+Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
 for(i = !mb_x; i < 4; i++, Y += 4){
 int ij = i + j;
 loc_lim = 0;
@@ -178,7 +178,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
 if(mb_x)
 left_cbp = (r->cbp_chroma[mb_pos - 1] >> (k*4)) & 0xF;
 for(j = 0; j < 8; j += 4){
-C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
+C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
 for(i = !mb_x; i < 2; i++, C += 4){
 int ij = i + (j >> 1);
 loc_lim = 0;
@@ -196,11 +196,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
 }
 mb_pos = row * s->mb_stride;
 for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
-cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
+cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]];
 if(row)
-top_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - s->mb_stride]];
+top_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - s->mb_stride]];
 for(j = 4*!row; j < 16; j += 4){
-Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
+Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
 for(i = 0; i < 4; i++, Y += 4){
 int ij = i + j;
 loc_lim = 0;
@@ -220,7 +220,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
 if(row)
 top_cbp = (r->cbp_chroma[mb_pos - s->mb_stride] >> (k*4)) & 0xF;
 for(j = 4*!row; j < 8; j += 4){
-C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
+C = s->current_picture_ptr->f.data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
 for(i = 0; i < 2; i++, C += 4){
 int ij = i + (j >> 1);
 loc_lim = 0;

@@ -439,13 +439,13 @@ static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types)
 if(!get_bits1(gb))
 av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
 }
-s->current_picture_ptr->mb_type[mb_pos] = r->is16 ? MB_TYPE_INTRA16x16 : MB_TYPE_INTRA;
+s->current_picture_ptr->f.mb_type[mb_pos] = r->is16 ? MB_TYPE_INTRA16x16 : MB_TYPE_INTRA;
 r->block_type = r->is16 ? RV34_MB_TYPE_INTRA16x16 : RV34_MB_TYPE_INTRA;
 }else{
 r->block_type = r->decode_mb_info(r);
 if(r->block_type == -1)
 return -1;
-s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
+s->current_picture_ptr->f.mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
 r->mb_type[mb_pos] = r->block_type;
 if(r->block_type == RV34_MB_SKIP){
 if(s->pict_type == AV_PICTURE_TYPE_P)
@@ -453,7 +453,7 @@ static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types)
 if(s->pict_type == AV_PICTURE_TYPE_B)
 r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
 }
-r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
+r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->f.mb_type[mb_pos]);
 rv34_decode_mv(r, r->block_type);
 if(r->block_type == RV34_MB_SKIP){
 fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
@@ -462,7 +462,7 @@ static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types)
 r->chroma_vlc = 1;
 r->luma_vlc = 0;
 }
-if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
+if(IS_INTRA(s->current_picture_ptr->f.mb_type[mb_pos])){
 if(r->is16){
 t = get_bits(gb, 2);
 fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
@@ -527,27 +527,27 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int
 c_off = -1;

 if(r->avail_cache[avail_index - 1]){
-A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
-A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
+A[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-1][0];
+A[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-1][1];
 }
 if(r->avail_cache[avail_index - 4]){
-B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
-B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
+B[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride][0];
+B[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride][1];
 }else{
 B[0] = A[0];
 B[1] = A[1];
 }
 if(!r->avail_cache[avail_index - 4 + c_off]){
 if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1] || r->rv30)){
-C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
-C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
+C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride-1][0];
+C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride-1][1];
 }else{
 C[0] = A[0];
 C[1] = A[1];
 }
 }else{
-C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
-C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
+C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride+c_off][0];
+C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride+c_off][1];
 }
 mx = mid_pred(A[0], B[0], C[0]);
 my = mid_pred(A[1], B[1], C[1]);
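mid_pred() above selects the median of the three neighbouring vectors component-wise, which is how the RV30/40 decoder forms its motion-vector predictor from neighbours A, B and C. A branch-based sketch of the same median (libavutil's mid_pred is the real implementation):

    /* Median of three: sort so a <= b, clamp b by c, then take max(a, b). */
    static int median3(int a, int b, int c)
    {
        if (a > b) {
            int t = a;
            a = b;
            b = t;
        }
        if (b > c)
            b = c;
        return a > b ? a : b;
    }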
@@ -555,8 +555,8 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int
 my += r->dmv[dmv_no][1];
 for(j = 0; j < part_sizes_h[block_type]; j++){
 for(i = 0; i < part_sizes_w[block_type]; i++){
-s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
-s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
+s->current_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
+s->current_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
 }
 }
 }
@@ -611,28 +611,28 @@ static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
 int i, j;
 Picture *cur_pic = s->current_picture_ptr;
 const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
-int type = cur_pic->mb_type[mb_pos];
+int type = cur_pic->f.mb_type[mb_pos];

 memset(A, 0, sizeof(A));
 memset(B, 0, sizeof(B));
 memset(C, 0, sizeof(C));
 if((r->avail_cache[6-1] & type) & mask){
-A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
-A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
+A[0] = cur_pic->f.motion_val[dir][mv_pos - 1][0];
+A[1] = cur_pic->f.motion_val[dir][mv_pos - 1][1];
 has_A = 1;
 }
 if((r->avail_cache[6-4] & type) & mask){
-B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
-B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
+B[0] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride][0];
+B[1] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride][1];
 has_B = 1;
 }
 if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
-C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
-C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
+C[0] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride + 2][0];
+C[1] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride + 2][1];
 has_C = 1;
 }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
-C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
-C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
+C[0] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride - 1][0];
+C[1] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride - 1][1];
 has_C = 1;
 }

@@ -643,12 +643,12 @@ static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)

 for(j = 0; j < 2; j++){
 for(i = 0; i < 2; i++){
-cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
-cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
+cur_pic->f.motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
+cur_pic->f.motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
 }
 }
 if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
-ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
+ZERO8x2(cur_pic->f.motion_val[!dir][mv_pos], s->b8_stride);
 }
 }

@@ -665,27 +665,27 @@ static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
 int avail_index = avail_indexes[0];

 if(r->avail_cache[avail_index - 1]){
-A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
-A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
+A[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - 1][0];
+A[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - 1][1];
 }
 if(r->avail_cache[avail_index - 4]){
-B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
-B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
+B[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride][0];
+B[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride][1];
 }else{
 B[0] = A[0];
 B[1] = A[1];
 }
 if(!r->avail_cache[avail_index - 4 + 2]){
 if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1])){
-C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
-C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
+C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride - 1][0];
+C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride - 1][1];
 }else{
 C[0] = A[0];
 C[1] = A[1];
 }
 }else{
-C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+2][0];
-C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+2][1];
+C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride + 2][0];
+C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride + 2][1];
 }
 mx = mid_pred(A[0], B[0], C[0]);
 my = mid_pred(A[1], B[1], C[1]);
@@ -694,8 +694,8 @@ static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
 for(j = 0; j < 2; j++){
 for(i = 0; i < 2; i++){
 for(k = 0; k < 2; k++){
-s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
-s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
+s->current_picture_ptr->f.motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
+s->current_picture_ptr->f.motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
 }
 }
 }
@@ -733,24 +733,24 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,

 if(thirdpel){
 int chroma_mx, chroma_my;
-mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
-my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
-lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
-ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
-chroma_mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + 1) >> 1;
-chroma_my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + 1) >> 1;
+mx = (s->current_picture_ptr->f.motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
+my = (s->current_picture_ptr->f.motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
+lx = (s->current_picture_ptr->f.motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
+ly = (s->current_picture_ptr->f.motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
+chroma_mx = (s->current_picture_ptr->f.motion_val[dir][mv_pos][0] + 1) >> 1;
+chroma_my = (s->current_picture_ptr->f.motion_val[dir][mv_pos][1] + 1) >> 1;
 umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
 umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
 uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
 uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
 }else{
 int cx, cy;
-mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
-my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
-lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
-ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
-cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
-cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
+mx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] >> 2;
+my = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] >> 2;
+lx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] & 3;
+ly = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] & 3;
+cx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] / 2;
+cy = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] / 2;
 umx = cx >> 2;
 umy = cy >> 2;
 uvmx = (cx & 3) << 1;
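The third-pel arithmetic above splits each motion component into an integer and a 0..2 fractional part. The (3 << 24) bias makes the operand positive before / and %, so truncating C division behaves like floor division for negative vectors; subtracting (1 << 24) afterwards removes the bias again. In isolation:

    /* Split a third-pel motion component v into floor(v/3) and v mod 3,
     * correct for negative v despite C's truncating division. */
    static void split_thirdpel(int v, int *whole, int *frac)
    {
        *whole = (v + (3 << 24)) / 3 - (1 << 24);
        *frac  = (v + (3 << 24)) % 3;
    }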
@@ -760,9 +760,9 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
 uvmx = uvmy = 4;
 }
 dxy = ly*4 + lx;
-srcY = dir ? s->next_picture_ptr->data[0] : s->last_picture_ptr->data[0];
-srcU = dir ? s->next_picture_ptr->data[1] : s->last_picture_ptr->data[1];
-srcV = dir ? s->next_picture_ptr->data[2] : s->last_picture_ptr->data[2];
+srcY = dir ? s->next_picture_ptr->f.data[0] : s->last_picture_ptr->f.data[0];
+srcU = dir ? s->next_picture_ptr->f.data[1] : s->last_picture_ptr->f.data[1];
+srcV = dir ? s->next_picture_ptr->f.data[2] : s->last_picture_ptr->f.data[2];
 src_x = s->mb_x * 16 + xoff + mx;
 src_y = s->mb_y * 16 + yoff + my;
 uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
@@ -870,31 +870,31 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type)
 switch(block_type){
 case RV34_MB_TYPE_INTRA:
 case RV34_MB_TYPE_INTRA16x16:
-ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
 return 0;
 case RV34_MB_SKIP:
 if(s->pict_type == AV_PICTURE_TYPE_P){
-ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
 rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
 break;
 }
 case RV34_MB_B_DIRECT:
 //surprisingly, it uses motion scheme from next reference frame
-next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
+next_bt = s->next_picture_ptr->f.mb_type[s->mb_x + s->mb_y * s->mb_stride];
 if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
-ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
-ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ZERO8x2(s->current_picture_ptr->f.motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
 }else
 for(j = 0; j < 2; j++)
 for(i = 0; i < 2; i++)
 for(k = 0; k < 2; k++)
 for(l = 0; l < 2; l++)
-s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
+s->current_picture_ptr->f.motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][k]);
 if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
 rv34_mc_2mv(r, block_type);
 else
 rv34_mc_2mv_skip(r);
-ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
 break;
 case RV34_MB_P_16x16:
 case RV34_MB_P_MIX16x16:
@@ -1128,7 +1128,7 @@ static int rv34_set_deblock_coef(RV34DecContext *r)
 MpegEncContext *s = &r->s;
 int hmvmask = 0, vmvmask = 0, i, j;
 int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
-int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
+int16_t (*motion_val)[2] = &s->current_picture_ptr->f.motion_val[0][midx];
 for(j = 0; j < 16; j += 8){
 for(i = 0; i < 2; i++){
 if(is_mv_diff_gt_3(motion_val + i, 1))
@@ -1170,14 +1170,14 @@ static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types)
 dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
 if(s->mb_x && dist)
 r->avail_cache[5] =
-r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
+r->avail_cache[9] = s->current_picture_ptr->f.mb_type[mb_pos - 1];
 if(dist >= s->mb_width)
 r->avail_cache[2] =
-r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
+r->avail_cache[3] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride];
 if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
-r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
+r->avail_cache[4] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride + 1];
 if(s->mb_x && dist > s->mb_width)
-r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
+r->avail_cache[1] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride - 1];

 s->qscale = r->si.quant;
 cbp = cbp2 = rv34_decode_mb_header(r, intra_types);
@@ -1187,7 +1187,7 @@ static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types)
 r->deblock_coefs[mb_pos] = 0xFFFF;
 else
 r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
-s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
+s->current_picture_ptr->f.qscale_table[mb_pos] = s->qscale;

 if(cbp == -1)
 return -1;
@@ -1221,7 +1221,7 @@ static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types)
 rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]],rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]]);
 rv34_inv_transform(s->block[blknum] + blkoff);
 }
-if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos]))
+if (IS_INTRA(s->current_picture_ptr->f.mb_type[mb_pos]))
 rv34_output_macroblock(r, intra_types, cbp2, r->is16);
 else
 rv34_apply_differences(r, cbp2);
@@ -1429,7 +1429,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
 av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
 return -1;
 }
-if((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) && si.type == AV_PICTURE_TYPE_B)
+if ((!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) && si.type == AV_PICTURE_TYPE_B)
 return -1;
 if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
 || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)

@@ -475,7 +475,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)

 mb_pos = row * s->mb_stride;
 for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
-int mbtype = s->current_picture_ptr->mb_type[mb_pos];
+int mbtype = s->current_picture_ptr->f.mb_type[mb_pos];
 if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
 r->cbp_luma [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF;
 if(IS_INTRA(mbtype))
@@ -489,7 +489,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
 int avail[4];
 int y_to_deblock, c_to_deblock[2];

-q = s->current_picture_ptr->qscale_table[mb_pos];
+q = s->current_picture_ptr->f.qscale_table[mb_pos];
 alpha = rv40_alpha_tab[q];
 beta = rv40_beta_tab [q];
 betaY = betaC = beta * 3;
@@ -504,7 +504,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
 if(avail[i]){
 int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride;
 mvmasks[i] = r->deblock_coefs[pos];
-mbtype [i] = s->current_picture_ptr->mb_type[pos];
+mbtype [i] = s->current_picture_ptr->f.mb_type[pos];
 cbp [i] = r->cbp_luma[pos];
 uvcbp[i][0] = r->cbp_chroma[pos] & 0xF;
 uvcbp[i][1] = r->cbp_chroma[pos] >> 4;
@@ -563,7 +563,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
 }

 for(j = 0; j < 16; j += 4){
-Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
+Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
 for(i = 0; i < 4; i++, Y += 4){
 int ij = i + j;
 int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
@@ -607,7 +607,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
 }
 for(k = 0; k < 2; k++){
 for(j = 0; j < 2; j++){
-C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
+C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
 for(i = 0; i < 2; i++, C += 4){
 int ij = i + j*2;
 int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0;

@@ -3441,8 +3441,8 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
 frame_start(s);

 s->m.current_picture_ptr= &s->m.current_picture;
-s->m.last_picture.pts= s->m.current_picture.pts;
-s->m.current_picture.pts= pict->pts;
+s->m.last_picture.f.pts = s->m.current_picture.f.pts;
+s->m.current_picture.f.pts = pict->pts;
 if(pict->pict_type == AV_PICTURE_TYPE_P){
 int block_width = (width +15)>>4;
 int block_height= (height+15)>>4;
@@ -3452,14 +3452,14 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
 assert(s->last_picture[0].data[0]);

 s->m.avctx= s->avctx;
-s->m.current_picture.data[0]= s->current_picture.data[0];
-s->m. last_picture.data[0]= s->last_picture[0].data[0];
-s->m. new_picture.data[0]= s-> input_picture.data[0];
+s->m.current_picture.f.data[0] = s->current_picture.data[0];
+s->m. last_picture.f.data[0] = s->last_picture[0].data[0];
+s->m. new_picture.f.data[0] = s-> input_picture.data[0];
 s->m. last_picture_ptr= &s->m. last_picture;
 s->m.linesize=
-s->m. last_picture.linesize[0]=
-s->m. new_picture.linesize[0]=
-s->m.current_picture.linesize[0]= stride;
+s->m. last_picture.f.linesize[0] =
+s->m. new_picture.f.linesize[0] =
+s->m.current_picture.f.linesize[0] = stride;
 s->m.uvlinesize= s->current_picture.linesize[1];
 s->m.width = width;
 s->m.height= height;
@@ -3646,9 +3646,9 @@ redo_frame:
 s->current_picture.quality = pict->quality;
 s->m.frame_bits = 8*(s->c.bytestream - s->c.bytestream_start);
 s->m.p_tex_bits = s->m.frame_bits - s->m.misc_bits - s->m.mv_bits;
-s->m.current_picture.display_picture_number =
-s->m.current_picture.coded_picture_number = avctx->frame_number;
-s->m.current_picture.quality = pict->quality;
+s->m.current_picture.f.display_picture_number =
+s->m.current_picture.f.coded_picture_number = avctx->frame_number;
+s->m.current_picture.f.quality = pict->quality;
 s->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
 if(s->pass1_rc)
 if (ff_rate_estimate_qscale(&s->m, 0) < 0)

@@ -689,12 +689,12 @@ static int svq1_decode_frame(AVCodecContext *avctx,
 linesize= s->uvlinesize;
 }

-current = s->current_picture.data[i];
+current = s->current_picture.f.data[i];

 if(s->pict_type==AV_PICTURE_TYPE_B){
-previous = s->next_picture.data[i];
+previous = s->next_picture.f.data[i];
 }else{
-previous = s->last_picture.data[i];
+previous = s->last_picture.f.data[i];
 }

 if (s->pict_type == AV_PICTURE_TYPE_I) {

@@ -284,11 +284,11 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
 s->m.avctx= s->avctx;
 s->m.current_picture_ptr= &s->m.current_picture;
 s->m.last_picture_ptr = &s->m.last_picture;
-s->m.last_picture.data[0]= ref_plane;
+s->m.last_picture.f.data[0] = ref_plane;
 s->m.linesize=
-s->m.last_picture.linesize[0]=
-s->m.new_picture.linesize[0]=
-s->m.current_picture.linesize[0]= stride;
+s->m.last_picture.f.linesize[0] =
+s->m.new_picture.f.linesize[0] =
+s->m.current_picture.f.linesize[0] = stride;
 s->m.width= width;
 s->m.height= height;
 s->m.mb_width= block_width;
@@ -318,9 +318,9 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
 s->m.current_picture.mb_mean= (uint8_t *)s->dummy;
 s->m.current_picture.mb_var= (uint16_t*)s->dummy;
 s->m.current_picture.mc_mb_var= (uint16_t*)s->dummy;
-s->m.current_picture.mb_type= s->dummy;
+s->m.current_picture.f.mb_type = s->dummy;

-s->m.current_picture.motion_val[0]= s->motion_val8[plane] + 2;
+s->m.current_picture.f.motion_val[0] = s->motion_val8[plane] + 2;
 s->m.p_mv_table= s->motion_val16[plane] + s->m.mb_stride + 1;
 s->m.dsp= s->dsp; //move
 ff_init_me(&s->m);
@@ -328,7 +328,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
 s->m.me.dia_size= s->avctx->dia_size;
 s->m.first_slice_line=1;
 for (y = 0; y < block_height; y++) {
-s->m.new_picture.data[0]= src - y*16*stride; //ugly
+s->m.new_picture.f.data[0] = src - y*16*stride; //ugly
 s->m.mb_y= y;

 for(i=0; i<16 && i + 16*y<height; i++){

@@ -288,8 +288,8 @@ static inline void svq3_mc_dir_part(MpegEncContext *s,
 }

 /* form component predictions */
-dest = s->current_picture.data[0] + x + y*s->linesize;
-src = pic->data[0] + mx + my*s->linesize;
+dest = s->current_picture.f.data[0] + x + y*s->linesize;
+src = pic->f.data[0] + mx + my*s->linesize;

 if (emu) {
 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
@@ -309,8 +309,8 @@ static inline void svq3_mc_dir_part(MpegEncContext *s,
 blocksize++;

 for (i = 1; i < 3; i++) {
-dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize;
-src = pic->data[i] + mx + my*s->uvlinesize;
+dest = s->current_picture.f.data[i] + (x >> 1) + (y >> 1) * s->uvlinesize;
+src = pic->f.data[i] + mx + my * s->uvlinesize;

 if (emu) {
 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
@@ -347,8 +347,8 @@ static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir,
 if (mode != PREDICT_MODE) {
 pred_motion(h, k, (part_width >> 2), dir, 1, &mx, &my);
 } else {
-mx = s->next_picture.motion_val[0][b_xy][0]<<1;
-my = s->next_picture.motion_val[0][b_xy][1]<<1;
+mx = s->next_picture.f.motion_val[0][b_xy][0] << 1;
+my = s->next_picture.f.motion_val[0][b_xy][1] << 1;

 if (dir == 0) {
 mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
@ -425,7 +425,9 @@ static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* write back motion vectors */
|
/* write back motion vectors */
|
||||||
fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4);
|
fill_rectangle(s->current_picture.f.motion_val[dir][b_xy],
|
||||||
|
part_width >> 2, part_height >> 2, h->b_stride,
|
||||||
|
pack16to32(mx, my), 4);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -448,7 +450,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
|
|||||||
h->topright_samples_available = 0xFFFF;
|
h->topright_samples_available = 0xFFFF;
|
||||||
|
|
||||||
if (mb_type == 0) { /* SKIP */
|
if (mb_type == 0) { /* SKIP */
|
||||||
if (s->pict_type == AV_PICTURE_TYPE_P || s->next_picture.mb_type[mb_xy] == -1) {
|
if (s->pict_type == AV_PICTURE_TYPE_P || s->next_picture.f.mb_type[mb_xy] == -1) {
|
||||||
svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);
|
svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);
|
||||||
|
|
||||||
if (s->pict_type == AV_PICTURE_TYPE_B) {
|
if (s->pict_type == AV_PICTURE_TYPE_B) {
|
||||||
@ -457,7 +459,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
|
|||||||
|
|
||||||
mb_type = MB_TYPE_SKIP;
|
mb_type = MB_TYPE_SKIP;
|
||||||
} else {
|
} else {
|
||||||
mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6);
|
mb_type = FFMIN(s->next_picture.f.mb_type[mb_xy], 6);
|
||||||
if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
|
if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
|
||||||
return -1;
|
return -1;
|
||||||
if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
|
if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
|
||||||
@ -486,7 +488,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
|
|||||||
for (m = 0; m < 2; m++) {
|
for (m = 0; m < 2; m++) {
|
||||||
if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6] != -1) {
|
if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6] != -1) {
|
||||||
for (i = 0; i < 4; i++) {
|
for (i = 0; i < 4; i++) {
|
||||||
*(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride];
|
*(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - 1 + i*h->b_stride];
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
for (i = 0; i < 4; i++) {
|
for (i = 0; i < 4; i++) {
|
||||||
@ -494,18 +496,18 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (s->mb_y > 0) {
|
if (s->mb_y > 0) {
|
||||||
memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
|
memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.f.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
|
||||||
memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
|
memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
|
||||||
|
|
||||||
if (s->mb_x < (s->mb_width - 1)) {
|
if (s->mb_x < (s->mb_width - 1)) {
|
||||||
*(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4];
|
*(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - h->b_stride + 4];
|
||||||
h->ref_cache[m][scan8[0] + 4 - 1*8] =
|
h->ref_cache[m][scan8[0] + 4 - 1*8] =
|
||||||
(h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1]+6] == -1 ||
|
(h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1]+6] == -1 ||
|
||||||
h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride ] ] == -1) ? PART_NOT_AVAILABLE : 1;
|
h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride ] ] == -1) ? PART_NOT_AVAILABLE : 1;
|
||||||
}else
|
}else
|
||||||
h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
|
h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
|
||||||
if (s->mb_x > 0) {
|
if (s->mb_x > 0) {
|
||||||
*(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1];
|
*(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - h->b_stride - 1];
|
||||||
h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] == -1) ? PART_NOT_AVAILABLE : 1;
|
h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] == -1) ? PART_NOT_AVAILABLE : 1;
|
||||||
}else
|
}else
|
||||||
h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
|
h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
|
||||||
@ -526,7 +528,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
|
|||||||
return -1;
|
return -1;
|
||||||
} else {
|
} else {
|
||||||
for (i = 0; i < 4; i++) {
|
for (i = 0; i < 4; i++) {
|
||||||
memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
|
memset(s->current_picture.f.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (mb_type != 1) {
|
if (mb_type != 1) {
|
||||||
@ -534,7 +536,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
|
|||||||
return -1;
|
return -1;
|
||||||
} else {
|
} else {
|
||||||
for (i = 0; i < 4; i++) {
|
for (i = 0; i < 4; i++) {
|
||||||
memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
|
memset(s->current_picture.f.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -621,11 +623,11 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
|
|||||||
|
|
||||||
if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
|
if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
|
||||||
for (i = 0; i < 4; i++) {
|
for (i = 0; i < 4; i++) {
|
||||||
memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
|
memset(s->current_picture.f.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
|
||||||
}
|
}
|
||||||
if (s->pict_type == AV_PICTURE_TYPE_B) {
|
if (s->pict_type == AV_PICTURE_TYPE_B) {
|
||||||
for (i = 0; i < 4; i++) {
|
for (i = 0; i < 4; i++) {
|
||||||
memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
|
memset(s->current_picture.f.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -706,7 +708,7 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
|
|||||||
}
|
}
|
||||||
|
|
||||||
h->cbp= cbp;
|
h->cbp= cbp;
|
||||||
s->current_picture.mb_type[mb_xy] = mb_type;
|
s->current_picture.f.mb_type[mb_xy] = mb_type;
|
||||||
|
|
||||||
if (IS_INTRA(mb_type)) {
|
if (IS_INTRA(mb_type)) {
|
||||||
h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8);
|
h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8);
|
||||||
@ -966,8 +968,8 @@ static int svq3_decode_frame(AVCodecContext *avctx,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* for skipping the frame */
|
/* for skipping the frame */
|
||||||
s->current_picture.pict_type = s->pict_type;
|
s->current_picture.f.pict_type = s->pict_type;
|
||||||
s->current_picture.key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
|
s->current_picture.f.key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
|
||||||
|
|
||||||
/* Skip B-frames if we do not have reference frames. */
|
/* Skip B-frames if we do not have reference frames. */
|
||||||
if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B)
|
if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B)
|
||||||
@ -1051,7 +1053,7 @@ static int svq3_decode_frame(AVCodecContext *avctx,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay) {
|
if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay) {
|
||||||
s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
|
s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
|
||||||
(s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
|
(s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
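The svq3 hunks above move motion vectors around with *(uint32_t *) casts: each vector is a pair of contiguous int16_t components, so a single 32-bit move transfers both at once, and pack16to32(mx, my) builds the same packed value for fill_rectangle(). A sketch of the idea; memcpy is the strictly aliasing-safe spelling of the cast, and the helper name is ours, not FFmpeg's:

    #include <stdint.h>
    #include <string.h>

    /* Copy an (mx, my) int16_t pair with one 32-bit move, as the
     * *(uint32_t *) stores in svq3_decode_mb do. */
    static void copy_mv(int16_t dst[2], const int16_t src[2])
    {
        uint32_t packed;
        memcpy(&packed, src,     sizeof(packed));
        memcpy(dst,     &packed, sizeof(packed));
    }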
@@ -407,15 +407,15 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
 uint8_t *srcY, *srcU, *srcV;
 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;

-if(!v->s.last_picture.data[0])return;
+if(!v->s.last_picture.f.data[0])return;

 mx = s->mv[dir][0][0];
 my = s->mv[dir][0][1];

 // store motion vectors for further use in B frames
 if(s->pict_type == AV_PICTURE_TYPE_P) {
-s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
-s->current_picture.motion_val[1][s->block_index[0]][1] = my;
+s->current_picture.f.motion_val[1][s->block_index[0]][0] = mx;
+s->current_picture.f.motion_val[1][s->block_index[0]][1] = my;
 }
 uvmx = (mx + ((mx & 3) == 3)) >> 1;
 uvmy = (my + ((my & 3) == 3)) >> 1;
@@ -426,13 +426,13 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
 }
 if(!dir) {
-srcY = s->last_picture.data[0];
-srcU = s->last_picture.data[1];
-srcV = s->last_picture.data[2];
+srcY = s->last_picture.f.data[0];
+srcU = s->last_picture.f.data[1];
+srcV = s->last_picture.f.data[2];
 } else {
-srcY = s->next_picture.data[0];
-srcU = s->next_picture.data[1];
-srcV = s->next_picture.data[2];
+srcY = s->next_picture.f.data[0];
+srcU = s->next_picture.f.data[1];
+srcV = s->next_picture.f.data[2];
 }

 src_x = s->mb_x * 16 + (mx >> 2);
@@ -559,10 +559,10 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n)
 int dxy, mx, my, src_x, src_y;
 int off;

-if(!v->s.last_picture.data[0])return;
+if(!v->s.last_picture.f.data[0])return;
 mx = s->mv[0][n][0];
 my = s->mv[0][n][1];
-srcY = s->last_picture.data[0];
+srcY = s->last_picture.f.data[0];

 off = s->linesize * 4 * (n&2) + (n&1) * 8;

@@ -647,7 +647,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v)
 int mvx[4], mvy[4], intra[4];
 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};

-if(!v->s.last_picture.data[0])return;
+if(!v->s.last_picture.f.data[0])return;
 if(s->flags & CODEC_FLAG_GRAY) return;

 for(i = 0; i < 4; i++) {
@@ -687,14 +687,14 @@ static void vc1_mc_4mv_chroma(VC1Context *v)
 tx = (mvx[t1] + mvx[t2]) / 2;
 ty = (mvy[t1] + mvy[t2]) / 2;
 } else {
-s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
-s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
+s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
 return; //no need to do MC for inter blocks
 }

-s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
-s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
+s->current_picture.f.motion_val[1][s->block_index[0]][0] = tx;
+s->current_picture.f.motion_val[1][s->block_index[0]][1] = ty;
 uvmx = (tx + ((tx&3) == 3)) >> 1;
 uvmy = (ty + ((ty&3) == 3)) >> 1;
 v->luma_mv[s->mb_x][0] = uvmx;
@@ -715,8 +715,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v)
 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
 }

-srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
+srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
@@ -883,30 +883,30 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int m
 xy = s->block_index[n];

 if(s->mb_intra){
-s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
-s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
-s->current_picture.motion_val[1][xy][0] = 0;
-s->current_picture.motion_val[1][xy][1] = 0;
+s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
+s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
+s->current_picture.f.motion_val[1][xy][0] = 0;
+s->current_picture.f.motion_val[1][xy][1] = 0;
 if(mv1) { /* duplicate motion data for 1-MV block */
-s->current_picture.motion_val[0][xy + 1][0] = 0;
-s->current_picture.motion_val[0][xy + 1][1] = 0;
-s->current_picture.motion_val[0][xy + wrap][0] = 0;
-s->current_picture.motion_val[0][xy + wrap][1] = 0;
-s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
-s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
+s->current_picture.f.motion_val[0][xy + 1][0] = 0;
+s->current_picture.f.motion_val[0][xy + 1][1] = 0;
+s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
+s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
+s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
+s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
-s->current_picture.motion_val[1][xy + 1][0] = 0;
-s->current_picture.motion_val[1][xy + 1][1] = 0;
-s->current_picture.motion_val[1][xy + wrap][0] = 0;
-s->current_picture.motion_val[1][xy + wrap][1] = 0;
-s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
-s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
+s->current_picture.f.motion_val[1][xy + 1][0] = 0;
+s->current_picture.f.motion_val[1][xy + 1][1] = 0;
+s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
+s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
+s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
+s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
 }
 return;
 }

-C = s->current_picture.motion_val[0][xy - 1];
-A = s->current_picture.motion_val[0][xy - wrap];
+C = s->current_picture.f.motion_val[0][xy - 1];
+A = s->current_picture.f.motion_val[0][xy - wrap];
 if(mv1)
 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
 else {
@@ -925,7 +925,7 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int m
 off = -1;
 }
 }
-B = s->current_picture.motion_val[0][xy - wrap + off];
+B = s->current_picture.f.motion_val[0][xy - wrap + off];

 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
 if(s->mb_width == 1) {
@@ -989,15 +989,15 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int m
 }
 }
 /* store MV using signed modulus of MV range defined in 4.11 */
-s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
-s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
+s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
+s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
 if(mv1) { /* duplicate motion data for 1-MV block */
-s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
-s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
-s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
-s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
-s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
-s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
+s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
+s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
+s->current_picture.f.motion_val[0][xy + wrap][0] = s->current_picture.f.motion_val[0][xy][0];
+s->current_picture.f.motion_val[0][xy + wrap][1] = s->current_picture.f.motion_val[0][xy][1];
+s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
+s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
 }
 }

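vc1_pred_mv above stores the same vector at four positions of the 8x8 block grid whenever a macroblock carries a single MV (the xy, xy + 1, xy + wrap and xy + wrap + 1 writes), so that neighbour prediction can later read any of the four blocks. A compilable sketch of that fan-out; wrap is the b8-grid stride and the helper name is ours, not FFmpeg's:

    #include <stdint.h>

    /* Duplicate a 1-MV macroblock's vector across its four 8x8 blocks. */
    static void fan_out_1mv(int16_t (*motion_val)[2], int xy, int wrap,
                            int16_t mx, int16_t my)
    {
        motion_val[xy][0]            = mx;  motion_val[xy][1]            = my;
        motion_val[xy + 1][0]        = mx;  motion_val[xy + 1][1]        = my;
        motion_val[xy + wrap][0]     = mx;  motion_val[xy + wrap][1]     = my;
        motion_val[xy + wrap + 1][0] = mx;  motion_val[xy + wrap + 1][1] = my;
    }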
@@ -1010,7 +1010,7 @@ static void vc1_interp_mc(VC1Context *v)
 uint8_t *srcY, *srcU, *srcV;
 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;

-if(!v->s.next_picture.data[0])return;
+if(!v->s.next_picture.f.data[0])return;

 mx = s->mv[1][0][0];
 my = s->mv[1][0][1];
@@ -1020,9 +1020,9 @@ static void vc1_interp_mc(VC1Context *v)
 uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
 uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
 }
-srcY = s->next_picture.data[0];
-srcU = s->next_picture.data[1];
-srcV = s->next_picture.data[2];
+srcY = s->next_picture.f.data[0];
+srcU = s->next_picture.f.data[1];
+srcV = s->next_picture.f.data[2];

 src_x = s->mb_x * 16 + (mx >> 2);
 src_y = s->mb_y * 16 + (my >> 2);
@@ -1185,16 +1185,16 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
 xy = s->block_index[0];

 if(s->mb_intra) {
-s->current_picture.motion_val[0][xy][0] =
-s->current_picture.motion_val[0][xy][1] =
-s->current_picture.motion_val[1][xy][0] =
-s->current_picture.motion_val[1][xy][1] = 0;
+s->current_picture.f.motion_val[0][xy][0] =
+s->current_picture.f.motion_val[0][xy][1] =
+s->current_picture.f.motion_val[1][xy][0] =
+s->current_picture.f.motion_val[1][xy][1] = 0;
 return;
 }
-s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
-s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
-s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
-s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
+s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
+s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
+s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
+s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);

 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
@@ -1202,18 +1202,18 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
 if(direct) {
-s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
-s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
-s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
-s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
+s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
+s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
+s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
+s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
 return;
 }

 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
-C = s->current_picture.motion_val[0][xy - 2];
-A = s->current_picture.motion_val[0][xy - wrap*2];
+C = s->current_picture.f.motion_val[0][xy - 2];
+A = s->current_picture.f.motion_val[0][xy - wrap*2];
 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
-B = s->current_picture.motion_val[0][xy - wrap*2 + off];
+B = s->current_picture.f.motion_val[0][xy - wrap*2 + off];

 if(!s->mb_x) C[0] = C[1] = 0;
 if(!s->first_slice_line) { // predictor A is not out of bounds
@@ -1288,10 +1288,10 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
 }
 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
-C = s->current_picture.motion_val[1][xy - 2];
-A = s->current_picture.motion_val[1][xy - wrap*2];
+C = s->current_picture.f.motion_val[1][xy - 2];
+A = s->current_picture.f.motion_val[1][xy - wrap*2];
 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
-B = s->current_picture.motion_val[1][xy - wrap*2 + off];
+B = s->current_picture.f.motion_val[1][xy - wrap*2 + off];

 if(!s->mb_x) C[0] = C[1] = 0;
 if(!s->first_slice_line) { // predictor A is not out of bounds
@@ -1366,10 +1366,10 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
 }
-s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
-s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
-s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
-s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
+s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
+s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
+s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
+s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
 }

 /** Get predicted DC value for I-frames only
@@ -1464,14 +1464,14 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
 b = dc_val[ - 1 - wrap];
 a = dc_val[ - wrap];
 /* scale predictors if needed */
-q1 = s->current_picture.qscale_table[mb_pos];
+q1 = s->current_picture.f.qscale_table[mb_pos];
 if(c_avail && (n!= 1 && n!=3)) {
-q2 = s->current_picture.qscale_table[mb_pos - 1];
+q2 = s->current_picture.f.qscale_table[mb_pos - 1];
 if(q2 && q2 != q1)
 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
 }
 if(a_avail && (n!= 2 && n!=3)) {
-q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
+q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
 if(q2 && q2 != q1)
 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
 }
@@ -1479,7 +1479,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
 int off = mb_pos;
 if(n != 1) off--;
 if(n != 2) off -= s->mb_stride;
-q2 = s->current_picture.qscale_table[off];
+q2 = s->current_picture.f.qscale_table[off];
 if(q2 && q2 != q1)
 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
 }
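The expression (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18, repeated in vc1_pred_dc above and in the two intra-block hunks that follow, rescales a neighbour's DC predictor from the neighbour's quantizer scale to the current one without a division: judging from the 18-bit shift and the 0x20000 rounding term, ff_vc1_dqscale[n - 1] holds roughly 2^18 / n. A sketch with the reciprocal computed inline instead of read from the table (that equivalence is our assumption, not something stated in the diff):

    /* dc * s2 / s1 in 18-bit fixed point; inv18 stands in for
     * ff_vc1_dqscale[s1 - 1], assumed to be about (1 << 18) / s1. */
    static int rescale_dc(int dc, int s2, int s1)
    {
        int inv18 = (1 << 18) / s1;
        return (dc * s2 * inv18 + 0x20000) >> 18;
    }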
@@ -1853,9 +1853,9 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int c
 else //top
 ac_val -= 16 * s->block_wrap[n];

-q1 = s->current_picture.qscale_table[mb_pos];
-if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
-if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
+q1 = s->current_picture.f.qscale_table[mb_pos];
+if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
 if(dc_pred_dir && n==1) q2 = q1;
 if(!dc_pred_dir && n==2) q2 = q1;
 if(n==3) q2 = q1;
@@ -2060,9 +2060,9 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int c
 else //top
 ac_val -= 16 * s->block_wrap[n];

-q1 = s->current_picture.qscale_table[mb_pos];
-if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
-if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
+q1 = s->current_picture.f.qscale_table[mb_pos];
+if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
 if(dc_pred_dir && n==1) q2 = q1;
 if(!dc_pred_dir && n==2) q2 = q1;
 if(n==3) q2 = q1;
@@ -2344,7 +2344,7 @@ static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_
 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4)) :
 (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
 mv_stride = s->b8_stride;
-mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
+mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
 }

 if (bottom_is_intra & 1 || block_is_intra & 1 ||
@@ -2406,7 +2406,7 @@ static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_
 (mb_cbp >> ((block_num + 1) * 4));
 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4)) :
 (mb_is_intra >> ((block_num + 1) * 4));
-mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
+mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
 }
 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
@@ -2502,10 +2502,10 @@ static int vc1_decode_p_mb(VC1Context *v)
 GET_MVDATA(dmv_x, dmv_y);

 if (s->mb_intra) {
-s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
-s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
+s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
 }
-s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
+s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);

 /* FIXME Set DC val for inter block ? */
@@ -2526,7 +2526,7 @@ static int vc1_decode_p_mb(VC1Context *v)
 mquant = v->pq;
 cbp = 0;
 }
-s->current_picture.qscale_table[mb_pos] = mquant;
+s->current_picture.f.qscale_table[mb_pos] = mquant;

 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
@@ -2576,8 +2576,8 @@ static int vc1_decode_p_mb(VC1Context *v)
 v->mb_type[0][s->block_index[i]] = 0;
 s->dc_val[0][s->block_index[i]] = 0;
 }
-s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
-s->current_picture.qscale_table[mb_pos] = 0;
+s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
+s->current_picture.f.qscale_table[mb_pos] = 0;
 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
 vc1_mc_1mv(v, 0);
 }
@@ -2621,7 +2621,7 @@ static int vc1_decode_p_mb(VC1Context *v)
 if(!intra_count && !coded_inter)
 goto end;
 GET_MQUANT();
-s->current_picture.qscale_table[mb_pos] = mquant;
+s->current_picture.f.qscale_table[mb_pos] = mquant;
 /* test if block is intra and has pred */
 {
 int intrapred = 0;
@@ -2675,7 +2675,7 @@ static int vc1_decode_p_mb(VC1Context *v)
 else //Skipped MB
 {
 s->mb_intra = 0;
-s->current_picture.qscale_table[mb_pos] = 0;
+s->current_picture.f.qscale_table[mb_pos] = 0;
 for (i=0; i<6; i++) {
 v->mb_type[0][s->block_index[i]] = 0;
 s->dc_val[0][s->block_index[i]] = 0;
@@ -2686,7 +2686,7 @@ static int vc1_decode_p_mb(VC1Context *v)
 vc1_mc_4mv_luma(v, i);
 }
 vc1_mc_4mv_chroma(v);
-s->current_picture.qscale_table[mb_pos] = 0;
+s->current_picture.f.qscale_table[mb_pos] = 0;
 }
 }
 end:
@@ -2734,7 +2734,7 @@ static void vc1_decode_b_mb(VC1Context *v)
 v->mb_type[0][s->block_index[i]] = 0;
 s->dc_val[0][s->block_index[i]] = 0;
 }
-s->current_picture.qscale_table[mb_pos] = 0;
+s->current_picture.f.qscale_table[mb_pos] = 0;

 if (!direct) {
 if (!skipped) {
@@ -2770,7 +2770,7 @@ static void vc1_decode_b_mb(VC1Context *v)
 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 GET_MQUANT();
 s->mb_intra = 0;
-s->current_picture.qscale_table[mb_pos] = mquant;
+s->current_picture.f.qscale_table[mb_pos] = mquant;
 if(!v->ttmbf)
 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
@@ -2785,7 +2785,7 @@ static void vc1_decode_b_mb(VC1Context *v)
 }
 if(s->mb_intra && !mb_has_coeffs) {
 GET_MQUANT();
-s->current_picture.qscale_table[mb_pos] = mquant;
+s->current_picture.f.qscale_table[mb_pos] = mquant;
 s->ac_pred = get_bits1(gb);
 cbp = 0;
 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
@@ -2807,7 +2807,7 @@ static void vc1_decode_b_mb(VC1Context *v)
 s->ac_pred = get_bits1(gb);
 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 GET_MQUANT();
-s->current_picture.qscale_table[mb_pos] = mquant;
+s->current_picture.f.qscale_table[mb_pos] = mquant;
 if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
 }
@@ -2898,10 +2898,10 @@ static void vc1_decode_i_blocks(VC1Context *v)
 dst[5] = s->dest[2];
 s->dsp.clear_blocks(s->block[0]);
 mb_pos = s->mb_x + s->mb_y * s->mb_width;
-s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
-s->current_picture.qscale_table[mb_pos] = v->pq;
-s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
-s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
+s->current_picture.f.qscale_table[mb_pos] = v->pq;
+s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
+s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;

 // do actual MB decoding and displaying
 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
@@ -3029,9 +3029,9 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
 ff_update_block_index(s);
 s->dsp.clear_blocks(block[0]);
 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
-s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
-s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
-s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
+s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
+s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;

 // do actual MB decoding and displaying
 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
@@ -3045,7 +3045,7 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)

 GET_MQUANT();

-s->current_picture.qscale_table[mb_pos] = mquant;
+s->current_picture.f.qscale_table[mb_pos] = mquant;
 /* Set DC scale - y and c use the same */
 s->y_dc_scale = s->y_dc_scale_table[mquant];
 s->c_dc_scale = s->c_dc_scale_table[mquant];
@@ -3232,9 +3232,9 @@ static void vc1_decode_skip_blocks(VC1Context *v)
 s->mb_x = 0;
 ff_init_block_index(s);
 ff_update_block_index(s);
-memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
-memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
-memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
+memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
+memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
+memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
 ff_draw_horiz_band(s, s->mb_y * 16, 16);
 s->first_slice_line = 0;
 }
@@ -3601,7 +3601,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,

 /* We need to set current_picture_ptr before reading the header,
 * otherwise we cannot store anything in there. */
-if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
+if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
 int i= ff_find_unused_picture(s, 0);
 s->current_picture_ptr= &s->picture[i];
 }
@@ -3699,8 +3699,8 @@ static int vc1_decode_frame(AVCodecContext *avctx,
 }

 // for skipping the frame
-s->current_picture.pict_type= s->pict_type;
-s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+s->current_picture.f.pict_type = s->pict_type;
+s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

 /* skip B-frames if we don't have reference frames */
 if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)){
@@ -3756,8 +3756,8 @@ static int vc1_decode_frame(AVCodecContext *avctx,

 MPV_frame_end(s);

-assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
-assert(s->current_picture.pict_type == s->pict_type);
+assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
+assert(s->current_picture.f.pict_type == s->pict_type);
 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
 *pict= *(AVFrame*)s->current_picture_ptr;
 } else if (s->last_picture_ptr != NULL) {
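The vc1_decode_frame hunk above still ends with *pict = *(AVFrame*)s->current_picture_ptr;. That cast survives this refactor only if the embedded frame is the first member of the picture struct, so that a pointer to the picture and a pointer to its frame alias the same address. A sketch of the invariant with stand-in types (the layout is assumed for illustration, not quoted from mpegvideo.h):

    #include <assert.h>
    #include <stddef.h>

    typedef struct FrmStub { unsigned char *data[4]; } FrmStub;
    typedef struct PicStub { FrmStub f; int reference; } PicStub;

    int main(void)
    {
        static PicStub pic;                   /* zero-initialized        */
        assert(offsetof(PicStub, f) == 0);    /* the cast depends on this */
        FrmStub *as_frame = (FrmStub *)&pic;  /* same address as &pic.f   */
        (void)as_frame;
        return 0;
    }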
@@ -32,7 +32,7 @@
 static void parse_mb_skip(Wmv2Context * w){
 int mb_x, mb_y;
 MpegEncContext * const s= &w->s;
-uint32_t * const mb_type= s->current_picture_ptr->mb_type;
+uint32_t * const mb_type = s->current_picture_ptr->f.mb_type;

 w->skip_type= get_bits(&s->gb, 2);
 switch(w->skip_type){
@@ -257,11 +257,11 @@ static int16_t *wmv2_pred_motion(Wmv2Context *w, int *px, int *py){
 wrap = s->b8_stride;
 xy = s->block_index[0];

-mot_val = s->current_picture.motion_val[0][xy];
+mot_val = s->current_picture.f.motion_val[0][xy];

-A = s->current_picture.motion_val[0][xy - 1];
-B = s->current_picture.motion_val[0][xy - wrap];
-C = s->current_picture.motion_val[0][xy + 2 - wrap];
+A = s->current_picture.f.motion_val[0][xy - 1];
+B = s->current_picture.f.motion_val[0][xy - wrap];
+C = s->current_picture.f.motion_val[0][xy + 2 - wrap];

 if(s->mb_x && !s->first_slice_line && !s->mspel && w->top_left_mv_flag)
 diff= FFMAX(FFABS(A[0] - B[0]), FFABS(A[1] - B[1]));
@@ -343,7 +343,7 @@ int ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
 if(w->j_type) return 0;

 if (s->pict_type == AV_PICTURE_TYPE_P) {
-if(IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])){
+if (IS_SKIP(s->current_picture.f.mb_type[s->mb_y * s->mb_stride + s->mb_x])) {
 /* skip mb */
 s->mb_intra = 0;
 for(i=0;i<6;i++)