Mirror of https://github.com/FFmpeg/FFmpeg.git, synced 2024-12-23 12:43:46 +02:00
Remove unnecessary AVFrame pointer casts.
commit 562b6c744a
parent 2f4b476e04
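Every hunk below makes the same change: the codec's private context embeds its AVFrame by value, so taking the member's address already yields an AVFrame *, and the explicit cast is a no-op. A minimal standalone sketch of the before/after pattern (the ExampleContext struct and the stand-in type definitions are illustrative only, not taken from any file touched by this commit):

/* Stand-in definitions so the snippet compiles on its own; the real code
 * uses the libavcodec types of this era (AVFrame, AVCodecContext). */
typedef struct AVFrame { int dummy; } AVFrame;

typedef struct AVCodecContext {
    void    *priv_data;    /* points at the codec's private context */
    AVFrame *coded_frame;
} AVCodecContext;

/* Hypothetical private context: the frame is embedded by value, just like
 * c->picture, s->picture, a->pic, l->pic, ... in the hunks below. */
typedef struct ExampleContext {
    AVFrame picture;
} ExampleContext;

static void init_coded_frame(AVCodecContext *avctx)
{
    ExampleContext *s = avctx->priv_data;

    /* Before this commit: avctx->coded_frame = (AVFrame *)&s->picture;
     * The cast changes nothing: s->picture is an AVFrame, so &s->picture
     * already has type AVFrame *. */
    avctx->coded_frame = &s->picture;
}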
@@ -246,7 +246,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                  const AVFrame *pict, int *got_packet)
 {
     A64Context *c = avctx->priv_data;
-    AVFrame *const p = (AVFrame *) & c->picture;
+    AVFrame *const p = &c->picture;
 
     int frame;
     int x, y;
@@ -51,7 +51,7 @@ avs_decode_frame(AVCodecContext * avctx,
     int buf_size = avpkt->size;
     AvsContext *const avs = avctx->priv_data;
     AVFrame *picture = data;
-    AVFrame *const p = (AVFrame *) & avs->picture;
+    AVFrame *const p = &avs->picture;
     const uint8_t *table, *vect;
     uint8_t *out;
     int i, j, x, y, stride, vect_w = 3, vect_h = 3;
@@ -27,8 +27,8 @@
 static av_cold int bmp_decode_init(AVCodecContext *avctx){
     BMPContext *s = avctx->priv_data;
 
-    avcodec_get_frame_defaults((AVFrame*)&s->picture);
-    avctx->coded_frame = (AVFrame*)&s->picture;
+    avcodec_get_frame_defaults(&s->picture);
+    avctx->coded_frame = &s->picture;
 
     return 0;
 }
@@ -33,8 +33,8 @@ static const uint32_t rgb444_masks[] = { 0x0F00, 0x00F0, 0x000F };
 static av_cold int bmp_encode_init(AVCodecContext *avctx){
     BMPContext *s = avctx->priv_data;
 
-    avcodec_get_frame_defaults((AVFrame*)&s->picture);
-    avctx->coded_frame = (AVFrame*)&s->picture;
+    avcodec_get_frame_defaults(&s->picture);
+    avctx->coded_frame = &s->picture;
 
     switch (avctx->pix_fmt) {
     case PIX_FMT_BGR24:
@@ -68,7 +68,7 @@ static int bmp_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
 {
     BMPContext *s = avctx->priv_data;
-    AVFrame * const p= (AVFrame*)&s->picture;
+    AVFrame * const p = &s->picture;
     int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize, ret;
     const uint32_t *pal = NULL;
     int pad_bytes_per_row, pal_entries = 0, compression = BMP_RGB;
@@ -60,7 +60,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
 {
     FrapsContext * const s = avctx->priv_data;
 
-    avctx->coded_frame = (AVFrame*)&s->frame;
+    avctx->coded_frame = &s->frame;
     avctx->pix_fmt= PIX_FMT_NONE; /* set in decode_frame */
 
     s->avctx = avctx;
@@ -131,7 +131,7 @@ static int decode_frame(AVCodecContext *avctx,
     int buf_size = avpkt->size;
     FrapsContext * const s = avctx->priv_data;
     AVFrame *frame = data;
-    AVFrame * const f = (AVFrame*)&s->frame;
+    AVFrame * const f = &s->frame;
     uint32_t header;
     unsigned int version,header_size;
     unsigned int x, y;
@@ -146,7 +146,7 @@ static int gif_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
 {
     GIFContext *s = avctx->priv_data;
-    AVFrame *const p = (AVFrame *)&s->picture;
+    AVFrame *const p = &s->picture;
     uint8_t *outbuf_ptr, *end;
     int ret;
 
@@ -143,7 +143,7 @@ static int ir2_decode_frame(AVCodecContext *avctx,
     int buf_size = avpkt->size;
     Ir2Context * const s = avctx->priv_data;
     AVFrame *picture = data;
-    AVFrame * const p= (AVFrame*)&s->picture;
+    AVFrame * const p = &s->picture;
     int start;
 
     if(p->data[0])
@@ -232,7 +232,7 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
 {
     JpeglsContext * const s = avctx->priv_data;
-    AVFrame * const p= (AVFrame*)&s->picture;
+    AVFrame * const p = &s->picture;
     const int near = avctx->prediction_method;
     PutBitContext pb, pb2;
     GetBitContext gb;
@@ -163,7 +163,7 @@ static int decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     LOCOContext * const l = avctx->priv_data;
-    AVFrame * const p= (AVFrame*)&l->pic;
+    AVFrame * const p = &l->pic;
     int decoded;
 
     if(p->data[0])
@@ -242,7 +242,7 @@ static av_cold int decode_init(AVCodecContext *avctx){
 
 static av_cold int decode_init_thread_copy(AVCodecContext *avctx){
     MDECContext * const a = avctx->priv_data;
-    AVFrame *p = (AVFrame*)&a->picture;
+    AVFrame *p = &a->picture;
 
     avctx->coded_frame = p;
     a->avctx= avctx;
@@ -29,7 +29,7 @@ static int pam_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
 {
     PNMContext *s = avctx->priv_data;
-    AVFrame * const p = (AVFrame*)&s->picture;
+    AVFrame * const p = &s->picture;
     int i, h, w, n, linesize, depth, maxval, ret;
     const char *tuple_type;
     uint8_t *ptr;
@@ -189,8 +189,8 @@ av_cold int ff_pnm_init(AVCodecContext *avctx)
 {
     PNMContext *s = avctx->priv_data;
 
-    avcodec_get_frame_defaults((AVFrame*)&s->picture);
-    avctx->coded_frame = (AVFrame*)&s->picture;
+    avcodec_get_frame_defaults(&s->picture);
+    avctx->coded_frame = &s->picture;
 
     return 0;
 }
@@ -32,7 +32,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data,
     int buf_size = avpkt->size;
     PNMContext * const s = avctx->priv_data;
     AVFrame *picture = data;
-    AVFrame * const p = (AVFrame*)&s->picture;
+    AVFrame * const p = &s->picture;
     int i, j, n, linesize, h, upgrade = 0;
     unsigned char *ptr;
     int components, sample_len;
@@ -29,7 +29,7 @@ static int pnm_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
 {
     PNMContext *s = avctx->priv_data;
-    AVFrame * const p = (AVFrame*)&s->picture;
+    AVFrame * const p = &s->picture;
     int i, h, h1, c, n, linesize, ret;
     uint8_t *ptr, *ptr1, *ptr2;
 
@@ -40,7 +40,7 @@ static int decode_frame(AVCodecContext *avctx,
     const uint8_t *buf_end = avpkt->data + avpkt->size;
     int buf_size = avpkt->size;
     QdrawContext * const a = avctx->priv_data;
-    AVFrame * const p= (AVFrame*)&a->pic;
+    AVFrame * const p = &a->pic;
     uint8_t* outdata;
     int colors;
     int i;
@@ -254,7 +254,7 @@ static int decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     QpegContext * const a = avctx->priv_data;
-    AVFrame * const p= (AVFrame*)&a->pic;
+    AVFrame * const p = &a->pic;
     uint8_t* outdata;
     int delta;
     const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
@@ -297,7 +297,7 @@ static av_cold int decode_init(AVCodecContext *avctx){
 
 static av_cold int decode_end(AVCodecContext *avctx){
     QpegContext * const a = avctx->priv_data;
-    AVFrame * const p= (AVFrame*)&a->pic;
+    AVFrame * const p = &a->pic;
 
     if(p->data[0])
         avctx->release_buffer(avctx, p);
@@ -120,8 +120,8 @@ static int raw_decode(AVCodecContext *avctx,
     int buf_size = avpkt->size;
     RawVideoContext *context = avctx->priv_data;
 
-    AVFrame * frame = (AVFrame *) data;
-    AVPicture * picture = (AVPicture *) data;
+    AVFrame *frame = data;
+    AVPicture *picture = data;
 
     frame->pict_type = avctx->coded_frame->pict_type;
     frame->interlaced_frame = avctx->coded_frame->interlaced_frame;
@@ -32,7 +32,7 @@
 
 static av_cold int raw_init_encoder(AVCodecContext *avctx)
 {
-    avctx->coded_frame = (AVFrame *)avctx->priv_data;
+    avctx->coded_frame = avctx->priv_data;
     avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
     avctx->coded_frame->key_frame = 1;
     avctx->bits_per_coded_sample = av_get_bits_per_pixel(&av_pix_fmt_descriptors[avctx->pix_fmt]);
@@ -473,7 +473,7 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx)
     SVQ1Context * const s = avctx->priv_data;
 
     ff_dsputil_init(&s->dsp, avctx);
-    avctx->coded_frame= (AVFrame*)&s->picture;
+    avctx->coded_frame = &s->picture;
 
     s->frame_width = avctx->width;
     s->frame_height = avctx->height;
@@ -501,7 +501,7 @@ static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
 {
     SVQ1Context * const s = avctx->priv_data;
-    AVFrame * const p= (AVFrame*)&s->picture;
+    AVFrame * const p = &s->picture;
     AVFrame temp;
     int i, ret;
 
@@ -106,7 +106,7 @@ static int decode_frame(AVCodecContext *avctx,
     const uint8_t *buf_end = avpkt->data + avpkt->size;
     TargaContext * const s = avctx->priv_data;
     AVFrame *picture = data;
-    AVFrame * const p= (AVFrame*)&s->picture;
+    AVFrame * const p = &s->picture;
     uint8_t *dst;
     int stride;
     int idlen, compr, y, w, h, bpp, flags;
@@ -257,8 +257,8 @@ static int decode_frame(AVCodecContext *avctx,
 static av_cold int targa_init(AVCodecContext *avctx){
     TargaContext *s = avctx->priv_data;
 
-    avcodec_get_frame_defaults((AVFrame*)&s->picture);
-    avctx->coded_frame= (AVFrame*)&s->picture;
+    avcodec_get_frame_defaults(&s->picture);
+    avctx->coded_frame = &s->picture;
 
     return 0;
 }
@@ -506,7 +506,7 @@ static int decode_frame(AVCodecContext *avctx,
     int buf_size = avpkt->size;
     TiffContext * const s = avctx->priv_data;
     AVFrame *picture = data;
-    AVFrame * const p= (AVFrame*)&s->picture;
+    AVFrame * const p = &s->picture;
     const uint8_t *orig_buf = buf, *end_buf = buf + buf_size;
     unsigned off;
     int id, le, ret;
@@ -619,8 +619,8 @@ static av_cold int tiff_init(AVCodecContext *avctx){
     s->width = 0;
     s->height = 0;
     s->avctx = avctx;
-    avcodec_get_frame_defaults((AVFrame*)&s->picture);
-    avctx->coded_frame= (AVFrame*)&s->picture;
+    avcodec_get_frame_defaults(&s->picture);
+    avctx->coded_frame = &s->picture;
     ff_lzw_decode_open(&s->lzw);
     ff_ccitt_unpack_init();
 
@@ -204,7 +204,7 @@ static int encode_frame(AVCodecContext * avctx, AVPacket *pkt,
                         const AVFrame *pict, int *got_packet)
 {
     TiffEncoderContext *s = avctx->priv_data;
-    AVFrame *const p = (AVFrame *) & s->picture;
+    AVFrame *const p = &s->picture;
     int i;
     uint8_t *ptr;
     uint8_t *offset;
@@ -764,7 +764,7 @@ static int decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     TM2Context * const l = avctx->priv_data;
-    AVFrame * const p= (AVFrame*)&l->pic;
+    AVFrame * const p = &l->pic;
     int i, skip, t;
     uint8_t *swbuf;
 
@@ -50,7 +50,7 @@ static av_cold int ulti_decode_init(AVCodecContext *avctx)
     s->height = avctx->height;
     s->blocks = (s->width / 8) * (s->height / 8);
     avctx->pix_fmt = PIX_FMT_YUV410P;
-    avctx->coded_frame = (AVFrame*) &s->frame;
+    avctx->coded_frame = &s->frame;
     s->ulti_codebook = ulti_codebook;
 
     return 0;
@@ -49,7 +49,7 @@ static int decode_frame(AVCodecContext *avctx,
     int buf_size = avpkt->size;
     VCR1Context * const a = avctx->priv_data;
     AVFrame *picture = data;
-    AVFrame * const p= (AVFrame*)&a->picture;
+    AVFrame * const p = &a->picture;
     const uint8_t *bytestream= buf;
     int i, x, y;
 
@@ -121,7 +121,7 @@ static int decode_frame(AVCodecContext *avctx,
 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
     VCR1Context * const a = avctx->priv_data;
     AVFrame *pict = data;
-    AVFrame * const p= (AVFrame*)&a->picture;
+    AVFrame * const p = &a->picture;
     int size;
 
     *p = *pict;
@@ -141,7 +141,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
 static av_cold void common_init(AVCodecContext *avctx){
     VCR1Context * const a = avctx->priv_data;
 
-    avctx->coded_frame= (AVFrame*)&a->picture;
+    avctx->coded_frame = &a->picture;
     a->avctx= avctx;
 }
 
@@ -64,7 +64,7 @@ static int decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     WNV1Context * const l = avctx->priv_data;
-    AVFrame * const p= (AVFrame*)&l->pic;
+    AVFrame * const p = &l->pic;
     unsigned char *Y,*U,*V;
     int i, j;
     int prev_y = 0, prev_u = 0, prev_v = 0;
@@ -45,7 +45,7 @@ static int decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     VideoXLContext * const a = avctx->priv_data;
-    AVFrame * const p= (AVFrame*)&a->pic;
+    AVFrame * const p = &a->pic;
     uint8_t *Y, *U, *V;
     int i, j;
     int stride;
@@ -313,7 +313,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
         return -1;
     }
 
-    avctx->coded_frame = (AVFrame*)&c->pic;
+    avctx->coded_frame = &c->pic;
 
     return 0;
 }